/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[] = "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[] = "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.
 * Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 *
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
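/*
 * Illustrative sketch only (not compiled): one way an attach path could
 * map an HBA-reported "interconnect-type" value onto the behaviors
 * described above.  The use of scsi_ifgetcap(9F) here is an assumption
 * for illustration, the function name is hypothetical, and the
 * SD_INTERCONNECT_* values are defined further down in this file.
 */
#if 0
static int
example_get_interconnect_type(struct sd_lun *un)
{
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", 1)) {
	case INTERCONNECT_FIBRE:
		return (SD_INTERCONNECT_FIBRE);
	case INTERCONNECT_SSA:
		return (SD_INTERCONNECT_SSA);
	case INTERCONNECT_FABRIC:
		return (SD_INTERCONNECT_FABRIC);
	case -1:
		/*
		 * The HBA does not define the property; run in
		 * backward-compatibility mode (and notify for FC HBAs).
		 */
		return (SD_DEFAULT_INTERCONNECT_TYPE);
	default:
		/* INTERCONNECT_1394, INTERCONNECT_USB, etc. */
		return (SD_INTERCONNECT_PARALLEL);
	}
}
#endif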
/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size	ssd_max_xfer_size
#define	sd_config_list		ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int	sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char	*sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int	sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int	sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int	sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int	sd_wait_cmds_complete		= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t	sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t	sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))
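/*
 * Illustrative sketch only (not compiled): the bookkeeping the attach and
 * detach paths could perform on the chain above.  One node is kept per HBA
 * dev_info node, and nlun[] counts attached luns per target (target is
 * assumed to be < NTARGETS_WIDE).  The function name is hypothetical; the
 * driver's real routine is sd_scsi_update_lun_on_target(), declared later
 * in this file.
 */
#if 0
static void
example_update_lun_on_target(dev_info_t *dip, int target, int flag)
{
	struct sd_scsi_hba_tgt_lun *cp;

	mutex_enter(&sd_scsi_target_lun_mutex);
	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == dip)
			break;
	}
	if (cp == NULL && flag == SD_SCSI_LUN_ATTACH) {
		/* First lun on this controller: add a node for it. */
		cp = kmem_zalloc(sizeof (*cp), KM_SLEEP);
		cp->pdip = dip;
		cp->next = sd_scsi_target_lun_head;
		sd_scsi_target_lun_head = cp;
	}
	if (cp != NULL) {
		if (flag == SD_SCSI_LUN_ATTACH)
			cp->nlun[target]++;
		else
			cp->nlun[target]--;
	}
	mutex_exit(&sd_scsi_target_lun_mutex);
}
#endif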
/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
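/*
 * Illustrative sketch only (not compiled): how a cached probe could consult
 * the array above before paying for a real scsi_probe().  The function name
 * and the convention that a cache slot holding anything other than
 * SCSIPROBE_EXISTS records a prior failure are assumptions for illustration;
 * the driver's real routine is sd_scsi_probe_with_cache(), declared later
 * in this file.
 */
#if 0
static int
example_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
{
	dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
	int target = devp->sd_address.a_target;
	struct sd_scsi_probe_cache *cp;
	int rval;

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip)
			break;
	}
	if (cp != NULL && cp->cache[target] != SCSIPROBE_EXISTS) {
		/* A prior probe of this target failed; reuse the result. */
		rval = cp->cache[target];
		mutex_exit(&sd_scsi_probe_cache_mutex);
		return (rval);
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);

	/* No cached failure: do the real probe (and cache it elsewhere). */
	return (scsi_probe(devp, waitfn));
}
#endif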
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN SESS01", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN SE6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
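/*
 * Illustrative sketch only (not compiled): the blank-collapsing comparison
 * described in the table comment above, built on SD_TOUPPER and the kernel
 * strcmp().  Both strings are normalized (upcased, runs of blanks collapsed
 * to one, edge blanks dropped) so that a table entry of " NEC CDROM "
 * matches a device id of "NEC CDROM".  The function names and buffer sizes
 * are hypothetical; the driver's real routine is sd_blank_cmp(), declared
 * later in this file.
 */
#if 0
static void
example_normalize(char *dst, const char *src, size_t len)
{
	size_t n = 0;

	while (*src == ' ')
		src++;				/* drop leading blanks */
	while (*src != '\0' && n + 1 < len) {
		if (*src == ' ') {
			while (*src == ' ')	/* collapse a run of blanks */
				src++;
			if (*src == '\0')
				break;		/* drop trailing blanks */
			dst[n++] = ' ';
			continue;
		}
		dst[n++] = (char)SD_TOUPPER(*src++);
	}
	dst[n] = '\0';
}

static int
example_blank_cmp(const char *id, const char *devid)
{
	char a[64], b[64];

	example_normalize(a, id, sizeof (a));
	example_normalize(b, devid, sizeof (b));
	return (strcmp(a, b) == 0);		/* 1 on a full match */
}
#endif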
#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
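/*
 * Illustrative sketch only (not compiled): typical use of the state macros
 * above while holding the per-unit mutex.  SD_MUTEX() and the
 * SD_STATE_SUSPENDED value are assumed here to come from sddef.h.
 */
#if 0
	mutex_enter(SD_MUTEX(un));
	New_state(un, SD_STATE_SUSPENDED);	/* remembers previous state */
	/* ... suspend processing ... */
	Restore_state(un);			/* back to un_last_state */
	mutex_exit(SD_MUTEX(un));
#endif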
/*
 * Per-CDB-group limits: each entry gives the CDB length group, the group
 * code as encoded in the command opcode, and the maximum LBA and maximum
 * transfer length that a CDB of that group can express.
 */
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target,
    int flag);

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(struct sd_lun *un);
static int sd_get_serialnum(struct sd_lun *un, uchar_t *wwn, int *len);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);
/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);
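/*
 * Illustrative sketch only (not compiled): the layering convention that the
 * iostart/iodone prototypes above follow.  Each layer does its own
 * processing and then hands the buf to the next entry in the chain by
 * index; the chain arrays (sd_iostart_chain/sd_iodone_chain) and their
 * exact element types are defined later in this file, so the dispatch
 * shown here is schematic and the function name is hypothetical.
 */
#if 0
static void
example_layer_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	/* ... this layer's processing of bp ... */

	/* Hand off to the next layer in the iostart chain (schematic). */
	(*(sd_iostart_chain[index + 1]))(index + 1, un, bp);
}
#endif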
/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
int flag); 1465 static int sd_mhdioc_release(dev_t dev); 1466 static int sd_mhdioc_register_devid(dev_t dev); 1467 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1468 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1469 1470 /* 1471 * SCSI removable prototypes 1472 */ 1473 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1474 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1475 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1476 static int sr_pause_resume(dev_t dev, int mode); 1477 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1478 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1479 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1480 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1481 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1482 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1483 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1484 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1485 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1486 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1487 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1488 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1489 static int sr_eject(dev_t dev); 1490 static void sr_ejected(register struct sd_lun *un); 1491 static int sr_check_wp(dev_t dev); 1492 static int sd_check_media(dev_t dev, enum dkio_state state); 1493 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1494 static void sd_delayed_cv_broadcast(void *arg); 1495 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1496 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1497 1498 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1499 1500 /* 1501 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1502 */ 1503 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1504 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1505 static void sd_wm_cache_destructor(void *wm, void *un); 1506 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1507 daddr_t endb, ushort_t typ); 1508 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1509 daddr_t endb); 1510 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1511 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1512 static void sd_read_modify_write_task(void * arg); 1513 static int 1514 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1515 struct buf **bpp); 1516 1517 1518 /* 1519 * Function prototypes for failfast support. 
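*
* The flush behavior on a failfast transition is tunable via the
* sd_failfast_flushctl variable defined further below. As an
* illustrative (untested) example, adding a line such as
*
*	set sd:sd_failfast_flushctl = 0x3
*
* to /etc/system would select both SD_FAILFAST_FLUSH_ALL_BUFS and
* SD_FAILFAST_FLUSH_ALL_QUEUES at boot; the ssd build would use the
* ssd prefix instead.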
1520 */ 1521 static void sd_failfast_flushq(struct sd_lun *un); 1522 static int sd_failfast_flushq_callback(struct buf *bp); 1523 1524 /* 1525 * Function prototypes to check for lsi devices 1526 */ 1527 static void sd_is_lsi(struct sd_lun *un); 1528 1529 /* 1530 * Function prototypes for x86 support 1531 */ 1532 #if defined(__i386) || defined(__amd64) 1533 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1534 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1535 #endif 1536 1537 1538 /* Function prototypes for cmlb */ 1539 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1540 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1541 1542 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1543 1544 /* 1545 * Constants for failfast support: 1546 * 1547 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1548 * failfast processing being performed. 1549 * 1550 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1551 * failfast processing on all bufs with B_FAILFAST set. 1552 */ 1553 1554 #define SD_FAILFAST_INACTIVE 0 1555 #define SD_FAILFAST_ACTIVE 1 1556 1557 /* 1558 * Bitmask to control behavior of buf(9S) flushes when a transition to 1559 * the failfast state occurs. Optional bits include: 1560 * 1561 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1562 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1563 * be flushed. 1564 * 1565 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1566 * driver, in addition to the regular wait queue. This includes the xbuf 1567 * queues. When clear, only the driver's wait queue will be flushed. 1568 */ 1569 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1570 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1571 1572 /* 1573 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1574 * to flush all queues within the driver. 1575 */ 1576 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1577 1578 1579 /* 1580 * SD Testing Fault Injection 1581 */ 1582 #ifdef SD_FAULT_INJECTION 1583 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1584 static void sd_faultinjection(struct scsi_pkt *pktp); 1585 static void sd_injection_log(char *buf, struct sd_lun *un); 1586 #endif 1587 1588 /* 1589 * Device driver ops vector 1590 */ 1591 static struct cb_ops sd_cb_ops = { 1592 sdopen, /* open */ 1593 sdclose, /* close */ 1594 sdstrategy, /* strategy */ 1595 nodev, /* print */ 1596 sddump, /* dump */ 1597 sdread, /* read */ 1598 sdwrite, /* write */ 1599 sdioctl, /* ioctl */ 1600 nodev, /* devmap */ 1601 nodev, /* mmap */ 1602 nodev, /* segmap */ 1603 nochpoll, /* poll */ 1604 sd_prop_op, /* cb_prop_op */ 1605 0, /* streamtab */ 1606 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1607 CB_REV, /* cb_rev */ 1608 sdaread, /* async I/O read entry point */ 1609 sdawrite /* async I/O write entry point */ 1610 }; 1611 1612 static struct dev_ops sd_ops = { 1613 DEVO_REV, /* devo_rev, */ 1614 0, /* refcnt */ 1615 sdinfo, /* info */ 1616 nulldev, /* identify */ 1617 sdprobe, /* probe */ 1618 sdattach, /* attach */ 1619 sddetach, /* detach */ 1620 nodev, /* reset */ 1621 &sd_cb_ops, /* driver operations */ 1622 NULL, /* bus operations */ 1623 sdpower /* power */ 1624 }; 1625 1626 1627 /* 1628 * This is the loadable module wrapper. 
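*
* The _init(9E), _fini(9E), and _info(9E) entry points below simply
* hand the modlinkage defined here to mod_install(9F), mod_remove(9F),
* and mod_info(9F) respectively.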
1629 */ 1630 #include <sys/modctl.h> 1631 1632 static struct modldrv modldrv = { 1633 &mod_driverops, /* Type of module. This one is a driver */ 1634 SD_MODULE_NAME, /* Module name. */ 1635 &sd_ops /* driver ops */ 1636 }; 1637 1638 1639 static struct modlinkage modlinkage = { 1640 MODREV_1, 1641 &modldrv, 1642 NULL 1643 }; 1644 1645 static cmlb_tg_ops_t sd_tgops = { 1646 TG_DK_OPS_VERSION_1, 1647 sd_tg_rdwr, 1648 sd_tg_getinfo 1649 }; 1650 1651 static struct scsi_asq_key_strings sd_additional_codes[] = { 1652 0x81, 0, "Logical Unit is Reserved", 1653 0x85, 0, "Audio Address Not Valid", 1654 0xb6, 0, "Media Load Mechanism Failed", 1655 0xB9, 0, "Audio Play Operation Aborted", 1656 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1657 0x53, 2, "Medium removal prevented", 1658 0x6f, 0, "Authentication failed during key exchange", 1659 0x6f, 1, "Key not present", 1660 0x6f, 2, "Key not established", 1661 0x6f, 3, "Read without proper authentication", 1662 0x6f, 4, "Mismatched region to this logical unit", 1663 0x6f, 5, "Region reset count error", 1664 0xffff, 0x0, NULL 1665 }; 1666 1667 1668 /* 1669 * Struct for passing printing information for sense data messages 1670 */ 1671 struct sd_sense_info { 1672 int ssi_severity; 1673 int ssi_pfa_flag; 1674 }; 1675 1676 /* 1677 * Table of function pointers for iostart-side routines. Separate "chains" 1678 * of layered function calls are formed by placing the function pointers 1679 * sequentially in the desired order. Functions are called according to an 1680 * incrementing table index ordering. The last function in each chain must 1681 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1682 * in the sd_iodone_chain[] array. 1683 * 1684 * Note: It may seem more natural to organize both the iostart and iodone 1685 * functions together, into an array of structures (or some similar 1686 * organization) with a common index, rather than two separate arrays which 1687 * must be maintained in synchronization. The purpose of this division is 1688 * to achieve improved performance: individual arrays allows for more 1689 * effective cache line utilization on certain platforms. 
1690 */ 1691 1692 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1693 1694 1695 static sd_chain_t sd_iostart_chain[] = { 1696 1697 /* Chain for buf IO for disk drive targets (PM enabled) */ 1698 sd_mapblockaddr_iostart, /* Index: 0 */ 1699 sd_pm_iostart, /* Index: 1 */ 1700 sd_core_iostart, /* Index: 2 */ 1701 1702 /* Chain for buf IO for disk drive targets (PM disabled) */ 1703 sd_mapblockaddr_iostart, /* Index: 3 */ 1704 sd_core_iostart, /* Index: 4 */ 1705 1706 /* Chain for buf IO for removable-media targets (PM enabled) */ 1707 sd_mapblockaddr_iostart, /* Index: 5 */ 1708 sd_mapblocksize_iostart, /* Index: 6 */ 1709 sd_pm_iostart, /* Index: 7 */ 1710 sd_core_iostart, /* Index: 8 */ 1711 1712 /* Chain for buf IO for removable-media targets (PM disabled) */ 1713 sd_mapblockaddr_iostart, /* Index: 9 */ 1714 sd_mapblocksize_iostart, /* Index: 10 */ 1715 sd_core_iostart, /* Index: 11 */ 1716 1717 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1718 sd_mapblockaddr_iostart, /* Index: 12 */ 1719 sd_checksum_iostart, /* Index: 13 */ 1720 sd_pm_iostart, /* Index: 14 */ 1721 sd_core_iostart, /* Index: 15 */ 1722 1723 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1724 sd_mapblockaddr_iostart, /* Index: 16 */ 1725 sd_checksum_iostart, /* Index: 17 */ 1726 sd_core_iostart, /* Index: 18 */ 1727 1728 /* Chain for USCSI commands (all targets) */ 1729 sd_pm_iostart, /* Index: 19 */ 1730 sd_core_iostart, /* Index: 20 */ 1731 1732 /* Chain for checksumming USCSI commands (all targets) */ 1733 sd_checksum_uscsi_iostart, /* Index: 21 */ 1734 sd_pm_iostart, /* Index: 22 */ 1735 sd_core_iostart, /* Index: 23 */ 1736 1737 /* Chain for "direct" USCSI commands (all targets) */ 1738 sd_core_iostart, /* Index: 24 */ 1739 1740 /* Chain for "direct priority" USCSI commands (all targets) */ 1741 sd_core_iostart, /* Index: 25 */ 1742 }; 1743 1744 /* 1745 * Macros to locate the first function of each iostart chain in the 1746 * sd_iostart_chain[] array. These are located by the index in the array. 1747 */ 1748 #define SD_CHAIN_DISK_IOSTART 0 1749 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1750 #define SD_CHAIN_RMMEDIA_IOSTART 5 1751 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1752 #define SD_CHAIN_CHKSUM_IOSTART 12 1753 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1754 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1755 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1756 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1757 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1758 1759 1760 /* 1761 * Table of function pointers for the iodone-side routines for the driver- 1762 * internal layering mechanism. The calling sequence for iodone routines 1763 * uses a decrementing table index, so the last routine called in a chain 1764 * must be at the lowest array index location for that chain. The last 1765 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1766 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1767 * of the functions in an iodone side chain must correspond to the ordering 1768 * of the iostart routines for that chain. Note that there is no iodone 1769 * side routine that corresponds to sd_core_iostart(), so there is no 1770 * entry in the table for this. 
1771 */ 1772 1773 static sd_chain_t sd_iodone_chain[] = { 1774 1775 /* Chain for buf IO for disk drive targets (PM enabled) */ 1776 sd_buf_iodone, /* Index: 0 */ 1777 sd_mapblockaddr_iodone, /* Index: 1 */ 1778 sd_pm_iodone, /* Index: 2 */ 1779 1780 /* Chain for buf IO for disk drive targets (PM disabled) */ 1781 sd_buf_iodone, /* Index: 3 */ 1782 sd_mapblockaddr_iodone, /* Index: 4 */ 1783 1784 /* Chain for buf IO for removable-media targets (PM enabled) */ 1785 sd_buf_iodone, /* Index: 5 */ 1786 sd_mapblockaddr_iodone, /* Index: 6 */ 1787 sd_mapblocksize_iodone, /* Index: 7 */ 1788 sd_pm_iodone, /* Index: 8 */ 1789 1790 /* Chain for buf IO for removable-media targets (PM disabled) */ 1791 sd_buf_iodone, /* Index: 9 */ 1792 sd_mapblockaddr_iodone, /* Index: 10 */ 1793 sd_mapblocksize_iodone, /* Index: 11 */ 1794 1795 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1796 sd_buf_iodone, /* Index: 12 */ 1797 sd_mapblockaddr_iodone, /* Index: 13 */ 1798 sd_checksum_iodone, /* Index: 14 */ 1799 sd_pm_iodone, /* Index: 15 */ 1800 1801 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1802 sd_buf_iodone, /* Index: 16 */ 1803 sd_mapblockaddr_iodone, /* Index: 17 */ 1804 sd_checksum_iodone, /* Index: 18 */ 1805 1806 /* Chain for USCSI commands (non-checksum targets) */ 1807 sd_uscsi_iodone, /* Index: 19 */ 1808 sd_pm_iodone, /* Index: 20 */ 1809 1810 /* Chain for USCSI commands (checksum targets) */ 1811 sd_uscsi_iodone, /* Index: 21 */ 1812 sd_checksum_uscsi_iodone, /* Index: 22 */ 1813 sd_pm_iodone, /* Index: 22 */ 1814 1815 /* Chain for "direct" USCSI commands (all targets) */ 1816 sd_uscsi_iodone, /* Index: 24 */ 1817 1818 /* Chain for "direct priority" USCSI commands (all targets) */ 1819 sd_uscsi_iodone, /* Index: 25 */ 1820 }; 1821 1822 1823 /* 1824 * Macros to locate the "first" function in the sd_iodone_chain[] array for 1825 * each iodone-side chain. These are located by the array index, but as the 1826 * iodone side functions are called in a decrementing-index order, the 1827 * highest index number in each chain must be specified (as these correspond 1828 * to the first function in the iodone chain that will be called by the core 1829 * at IO completion time). 1830 */ 1831 1832 #define SD_CHAIN_DISK_IODONE 2 1833 #define SD_CHAIN_DISK_IODONE_NO_PM 4 1834 #define SD_CHAIN_RMMEDIA_IODONE 8 1835 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 1836 #define SD_CHAIN_CHKSUM_IODONE 15 1837 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 1838 #define SD_CHAIN_USCSI_CMD_IODONE 20 1839 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22 1840 #define SD_CHAIN_DIRECT_CMD_IODONE 24 1841 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 1842 1843 1844 1845 1846 /* 1847 * Array to map a layering chain index to the appropriate initpkt routine. 1848 * The redundant entries are present so that the index used for accessing 1849 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1850 * with this table as well. 
1851 */ 1852 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 1853 1854 static sd_initpkt_t sd_initpkt_map[] = { 1855 1856 /* Chain for buf IO for disk drive targets (PM enabled) */ 1857 sd_initpkt_for_buf, /* Index: 0 */ 1858 sd_initpkt_for_buf, /* Index: 1 */ 1859 sd_initpkt_for_buf, /* Index: 2 */ 1860 1861 /* Chain for buf IO for disk drive targets (PM disabled) */ 1862 sd_initpkt_for_buf, /* Index: 3 */ 1863 sd_initpkt_for_buf, /* Index: 4 */ 1864 1865 /* Chain for buf IO for removable-media targets (PM enabled) */ 1866 sd_initpkt_for_buf, /* Index: 5 */ 1867 sd_initpkt_for_buf, /* Index: 6 */ 1868 sd_initpkt_for_buf, /* Index: 7 */ 1869 sd_initpkt_for_buf, /* Index: 8 */ 1870 1871 /* Chain for buf IO for removable-media targets (PM disabled) */ 1872 sd_initpkt_for_buf, /* Index: 9 */ 1873 sd_initpkt_for_buf, /* Index: 10 */ 1874 sd_initpkt_for_buf, /* Index: 11 */ 1875 1876 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1877 sd_initpkt_for_buf, /* Index: 12 */ 1878 sd_initpkt_for_buf, /* Index: 13 */ 1879 sd_initpkt_for_buf, /* Index: 14 */ 1880 sd_initpkt_for_buf, /* Index: 15 */ 1881 1882 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1883 sd_initpkt_for_buf, /* Index: 16 */ 1884 sd_initpkt_for_buf, /* Index: 17 */ 1885 sd_initpkt_for_buf, /* Index: 18 */ 1886 1887 /* Chain for USCSI commands (non-checksum targets) */ 1888 sd_initpkt_for_uscsi, /* Index: 19 */ 1889 sd_initpkt_for_uscsi, /* Index: 20 */ 1890 1891 /* Chain for USCSI commands (checksum targets) */ 1892 sd_initpkt_for_uscsi, /* Index: 21 */ 1893 sd_initpkt_for_uscsi, /* Index: 22 */ 1894 sd_initpkt_for_uscsi, /* Index: 22 */ 1895 1896 /* Chain for "direct" USCSI commands (all targets) */ 1897 sd_initpkt_for_uscsi, /* Index: 24 */ 1898 1899 /* Chain for "direct priority" USCSI commands (all targets) */ 1900 sd_initpkt_for_uscsi, /* Index: 25 */ 1901 1902 }; 1903 1904 1905 /* 1906 * Array to map a layering chain index to the appropriate destroypktpkt routine. 1907 * The redundant entries are present so that the index used for accessing 1908 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1909 * with this table as well. 
1910 */ 1911 typedef void (*sd_destroypkt_t)(struct buf *); 1912 1913 static sd_destroypkt_t sd_destroypkt_map[] = { 1914 1915 /* Chain for buf IO for disk drive targets (PM enabled) */ 1916 sd_destroypkt_for_buf, /* Index: 0 */ 1917 sd_destroypkt_for_buf, /* Index: 1 */ 1918 sd_destroypkt_for_buf, /* Index: 2 */ 1919 1920 /* Chain for buf IO for disk drive targets (PM disabled) */ 1921 sd_destroypkt_for_buf, /* Index: 3 */ 1922 sd_destroypkt_for_buf, /* Index: 4 */ 1923 1924 /* Chain for buf IO for removable-media targets (PM enabled) */ 1925 sd_destroypkt_for_buf, /* Index: 5 */ 1926 sd_destroypkt_for_buf, /* Index: 6 */ 1927 sd_destroypkt_for_buf, /* Index: 7 */ 1928 sd_destroypkt_for_buf, /* Index: 8 */ 1929 1930 /* Chain for buf IO for removable-media targets (PM disabled) */ 1931 sd_destroypkt_for_buf, /* Index: 9 */ 1932 sd_destroypkt_for_buf, /* Index: 10 */ 1933 sd_destroypkt_for_buf, /* Index: 11 */ 1934 1935 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1936 sd_destroypkt_for_buf, /* Index: 12 */ 1937 sd_destroypkt_for_buf, /* Index: 13 */ 1938 sd_destroypkt_for_buf, /* Index: 14 */ 1939 sd_destroypkt_for_buf, /* Index: 15 */ 1940 1941 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1942 sd_destroypkt_for_buf, /* Index: 16 */ 1943 sd_destroypkt_for_buf, /* Index: 17 */ 1944 sd_destroypkt_for_buf, /* Index: 18 */ 1945 1946 /* Chain for USCSI commands (non-checksum targets) */ 1947 sd_destroypkt_for_uscsi, /* Index: 19 */ 1948 sd_destroypkt_for_uscsi, /* Index: 20 */ 1949 1950 /* Chain for USCSI commands (checksum targets) */ 1951 sd_destroypkt_for_uscsi, /* Index: 21 */ 1952 sd_destroypkt_for_uscsi, /* Index: 22 */ 1953 sd_destroypkt_for_uscsi, /* Index: 22 */ 1954 1955 /* Chain for "direct" USCSI commands (all targets) */ 1956 sd_destroypkt_for_uscsi, /* Index: 24 */ 1957 1958 /* Chain for "direct priority" USCSI commands (all targets) */ 1959 sd_destroypkt_for_uscsi, /* Index: 25 */ 1960 1961 }; 1962 1963 1964 1965 /* 1966 * Array to map a layering chain index to the appropriate chain "type". 1967 * The chain type indicates a specific property/usage of the chain. 1968 * The redundant entries are present so that the index used for accessing 1969 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1970 * with this table as well. 
1971 */ 1972 1973 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 1974 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 1975 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 1976 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 1977 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 1978 /* (for error recovery) */ 1979 1980 static int sd_chain_type_map[] = { 1981 1982 /* Chain for buf IO for disk drive targets (PM enabled) */ 1983 SD_CHAIN_BUFIO, /* Index: 0 */ 1984 SD_CHAIN_BUFIO, /* Index: 1 */ 1985 SD_CHAIN_BUFIO, /* Index: 2 */ 1986 1987 /* Chain for buf IO for disk drive targets (PM disabled) */ 1988 SD_CHAIN_BUFIO, /* Index: 3 */ 1989 SD_CHAIN_BUFIO, /* Index: 4 */ 1990 1991 /* Chain for buf IO for removable-media targets (PM enabled) */ 1992 SD_CHAIN_BUFIO, /* Index: 5 */ 1993 SD_CHAIN_BUFIO, /* Index: 6 */ 1994 SD_CHAIN_BUFIO, /* Index: 7 */ 1995 SD_CHAIN_BUFIO, /* Index: 8 */ 1996 1997 /* Chain for buf IO for removable-media targets (PM disabled) */ 1998 SD_CHAIN_BUFIO, /* Index: 9 */ 1999 SD_CHAIN_BUFIO, /* Index: 10 */ 2000 SD_CHAIN_BUFIO, /* Index: 11 */ 2001 2002 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2003 SD_CHAIN_BUFIO, /* Index: 12 */ 2004 SD_CHAIN_BUFIO, /* Index: 13 */ 2005 SD_CHAIN_BUFIO, /* Index: 14 */ 2006 SD_CHAIN_BUFIO, /* Index: 15 */ 2007 2008 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2009 SD_CHAIN_BUFIO, /* Index: 16 */ 2010 SD_CHAIN_BUFIO, /* Index: 17 */ 2011 SD_CHAIN_BUFIO, /* Index: 18 */ 2012 2013 /* Chain for USCSI commands (non-checksum targets) */ 2014 SD_CHAIN_USCSI, /* Index: 19 */ 2015 SD_CHAIN_USCSI, /* Index: 20 */ 2016 2017 /* Chain for USCSI commands (checksum targets) */ 2018 SD_CHAIN_USCSI, /* Index: 21 */ 2019 SD_CHAIN_USCSI, /* Index: 22 */ 2020 SD_CHAIN_USCSI, /* Index: 22 */ 2021 2022 /* Chain for "direct" USCSI commands (all targets) */ 2023 SD_CHAIN_DIRECT, /* Index: 24 */ 2024 2025 /* Chain for "direct priority" USCSI commands (all targets) */ 2026 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 2027 }; 2028 2029 2030 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */ 2031 #define SD_IS_BUFIO(xp) \ 2032 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2033 2034 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2035 #define SD_IS_DIRECT_PRIORITY(xp) \ 2036 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2037 2038 2039 2040 /* 2041 * Struct, array, and macros to map a specific chain to the appropriate 2042 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2043 * 2044 * The sd_chain_index_map[] array is used at attach time to set the various 2045 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2046 * chain to be used with the instance. This allows different instances to use 2047 * different chain for buf IO, uscsi IO, etc.. Also, since the xb_chain_iostart 2048 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2049 * values at sd_xbuf init time, this allows (1) layering chains may be changed 2050 * dynamically & without the use of locking; and (2) a layer may update the 2051 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2052 * to allow for deferred processing of an IO within the same chain from a 2053 * different execution context. 
2054 */ 2055 2056 struct sd_chain_index { 2057 int sci_iostart_index; 2058 int sci_iodone_index; 2059 }; 2060 2061 static struct sd_chain_index sd_chain_index_map[] = { 2062 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2063 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2064 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2065 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2066 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2067 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2068 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2069 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2070 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2071 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2072 }; 2073 2074 2075 /* 2076 * The following are indexes into the sd_chain_index_map[] array. 2077 */ 2078 2079 /* un->un_buf_chain_type must be set to one of these */ 2080 #define SD_CHAIN_INFO_DISK 0 2081 #define SD_CHAIN_INFO_DISK_NO_PM 1 2082 #define SD_CHAIN_INFO_RMMEDIA 2 2083 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2084 #define SD_CHAIN_INFO_CHKSUM 4 2085 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2086 2087 /* un->un_uscsi_chain_type must be set to one of these */ 2088 #define SD_CHAIN_INFO_USCSI_CMD 6 2089 /* USCSI with PM disabled is the same as DIRECT */ 2090 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2091 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2092 2093 /* un->un_direct_chain_type must be set to one of these */ 2094 #define SD_CHAIN_INFO_DIRECT_CMD 8 2095 2096 /* un->un_priority_chain_type must be set to one of these */ 2097 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2098 2099 /* size for devid inquiries */ 2100 #define MAX_INQUIRY_SIZE 0xF0 2101 2102 /* 2103 * Macros used by functions to pass a given buf(9S) struct along to the 2104 * next function in the layering chain for further processing. 2105 * 2106 * In the following macros, passing more than three arguments to the called 2107 * routines causes the optimizer for the SPARC compiler to stop doing tail 2108 * call elimination which results in significant performance degradation. 2109 */ 2110 #define SD_BEGIN_IOSTART(index, un, bp) \ 2111 ((*(sd_iostart_chain[index]))(index, un, bp)) 2112 2113 #define SD_BEGIN_IODONE(index, un, bp) \ 2114 ((*(sd_iodone_chain[index]))(index, un, bp)) 2115 2116 #define SD_NEXT_IOSTART(index, un, bp) \ 2117 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2118 2119 #define SD_NEXT_IODONE(index, un, bp) \ 2120 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2121 2122 /* 2123 * Function: _init 2124 * 2125 * Description: This is the driver _init(9E) entry point. 2126 * 2127 * Return Code: Returns the value from mod_install(9F) or 2128 * ddi_soft_state_init(9F) as appropriate. 2129 * 2130 * Context: Called when driver module loaded. 
2131 */ 2132 2133 int 2134 _init(void) 2135 { 2136 int err; 2137 2138 /* establish driver name from module name */ 2139 sd_label = mod_modname(&modlinkage); 2140 2141 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2142 SD_MAXUNIT); 2143 2144 if (err != 0) { 2145 return (err); 2146 } 2147 2148 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2149 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2150 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2151 2152 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2153 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2154 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2155 2156 /* 2157 * it's ok to init here even for fibre device 2158 */ 2159 sd_scsi_probe_cache_init(); 2160 2161 sd_scsi_target_lun_init(); 2162 2163 /* 2164 * Creating taskq before mod_install ensures that all callers (threads) 2165 * that enter the module after a successfull mod_install encounter 2166 * a valid taskq. 2167 */ 2168 sd_taskq_create(); 2169 2170 err = mod_install(&modlinkage); 2171 if (err != 0) { 2172 /* delete taskq if install fails */ 2173 sd_taskq_delete(); 2174 2175 mutex_destroy(&sd_detach_mutex); 2176 mutex_destroy(&sd_log_mutex); 2177 mutex_destroy(&sd_label_mutex); 2178 2179 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2180 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2181 cv_destroy(&sd_tr.srq_inprocess_cv); 2182 2183 sd_scsi_probe_cache_fini(); 2184 2185 sd_scsi_target_lun_fini(); 2186 2187 ddi_soft_state_fini(&sd_state); 2188 return (err); 2189 } 2190 2191 return (err); 2192 } 2193 2194 2195 /* 2196 * Function: _fini 2197 * 2198 * Description: This is the driver _fini(9E) entry point. 2199 * 2200 * Return Code: Returns the value from mod_remove(9F) 2201 * 2202 * Context: Called when driver module is unloaded. 2203 */ 2204 2205 int 2206 _fini(void) 2207 { 2208 int err; 2209 2210 if ((err = mod_remove(&modlinkage)) != 0) { 2211 return (err); 2212 } 2213 2214 sd_taskq_delete(); 2215 2216 mutex_destroy(&sd_detach_mutex); 2217 mutex_destroy(&sd_log_mutex); 2218 mutex_destroy(&sd_label_mutex); 2219 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2220 2221 sd_scsi_probe_cache_fini(); 2222 2223 sd_scsi_target_lun_fini(); 2224 2225 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2226 cv_destroy(&sd_tr.srq_inprocess_cv); 2227 2228 ddi_soft_state_fini(&sd_state); 2229 2230 return (err); 2231 } 2232 2233 2234 /* 2235 * Function: _info 2236 * 2237 * Description: This is the driver _info(9E) entry point. 2238 * 2239 * Arguments: modinfop - pointer to the driver modinfo structure 2240 * 2241 * Return Code: Returns the value from mod_info(9F). 2242 * 2243 * Context: Kernel thread context 2244 */ 2245 2246 int 2247 _info(struct modinfo *modinfop) 2248 { 2249 return (mod_info(&modlinkage, modinfop)); 2250 } 2251 2252 2253 /* 2254 * The following routines implement the driver message logging facility. 2255 * They provide component- and level- based debug output filtering. 2256 * Output may also be restricted to messages for a single instance by 2257 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2258 * to NULL, then messages for all instances are printed. 2259 * 2260 * These routines have been cloned from each other due to the language 2261 * constraints of macros and variable argument list processing. 2262 */ 2263 2264 2265 /* 2266 * Function: sd_log_err 2267 * 2268 * Description: This routine is called by the SD_ERROR macro for debug 2269 * logging of error conditions. 
2270 * 2271 * Arguments: comp - driver component being logged 2272 * dev - pointer to driver info structure 2273 * fmt - error string and format to be logged 2274 */ 2275 2276 static void 2277 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2278 { 2279 va_list ap; 2280 dev_info_t *dev; 2281 2282 ASSERT(un != NULL); 2283 dev = SD_DEVINFO(un); 2284 ASSERT(dev != NULL); 2285 2286 /* 2287 * Filter messages based on the global component and level masks. 2288 * Also print if un matches the value of sd_debug_un, or if 2289 * sd_debug_un is set to NULL. 2290 */ 2291 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2292 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2293 mutex_enter(&sd_log_mutex); 2294 va_start(ap, fmt); 2295 (void) vsprintf(sd_log_buf, fmt, ap); 2296 va_end(ap); 2297 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2298 mutex_exit(&sd_log_mutex); 2299 } 2300 #ifdef SD_FAULT_INJECTION 2301 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2302 if (un->sd_injection_mask & comp) { 2303 mutex_enter(&sd_log_mutex); 2304 va_start(ap, fmt); 2305 (void) vsprintf(sd_log_buf, fmt, ap); 2306 va_end(ap); 2307 sd_injection_log(sd_log_buf, un); 2308 mutex_exit(&sd_log_mutex); 2309 } 2310 #endif 2311 } 2312 2313 2314 /* 2315 * Function: sd_log_info 2316 * 2317 * Description: This routine is called by the SD_INFO macro for debug 2318 * logging of general purpose informational conditions. 2319 * 2320 * Arguments: comp - driver component being logged 2321 * dev - pointer to driver info structure 2322 * fmt - info string and format to be logged 2323 */ 2324 2325 static void 2326 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2327 { 2328 va_list ap; 2329 dev_info_t *dev; 2330 2331 ASSERT(un != NULL); 2332 dev = SD_DEVINFO(un); 2333 ASSERT(dev != NULL); 2334 2335 /* 2336 * Filter messages based on the global component and level masks. 2337 * Also print if un matches the value of sd_debug_un, or if 2338 * sd_debug_un is set to NULL. 2339 */ 2340 if ((sd_component_mask & component) && 2341 (sd_level_mask & SD_LOGMASK_INFO) && 2342 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2343 mutex_enter(&sd_log_mutex); 2344 va_start(ap, fmt); 2345 (void) vsprintf(sd_log_buf, fmt, ap); 2346 va_end(ap); 2347 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2348 mutex_exit(&sd_log_mutex); 2349 } 2350 #ifdef SD_FAULT_INJECTION 2351 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2352 if (un->sd_injection_mask & component) { 2353 mutex_enter(&sd_log_mutex); 2354 va_start(ap, fmt); 2355 (void) vsprintf(sd_log_buf, fmt, ap); 2356 va_end(ap); 2357 sd_injection_log(sd_log_buf, un); 2358 mutex_exit(&sd_log_mutex); 2359 } 2360 #endif 2361 } 2362 2363 2364 /* 2365 * Function: sd_log_trace 2366 * 2367 * Description: This routine is called by the SD_TRACE macro for debug 2368 * logging of trace conditions (i.e. function entry/exit). 2369 * 2370 * Arguments: comp - driver component being logged 2371 * dev - pointer to driver info structure 2372 * fmt - trace string and format to be logged 2373 */ 2374 2375 static void 2376 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2377 { 2378 va_list ap; 2379 dev_info_t *dev; 2380 2381 ASSERT(un != NULL); 2382 dev = SD_DEVINFO(un); 2383 ASSERT(dev != NULL); 2384 2385 /* 2386 * Filter messages based on the global component and level masks. 2387 * Also print if un matches the value of sd_debug_un, or if 2388 * sd_debug_un is set to NULL. 
2389 */ 2390 if ((sd_component_mask & component) && 2391 (sd_level_mask & SD_LOGMASK_TRACE) && 2392 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2393 mutex_enter(&sd_log_mutex); 2394 va_start(ap, fmt); 2395 (void) vsprintf(sd_log_buf, fmt, ap); 2396 va_end(ap); 2397 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2398 mutex_exit(&sd_log_mutex); 2399 } 2400 #ifdef SD_FAULT_INJECTION 2401 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2402 if (un->sd_injection_mask & component) { 2403 mutex_enter(&sd_log_mutex); 2404 va_start(ap, fmt); 2405 (void) vsprintf(sd_log_buf, fmt, ap); 2406 va_end(ap); 2407 sd_injection_log(sd_log_buf, un); 2408 mutex_exit(&sd_log_mutex); 2409 } 2410 #endif 2411 } 2412 2413 2414 /* 2415 * Function: sdprobe 2416 * 2417 * Description: This is the driver probe(9e) entry point function. 2418 * 2419 * Arguments: devi - opaque device info handle 2420 * 2421 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2422 * DDI_PROBE_FAILURE: If the probe failed. 2423 * DDI_PROBE_PARTIAL: If the instance is not present now, 2424 * but may be present in the future. 2425 */ 2426 2427 static int 2428 sdprobe(dev_info_t *devi) 2429 { 2430 struct scsi_device *devp; 2431 int rval; 2432 int instance; 2433 2434 /* 2435 * if it wasn't for pln, sdprobe could actually be nulldev 2436 * in the "__fibre" case. 2437 */ 2438 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2439 return (DDI_PROBE_DONTCARE); 2440 } 2441 2442 devp = ddi_get_driver_private(devi); 2443 2444 if (devp == NULL) { 2445 /* Ooops... nexus driver is mis-configured... */ 2446 return (DDI_PROBE_FAILURE); 2447 } 2448 2449 instance = ddi_get_instance(devi); 2450 2451 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2452 return (DDI_PROBE_PARTIAL); 2453 } 2454 2455 /* 2456 * Call the SCSA utility probe routine to see if we actually 2457 * have a target at this SCSI nexus. 2458 */ 2459 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2460 case SCSIPROBE_EXISTS: 2461 switch (devp->sd_inq->inq_dtype) { 2462 case DTYPE_DIRECT: 2463 rval = DDI_PROBE_SUCCESS; 2464 break; 2465 case DTYPE_RODIRECT: 2466 /* CDs etc. Can be removable media */ 2467 rval = DDI_PROBE_SUCCESS; 2468 break; 2469 case DTYPE_OPTICAL: 2470 /* 2471 * Rewritable optical driver HP115AA 2472 * Can also be removable media 2473 */ 2474 2475 /* 2476 * Do not attempt to bind to DTYPE_OPTICAL if 2477 * pre solaris 9 sparc sd behavior is required 2478 * 2479 * If first time through and sd_dtype_optical_bind 2480 * has not been set in /etc/system check properties 2481 */ 2482 2483 if (sd_dtype_optical_bind < 0) { 2484 sd_dtype_optical_bind = ddi_prop_get_int 2485 (DDI_DEV_T_ANY, devi, 0, 2486 "optical-device-bind", 1); 2487 } 2488 2489 if (sd_dtype_optical_bind == 0) { 2490 rval = DDI_PROBE_FAILURE; 2491 } else { 2492 rval = DDI_PROBE_SUCCESS; 2493 } 2494 break; 2495 2496 case DTYPE_NOTPRESENT: 2497 default: 2498 rval = DDI_PROBE_FAILURE; 2499 break; 2500 } 2501 break; 2502 default: 2503 rval = DDI_PROBE_PARTIAL; 2504 break; 2505 } 2506 2507 /* 2508 * This routine checks for resource allocation prior to freeing, 2509 * so it will take care of the "smart probing" case where a 2510 * scsi_probe() may or may not have been issued and will *not* 2511 * free previously-freed resources. 2512 */ 2513 scsi_unprobe(devp); 2514 return (rval); 2515 } 2516 2517 2518 /* 2519 * Function: sdinfo 2520 * 2521 * Description: This is the driver getinfo(9e) entry point function. 
2522 * Given the device number, return the devinfo pointer from 2523 * the scsi_device structure or the instance number 2524 * associated with the dev_t. 2525 * 2526 * Arguments: dip - pointer to device info structure 2527 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2528 * DDI_INFO_DEVT2INSTANCE) 2529 * arg - driver dev_t 2530 * resultp - user buffer for request response 2531 * 2532 * Return Code: DDI_SUCCESS 2533 * DDI_FAILURE 2534 */ 2535 /* ARGSUSED */ 2536 static int 2537 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2538 { 2539 struct sd_lun *un; 2540 dev_t dev; 2541 int instance; 2542 int error; 2543 2544 switch (infocmd) { 2545 case DDI_INFO_DEVT2DEVINFO: 2546 dev = (dev_t)arg; 2547 instance = SDUNIT(dev); 2548 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2549 return (DDI_FAILURE); 2550 } 2551 *result = (void *) SD_DEVINFO(un); 2552 error = DDI_SUCCESS; 2553 break; 2554 case DDI_INFO_DEVT2INSTANCE: 2555 dev = (dev_t)arg; 2556 instance = SDUNIT(dev); 2557 *result = (void *)(uintptr_t)instance; 2558 error = DDI_SUCCESS; 2559 break; 2560 default: 2561 error = DDI_FAILURE; 2562 } 2563 return (error); 2564 } 2565 2566 /* 2567 * Function: sd_prop_op 2568 * 2569 * Description: This is the driver prop_op(9e) entry point function. 2570 * Return the number of blocks for the partition in question 2571 * or forward the request to the property facilities. 2572 * 2573 * Arguments: dev - device number 2574 * dip - pointer to device info structure 2575 * prop_op - property operator 2576 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2577 * name - pointer to property name 2578 * valuep - pointer or address of the user buffer 2579 * lengthp - property length 2580 * 2581 * Return Code: DDI_PROP_SUCCESS 2582 * DDI_PROP_NOT_FOUND 2583 * DDI_PROP_UNDEFINED 2584 * DDI_PROP_NO_MEMORY 2585 * DDI_PROP_BUF_TOO_SMALL 2586 */ 2587 2588 static int 2589 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2590 char *name, caddr_t valuep, int *lengthp) 2591 { 2592 int instance = ddi_get_instance(dip); 2593 struct sd_lun *un; 2594 uint64_t nblocks64; 2595 2596 /* 2597 * Our dynamic properties are all device specific and size oriented. 2598 * Requests issued under conditions where size is valid are passed 2599 * to ddi_prop_op_nblocks with the size information, otherwise the 2600 * request is passed to ddi_prop_op. Size depends on valid geometry. 2601 */ 2602 un = ddi_get_soft_state(sd_state, instance); 2603 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2604 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2605 name, valuep, lengthp)); 2606 } else if (!SD_IS_VALID_LABEL(un)) { 2607 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2608 valuep, lengthp)); 2609 } 2610 2611 /* get nblocks value */ 2612 ASSERT(!mutex_owned(SD_MUTEX(un))); 2613 2614 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2615 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2616 2617 return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags, 2618 name, valuep, lengthp, nblocks64)); 2619 } 2620 2621 /* 2622 * The following functions are for smart probing: 2623 * sd_scsi_probe_cache_init() 2624 * sd_scsi_probe_cache_fini() 2625 * sd_scsi_clear_probe_cache() 2626 * sd_scsi_probe_with_cache() 2627 */ 2628 2629 /* 2630 * Function: sd_scsi_probe_cache_init 2631 * 2632 * Description: Initializes the probe response cache mutex and head pointer. 
2633 * 2634 * Context: Kernel thread context 2635 */ 2636 2637 static void 2638 sd_scsi_probe_cache_init(void) 2639 { 2640 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2641 sd_scsi_probe_cache_head = NULL; 2642 } 2643 2644 2645 /* 2646 * Function: sd_scsi_probe_cache_fini 2647 * 2648 * Description: Frees all resources associated with the probe response cache. 2649 * 2650 * Context: Kernel thread context 2651 */ 2652 2653 static void 2654 sd_scsi_probe_cache_fini(void) 2655 { 2656 struct sd_scsi_probe_cache *cp; 2657 struct sd_scsi_probe_cache *ncp; 2658 2659 /* Clean up our smart probing linked list */ 2660 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2661 ncp = cp->next; 2662 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2663 } 2664 sd_scsi_probe_cache_head = NULL; 2665 mutex_destroy(&sd_scsi_probe_cache_mutex); 2666 } 2667 2668 2669 /* 2670 * Function: sd_scsi_clear_probe_cache 2671 * 2672 * Description: This routine clears the probe response cache. This is 2673 * done when open() returns ENXIO so that when deferred 2674 * attach is attempted (possibly after a device has been 2675 * turned on) we will retry the probe. Since we don't know 2676 * which target we failed to open, we just clear the 2677 * entire cache. 2678 * 2679 * Context: Kernel thread context 2680 */ 2681 2682 static void 2683 sd_scsi_clear_probe_cache(void) 2684 { 2685 struct sd_scsi_probe_cache *cp; 2686 int i; 2687 2688 mutex_enter(&sd_scsi_probe_cache_mutex); 2689 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2690 /* 2691 * Reset all entries to SCSIPROBE_EXISTS. This will 2692 * force probing to be performed the next time 2693 * sd_scsi_probe_with_cache is called. 2694 */ 2695 for (i = 0; i < NTARGETS_WIDE; i++) { 2696 cp->cache[i] = SCSIPROBE_EXISTS; 2697 } 2698 } 2699 mutex_exit(&sd_scsi_probe_cache_mutex); 2700 } 2701 2702 2703 /* 2704 * Function: sd_scsi_probe_with_cache 2705 * 2706 * Description: This routine implements support for a scsi device probe 2707 * with cache. The driver maintains a cache of the target 2708 * responses to scsi probes. If we get no response from a 2709 * target during a probe inquiry, we remember that, and we 2710 * avoid additional calls to scsi_probe on non-zero LUNs 2711 * on the same target until the cache is cleared. By doing 2712 * so we avoid the 1/4 sec selection timeout for nonzero 2713 * LUNs. lun0 of a target is always probed. 2714 * 2715 * Arguments: devp - Pointer to a scsi_device(9S) structure 2716 * waitfunc - indicates what the allocator routines should 2717 * do when resources are not available. This value 2718 * is passed on to scsi_probe() when that routine 2719 * is called. 2720 * 2721 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2722 * otherwise the value returned by scsi_probe(9F). 
2723 * 2724 * Context: Kernel thread context 2725 */ 2726 2727 static int 2728 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2729 { 2730 struct sd_scsi_probe_cache *cp; 2731 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2732 int lun, tgt; 2733 2734 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2735 SCSI_ADDR_PROP_LUN, 0); 2736 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2737 SCSI_ADDR_PROP_TARGET, -1); 2738 2739 /* Make sure caching enabled and target in range */ 2740 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2741 /* do it the old way (no cache) */ 2742 return (scsi_probe(devp, waitfn)); 2743 } 2744 2745 mutex_enter(&sd_scsi_probe_cache_mutex); 2746 2747 /* Find the cache for this scsi bus instance */ 2748 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2749 if (cp->pdip == pdip) { 2750 break; 2751 } 2752 } 2753 2754 /* If we can't find a cache for this pdip, create one */ 2755 if (cp == NULL) { 2756 int i; 2757 2758 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2759 KM_SLEEP); 2760 cp->pdip = pdip; 2761 cp->next = sd_scsi_probe_cache_head; 2762 sd_scsi_probe_cache_head = cp; 2763 for (i = 0; i < NTARGETS_WIDE; i++) { 2764 cp->cache[i] = SCSIPROBE_EXISTS; 2765 } 2766 } 2767 2768 mutex_exit(&sd_scsi_probe_cache_mutex); 2769 2770 /* Recompute the cache for this target if LUN zero */ 2771 if (lun == 0) { 2772 cp->cache[tgt] = SCSIPROBE_EXISTS; 2773 } 2774 2775 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2776 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2777 return (SCSIPROBE_NORESP); 2778 } 2779 2780 /* Do the actual probe; save & return the result */ 2781 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2782 } 2783 2784 2785 /* 2786 * Function: sd_scsi_target_lun_init 2787 * 2788 * Description: Initializes the attached lun chain mutex and head pointer. 2789 * 2790 * Context: Kernel thread context 2791 */ 2792 2793 static void 2794 sd_scsi_target_lun_init(void) 2795 { 2796 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2797 sd_scsi_target_lun_head = NULL; 2798 } 2799 2800 2801 /* 2802 * Function: sd_scsi_target_lun_fini 2803 * 2804 * Description: Frees all resources associated with the attached lun 2805 * chain 2806 * 2807 * Context: Kernel thread context 2808 */ 2809 2810 static void 2811 sd_scsi_target_lun_fini(void) 2812 { 2813 struct sd_scsi_hba_tgt_lun *cp; 2814 struct sd_scsi_hba_tgt_lun *ncp; 2815 2816 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2817 ncp = cp->next; 2818 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2819 } 2820 sd_scsi_target_lun_head = NULL; 2821 mutex_destroy(&sd_scsi_target_lun_mutex); 2822 } 2823 2824 2825 /* 2826 * Function: sd_scsi_get_target_lun_count 2827 * 2828 * Description: This routine will check in the attached lun chain to see 2829 * how many luns are attached on the required SCSI controller 2830 * and target. Currently, some capabilities like tagged queue 2831 * are supported per target based by HBA. So all luns in a 2832 * target have the same capabilities. Based on this assumption, 2833 * sd should only set these capabilities once per target. This 2834 * function is called when sd needs to decide how many luns 2835 * already attached on a target. 2836 * 2837 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2838 * controller device. 2839 * target - The target ID on the controller's SCSI bus. 
2840 * 2841 * Return Code: The number of luns attached on the required target and 2842 * controller. 2843 * -1 if target ID is not in parallel SCSI scope or the given 2844 * dip is not in the chain. 2845 * 2846 * Context: Kernel thread context 2847 */ 2848 2849 static int 2850 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2851 { 2852 struct sd_scsi_hba_tgt_lun *cp; 2853 2854 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2855 return (-1); 2856 } 2857 2858 mutex_enter(&sd_scsi_target_lun_mutex); 2859 2860 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2861 if (cp->pdip == dip) { 2862 break; 2863 } 2864 } 2865 2866 mutex_exit(&sd_scsi_target_lun_mutex); 2867 2868 if (cp == NULL) { 2869 return (-1); 2870 } 2871 2872 return (cp->nlun[target]); 2873 } 2874 2875 2876 /* 2877 * Function: sd_scsi_update_lun_on_target 2878 * 2879 * Description: This routine is used to update the attached lun chain when a 2880 * lun is attached or detached on a target. 2881 * 2882 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2883 * controller device. 2884 * target - The target ID on the controller's SCSI bus. 2885 * flag - Indicate the lun is attached or detached. 2886 * 2887 * Context: Kernel thread context 2888 */ 2889 2890 static void 2891 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2892 { 2893 struct sd_scsi_hba_tgt_lun *cp; 2894 2895 mutex_enter(&sd_scsi_target_lun_mutex); 2896 2897 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2898 if (cp->pdip == dip) { 2899 break; 2900 } 2901 } 2902 2903 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2904 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2905 KM_SLEEP); 2906 cp->pdip = dip; 2907 cp->next = sd_scsi_target_lun_head; 2908 sd_scsi_target_lun_head = cp; 2909 } 2910 2911 mutex_exit(&sd_scsi_target_lun_mutex); 2912 2913 if (cp != NULL) { 2914 if (flag == SD_SCSI_LUN_ATTACH) { 2915 cp->nlun[target] ++; 2916 } else { 2917 cp->nlun[target] --; 2918 } 2919 } 2920 } 2921 2922 2923 /* 2924 * Function: sd_spin_up_unit 2925 * 2926 * Description: Issues the following commands to spin-up the device: 2927 * START STOP UNIT, and INQUIRY. 2928 * 2929 * Arguments: un - driver soft state (unit) structure 2930 * 2931 * Return Code: 0 - success 2932 * EIO - failure 2933 * EACCES - reservation conflict 2934 * 2935 * Context: Kernel thread context 2936 */ 2937 2938 static int 2939 sd_spin_up_unit(struct sd_lun *un) 2940 { 2941 size_t resid = 0; 2942 int has_conflict = FALSE; 2943 uchar_t *bufaddr; 2944 2945 ASSERT(un != NULL); 2946 2947 /* 2948 * Send a throwaway START UNIT command. 2949 * 2950 * If we fail on this, we don't care presently what precisely 2951 * is wrong. EMC's arrays will also fail this with a check 2952 * condition (0x2/0x4/0x3) if the device is "inactive," but 2953 * we don't want to fail the attach because it may become 2954 * "active" later. 2955 */ 2956 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2957 == EACCES) 2958 has_conflict = TRUE; 2959 2960 /* 2961 * Send another INQUIRY command to the target. This is necessary for 2962 * non-removable media direct access devices because their INQUIRY data 2963 * may not be fully qualified until they are spun up (perhaps via the 2964 * START command above). Note: This seems to be needed for some 2965 * legacy devices only.) The INQUIRY command should succeed even if a 2966 * Reservation Conflict is present. 
2967 */ 2968 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2969 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2970 kmem_free(bufaddr, SUN_INQSIZE); 2971 return (EIO); 2972 } 2973 2974 /* 2975 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2976 * Note that this routine does not return a failure here even if the 2977 * INQUIRY command did not return any data. This is a legacy behavior. 2978 */ 2979 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2980 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2981 } 2982 2983 kmem_free(bufaddr, SUN_INQSIZE); 2984 2985 /* If we hit a reservation conflict above, tell the caller. */ 2986 if (has_conflict == TRUE) { 2987 return (EACCES); 2988 } 2989 2990 return (0); 2991 } 2992 2993 #ifdef _LP64 2994 /* 2995 * Function: sd_enable_descr_sense 2996 * 2997 * Description: This routine attempts to select descriptor sense format 2998 * using the Control mode page. Devices that support 64 bit 2999 * LBAs (for >2TB luns) should also implement descriptor 3000 * sense data so we will call this function whenever we see 3001 * a lun larger than 2TB. If for some reason the device 3002 * supports 64 bit LBAs but doesn't support descriptor sense 3003 * presumably the mode select will fail. Everything will 3004 * continue to work normally except that we will not get 3005 * complete sense data for commands that fail with an LBA 3006 * larger than 32 bits. 3007 * 3008 * Arguments: un - driver soft state (unit) structure 3009 * 3010 * Context: Kernel thread context only 3011 */ 3012 3013 static void 3014 sd_enable_descr_sense(struct sd_lun *un) 3015 { 3016 uchar_t *header; 3017 struct mode_control_scsi3 *ctrl_bufp; 3018 size_t buflen; 3019 size_t bd_len; 3020 3021 /* 3022 * Read MODE SENSE page 0xA, Control Mode Page 3023 */ 3024 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3025 sizeof (struct mode_control_scsi3); 3026 header = kmem_zalloc(buflen, KM_SLEEP); 3027 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3028 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3029 SD_ERROR(SD_LOG_COMMON, un, 3030 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3031 goto eds_exit; 3032 } 3033 3034 /* 3035 * Determine size of Block Descriptors in order to locate 3036 * the mode page data. ATAPI devices return 0, SCSI devices 3037 * should return MODE_BLK_DESC_LENGTH. 3038 */ 3039 bd_len = ((struct mode_header *)header)->bdesc_length; 3040 3041 /* Clear the mode data length field for MODE SELECT */ 3042 ((struct mode_header *)header)->length = 0; 3043 3044 ctrl_bufp = (struct mode_control_scsi3 *) 3045 (header + MODE_HEADER_LENGTH + bd_len); 3046 3047 /* 3048 * If the page length is smaller than the expected value, 3049 * the target device doesn't support D_SENSE. Bail out here. 3050 */ 3051 if (ctrl_bufp->mode_page.length < 3052 sizeof (struct mode_control_scsi3) - 2) { 3053 SD_ERROR(SD_LOG_COMMON, un, 3054 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3055 goto eds_exit; 3056 } 3057 3058 /* 3059 * Clear PS bit for MODE SELECT 3060 */ 3061 ctrl_bufp->mode_page.ps = 0; 3062 3063 /* 3064 * Set D_SENSE to enable descriptor sense format. 
3065 */ 3066 ctrl_bufp->d_sense = 1; 3067 3068 /* 3069 * Use MODE SELECT to commit the change to the D_SENSE bit 3070 */ 3071 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3072 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3073 SD_INFO(SD_LOG_COMMON, un, 3074 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3075 goto eds_exit; 3076 } 3077 3078 eds_exit: 3079 kmem_free(header, buflen); 3080 } 3081 3082 /* 3083 * Function: sd_reenable_dsense_task 3084 * 3085 * Description: Re-enable descriptor sense after device or bus reset 3086 * 3087 * Context: Executes in a taskq() thread context 3088 */ 3089 static void 3090 sd_reenable_dsense_task(void *arg) 3091 { 3092 struct sd_lun *un = arg; 3093 3094 ASSERT(un != NULL); 3095 sd_enable_descr_sense(un); 3096 } 3097 #endif /* _LP64 */ 3098 3099 /* 3100 * Function: sd_set_mmc_caps 3101 * 3102 * Description: This routine determines if the device is MMC compliant and if 3103 * the device supports CDDA via a mode sense of the CDVD 3104 * capabilities mode page. Also checks if the device is a 3105 * dvdram writable device. 3106 * 3107 * Arguments: un - driver soft state (unit) structure 3108 * 3109 * Context: Kernel thread context only 3110 */ 3111 3112 static void 3113 sd_set_mmc_caps(struct sd_lun *un) 3114 { 3115 struct mode_header_grp2 *sense_mhp; 3116 uchar_t *sense_page; 3117 caddr_t buf; 3118 int bd_len; 3119 int status; 3120 struct uscsi_cmd com; 3121 int rtn; 3122 uchar_t *out_data_rw, *out_data_hd; 3123 uchar_t *rqbuf_rw, *rqbuf_hd; 3124 3125 ASSERT(un != NULL); 3126 3127 /* 3128 * The flags which will be set in this function are - mmc compliant, 3129 * dvdram writable device, cdda support. Initialize them to FALSE 3130 * and if a capability is detected - it will be set to TRUE. 3131 */ 3132 un->un_f_mmc_cap = FALSE; 3133 un->un_f_dvdram_writable_device = FALSE; 3134 un->un_f_cfg_cdda = FALSE; 3135 3136 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3137 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3138 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3139 3140 if (status != 0) { 3141 /* command failed; just return */ 3142 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3143 return; 3144 } 3145 /* 3146 * If the mode sense request for the CDROM CAPABILITIES 3147 * page (0x2A) succeeds the device is assumed to be MMC. 3148 */ 3149 un->un_f_mmc_cap = TRUE; 3150 3151 /* Get to the page data */ 3152 sense_mhp = (struct mode_header_grp2 *)buf; 3153 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3154 sense_mhp->bdesc_length_lo; 3155 if (bd_len > MODE_BLK_DESC_LENGTH) { 3156 /* 3157 * We did not get back the expected block descriptor 3158 * length so we cannot determine if the device supports 3159 * CDDA. However, we still indicate the device is MMC 3160 * according to the successful response to the page 3161 * 0x2A mode sense request. 3162 */ 3163 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3164 "sd_set_mmc_caps: Mode Sense returned " 3165 "invalid block descriptor length\n"); 3166 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3167 return; 3168 } 3169 3170 /* See if read CDDA is supported */ 3171 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3172 bd_len); 3173 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3174 3175 /* See if writing DVD RAM is supported. */ 3176 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? 
TRUE : FALSE; 3177 if (un->un_f_dvdram_writable_device == TRUE) { 3178 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3179 return; 3180 } 3181 3182 /* 3183 * If the device presents DVD or CD capabilities in the mode 3184 * page, we can return here since a RRD will not have 3185 * these capabilities. 3186 */ 3187 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3188 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3189 return; 3190 } 3191 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3192 3193 /* 3194 * If un->un_f_dvdram_writable_device is still FALSE, 3195 * check for a Removable Rigid Disk (RRD). A RRD 3196 * device is identified by the features RANDOM_WRITABLE and 3197 * HARDWARE_DEFECT_MANAGEMENT. 3198 */ 3199 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3200 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3201 3202 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3203 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3204 RANDOM_WRITABLE, SD_PATH_STANDARD); 3205 if (rtn != 0) { 3206 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3207 kmem_free(rqbuf_rw, SENSE_LENGTH); 3208 return; 3209 } 3210 3211 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3212 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3213 3214 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3215 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3216 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3217 if (rtn == 0) { 3218 /* 3219 * We have good information, check for random writable 3220 * and hardware defect features. 3221 */ 3222 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3223 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3224 un->un_f_dvdram_writable_device = TRUE; 3225 } 3226 } 3227 3228 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3229 kmem_free(rqbuf_rw, SENSE_LENGTH); 3230 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3231 kmem_free(rqbuf_hd, SENSE_LENGTH); 3232 } 3233 3234 /* 3235 * Function: sd_check_for_writable_cd 3236 * 3237 * Description: This routine determines if the media in the device is 3238 * writable or not. It uses the get configuration command (0x46) 3239 * to determine if the media is writable 3240 * 3241 * Arguments: un - driver soft state (unit) structure 3242 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3243 * chain and the normal command waitq, or 3244 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3245 * "direct" chain and bypass the normal command 3246 * waitq. 3247 * 3248 * Context: Never called at interrupt context. 3249 */ 3250 3251 static void 3252 sd_check_for_writable_cd(struct sd_lun *un, int path_flag) 3253 { 3254 struct uscsi_cmd com; 3255 uchar_t *out_data; 3256 uchar_t *rqbuf; 3257 int rtn; 3258 uchar_t *out_data_rw, *out_data_hd; 3259 uchar_t *rqbuf_rw, *rqbuf_hd; 3260 struct mode_header_grp2 *sense_mhp; 3261 uchar_t *sense_page; 3262 caddr_t buf; 3263 int bd_len; 3264 int status; 3265 3266 ASSERT(un != NULL); 3267 ASSERT(mutex_owned(SD_MUTEX(un))); 3268 3269 /* 3270 * Initialize the writable media to false, if configuration info. 3271 * tells us otherwise then only we will set it. 3272 */ 3273 un->un_f_mmc_writable_media = FALSE; 3274 mutex_exit(SD_MUTEX(un)); 3275 3276 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3277 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3278 3279 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3280 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3281 3282 mutex_enter(SD_MUTEX(un)); 3283 if (rtn == 0) { 3284 /* 3285 * We have good information, check for writable DVD. 
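* Per MMC, bytes 6-7 of the GET CONFIGURATION header hold the
* device's current profile; profile 0x0012 is DVD-RAM, i.e.
* writable media.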
3286 */ 3287 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3288 un->un_f_mmc_writable_media = TRUE; 3289 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3290 kmem_free(rqbuf, SENSE_LENGTH); 3291 return; 3292 } 3293 } 3294 3295 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3296 kmem_free(rqbuf, SENSE_LENGTH); 3297 3298 /* 3299 * Determine if this is a RRD type device. 3300 */ 3301 mutex_exit(SD_MUTEX(un)); 3302 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3303 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3304 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3305 mutex_enter(SD_MUTEX(un)); 3306 if (status != 0) { 3307 /* command failed; just return */ 3308 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3309 return; 3310 } 3311 3312 /* Get to the page data */ 3313 sense_mhp = (struct mode_header_grp2 *)buf; 3314 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3315 if (bd_len > MODE_BLK_DESC_LENGTH) { 3316 /* 3317 * We did not get back the expected block descriptor length so 3318 * we cannot check the mode page. 3319 */ 3320 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3321 "sd_check_for_writable_cd: Mode Sense returned " 3322 "invalid block descriptor length\n"); 3323 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3324 return; 3325 } 3326 3327 /* 3328 * If the device presents DVD or CD capabilities in the mode 3329 * page, we can return here since a RRD device will not have 3330 * these capabilities. 3331 */ 3332 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3333 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3334 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3335 return; 3336 } 3337 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3338 3339 /* 3340 * If un->un_f_mmc_writable_media is still FALSE, 3341 * check for RRD type media. A RRD device is identified 3342 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3343 */ 3344 mutex_exit(SD_MUTEX(un)); 3345 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3346 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3347 3348 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3349 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3350 RANDOM_WRITABLE, path_flag); 3351 if (rtn != 0) { 3352 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3353 kmem_free(rqbuf_rw, SENSE_LENGTH); 3354 mutex_enter(SD_MUTEX(un)); 3355 return; 3356 } 3357 3358 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3359 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3360 3361 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3362 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3363 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3364 mutex_enter(SD_MUTEX(un)); 3365 if (rtn == 0) { 3366 /* 3367 * We have good information, check for random writable 3368 * and hardware defect features as current. 3369 */ 3370 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3371 (out_data_rw[10] & 0x1) && 3372 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3373 (out_data_hd[10] & 0x1)) { 3374 un->un_f_mmc_writable_media = TRUE; 3375 } 3376 } 3377 3378 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3379 kmem_free(rqbuf_rw, SENSE_LENGTH); 3380 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3381 kmem_free(rqbuf_hd, SENSE_LENGTH); 3382 } 3383 3384 /* 3385 * Function: sd_read_unit_properties 3386 * 3387 * Description: The following implements a property lookup mechanism. 
3388 * Properties for particular disks (keyed on vendor, model 3389 * and rev numbers) are sought in the sd.conf file via 3390 * sd_process_sdconf_file(), and if not found there, are 3391 * looked for in a list hardcoded in this driver via 3392 * sd_process_sdconf_table(). Once located, the properties 3393 * are used to update the driver unit structure. 3394 * 3395 * Arguments: un - driver soft state (unit) structure 3396 */ 3397 3398 static void 3399 sd_read_unit_properties(struct sd_lun *un) 3400 { 3401 /* 3402 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3403 * the "sd-config-list" property (from the sd.conf file) or if 3404 * there was not a match for the inquiry vid/pid. If this event 3405 * occurs the static driver configuration table is searched for 3406 * a match. 3407 */ 3408 ASSERT(un != NULL); 3409 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3410 sd_process_sdconf_table(un); 3411 } 3412 3413 /* check for LSI device */ 3414 sd_is_lsi(un); 3415 3416 3417 } 3418 3419 3420 /* 3421 * Function: sd_process_sdconf_file 3422 * 3423 * Description: Use ddi_getlongprop to obtain the properties from the 3424 * driver's config file (i.e., sd.conf) and update the driver 3425 * soft state structure accordingly. 3426 * 3427 * Arguments: un - driver soft state (unit) structure 3428 * 3429 * Return Code: SD_SUCCESS - The properties were successfully set according 3430 * to the driver configuration file. 3431 * SD_FAILURE - The driver config list was not obtained or 3432 * there was no vid/pid match. This indicates that 3433 * the static config table should be used. 3434 * 3435 * The config file has a property, "sd-config-list", which consists of 3436 * one or more duplets as follows: 3437 * 3438 * sd-config-list= 3439 * <duplet>, 3440 * [<duplet>,] 3441 * [<duplet>]; 3442 * 3443 * The structure of each duplet is as follows: 3444 * 3445 * <duplet>:= <vid+pid>,<data-property-name-list> 3446 * 3447 * The first entry of the duplet is the device ID string (the concatenated 3448 * vid & pid; not to be confused with a device_id). This is defined in 3449 * the same way as in the sd_disk_table. 3450 * 3451 * The second part of the duplet is a string that identifies a 3452 * data-property-name-list. The data-property-name-list is defined as 3453 * follows: 3454 * 3455 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3456 * 3457 * The syntax of <data-property-name> depends on the <version> field. 3458 * 3459 * If version = SD_CONF_VERSION_1 we have the following syntax: 3460 * 3461 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3462 * 3463 * where the prop0 value will be used to set prop0 if bit0 is set in the 3464 * flags, prop1 if bit1 is set, etc.
and N = SD_CONF_MAX_ITEMS -1 3465 * 3466 */ 3467 3468 static int 3469 sd_process_sdconf_file(struct sd_lun *un) 3470 { 3471 char *config_list = NULL; 3472 int config_list_len; 3473 int len; 3474 int dupletlen = 0; 3475 char *vidptr; 3476 int vidlen; 3477 char *dnlist_ptr; 3478 char *dataname_ptr; 3479 int dnlist_len; 3480 int dataname_len; 3481 int *data_list; 3482 int data_list_len; 3483 int rval = SD_FAILURE; 3484 int i; 3485 3486 ASSERT(un != NULL); 3487 3488 /* Obtain the configuration list associated with the .conf file */ 3489 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3490 sd_config_list, (caddr_t)&config_list, &config_list_len) 3491 != DDI_PROP_SUCCESS) { 3492 return (SD_FAILURE); 3493 } 3494 3495 /* 3496 * Compare vids in each duplet to the inquiry vid - if a match is 3497 * made, get the data value and update the soft state structure 3498 * accordingly. 3499 * 3500 * Note: This algorithm is complex and difficult to maintain. It should 3501 * be replaced with a more robust implementation. 3502 */ 3503 for (len = config_list_len, vidptr = config_list; len > 0; 3504 vidptr += dupletlen, len -= dupletlen) { 3505 /* 3506 * Note: The assumption here is that each vid entry is on 3507 * a unique line from its associated duplet. 3508 */ 3509 vidlen = dupletlen = (int)strlen(vidptr); 3510 if ((vidlen == 0) || 3511 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3512 dupletlen++; 3513 continue; 3514 } 3515 3516 /* 3517 * dnlist contains 1 or more blank separated 3518 * data-property-name entries 3519 */ 3520 dnlist_ptr = vidptr + vidlen + 1; 3521 dnlist_len = (int)strlen(dnlist_ptr); 3522 dupletlen += dnlist_len + 2; 3523 3524 /* 3525 * Set a pointer for the first data-property-name 3526 * entry in the list 3527 */ 3528 dataname_ptr = dnlist_ptr; 3529 dataname_len = 0; 3530 3531 /* 3532 * Loop through all data-property-name entries in the 3533 * data-property-name-list setting the properties for each. 3534 */ 3535 while (dataname_len < dnlist_len) { 3536 int version; 3537 3538 /* 3539 * Determine the length of the current 3540 * data-property-name entry by indexing until a 3541 * blank or NULL is encountered. When the space is 3542 * encountered reset it to a NULL for compliance 3543 * with ddi_getlongprop(). 
3544 */ 3545 for (i = 0; ((dataname_ptr[i] != ' ') && 3546 (dataname_ptr[i] != '\0')); i++) { 3547 ; 3548 } 3549 3550 dataname_len += i; 3551 /* If not null-terminated, make it so */ 3552 if (dataname_ptr[i] == ' ') { 3553 dataname_ptr[i] = '\0'; 3554 } 3555 dataname_len++; 3556 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3557 "sd_process_sdconf_file: disk:%s, data:%s\n", 3558 vidptr, dataname_ptr); 3559 3560 /* Get the data list */ 3561 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3562 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3563 != DDI_PROP_SUCCESS) { 3564 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3565 "sd_process_sdconf_file: data property (%s)" 3566 " has no value\n", dataname_ptr); 3567 dataname_ptr = dnlist_ptr + dataname_len; 3568 continue; 3569 } 3570 3571 version = data_list[0]; 3572 3573 if (version == SD_CONF_VERSION_1) { 3574 sd_tunables values; 3575 3576 /* Set the properties */ 3577 if (sd_chk_vers1_data(un, data_list[1], 3578 &data_list[2], data_list_len, dataname_ptr) 3579 == SD_SUCCESS) { 3580 sd_get_tunables_from_conf(un, 3581 data_list[1], &data_list[2], 3582 &values); 3583 sd_set_vers1_properties(un, 3584 data_list[1], &values); 3585 rval = SD_SUCCESS; 3586 } else { 3587 rval = SD_FAILURE; 3588 } 3589 } else { 3590 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3591 "data property %s version 0x%x is invalid.", 3592 dataname_ptr, version); 3593 rval = SD_FAILURE; 3594 } 3595 kmem_free(data_list, data_list_len); 3596 dataname_ptr = dnlist_ptr + dataname_len; 3597 } 3598 } 3599 3600 /* free up the memory allocated by ddi_getlongprop */ 3601 if (config_list) { 3602 kmem_free(config_list, config_list_len); 3603 } 3604 3605 return (rval); 3606 } 3607 3608 /* 3609 * Function: sd_get_tunables_from_conf() 3610 * 3611 * 3612 * This function reads the data list from the sd.conf file and pulls 3613 * the values that can have numeric values as arguments, placing each 3614 * value in the appropriate sd_tunables member. 3615 * Because the order of the data list members varies across platforms, 3616 * this function reads them from the data list in a platform-specific 3617 * order and places them into the sd_tunables member that is 3618 * consistent across all platforms.
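 *
 * (Illustrative example, not a shipped entry: assuming the usual bit
 * assignments with throttle in bit 0 and ctype in bit 1, a sd.conf
 * fragment such as
 *
 *	sd-config-list = "ACME    EXAMPLEDISK", "example-tunables";
 *	example-tunables = 1,0x3,0x1e,0x2;
 *
 * carries version 1 and flag word 0x3, so the loop below would store
 * data_list[0] (0x1e) in sdt_throttle and data_list[1] (0x2) in
 * sdt_ctype.)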
3619 */ 3620 static void 3621 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3622 sd_tunables *values) 3623 { 3624 int i; 3625 int mask; 3626 3627 bzero(values, sizeof (sd_tunables)); 3628 3629 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3630 3631 mask = 1 << i; 3632 if (mask > flags) { 3633 break; 3634 } 3635 3636 switch (mask & flags) { 3637 case 0: /* This mask bit not set in flags */ 3638 continue; 3639 case SD_CONF_BSET_THROTTLE: 3640 values->sdt_throttle = data_list[i]; 3641 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3642 "sd_get_tunables_from_conf: throttle = %d\n", 3643 values->sdt_throttle); 3644 break; 3645 case SD_CONF_BSET_CTYPE: 3646 values->sdt_ctype = data_list[i]; 3647 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3648 "sd_get_tunables_from_conf: ctype = %d\n", 3649 values->sdt_ctype); 3650 break; 3651 case SD_CONF_BSET_NRR_COUNT: 3652 values->sdt_not_rdy_retries = data_list[i]; 3653 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3654 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3655 values->sdt_not_rdy_retries); 3656 break; 3657 case SD_CONF_BSET_BSY_RETRY_COUNT: 3658 values->sdt_busy_retries = data_list[i]; 3659 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3660 "sd_get_tunables_from_conf: busy_retries = %d\n", 3661 values->sdt_busy_retries); 3662 break; 3663 case SD_CONF_BSET_RST_RETRIES: 3664 values->sdt_reset_retries = data_list[i]; 3665 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3666 "sd_get_tunables_from_conf: reset_retries = %d\n", 3667 values->sdt_reset_retries); 3668 break; 3669 case SD_CONF_BSET_RSV_REL_TIME: 3670 values->sdt_reserv_rel_time = data_list[i]; 3671 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3672 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3673 values->sdt_reserv_rel_time); 3674 break; 3675 case SD_CONF_BSET_MIN_THROTTLE: 3676 values->sdt_min_throttle = data_list[i]; 3677 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3678 "sd_get_tunables_from_conf: min_throttle = %d\n", 3679 values->sdt_min_throttle); 3680 break; 3681 case SD_CONF_BSET_DISKSORT_DISABLED: 3682 values->sdt_disk_sort_dis = data_list[i]; 3683 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3684 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3685 values->sdt_disk_sort_dis); 3686 break; 3687 case SD_CONF_BSET_LUN_RESET_ENABLED: 3688 values->sdt_lun_reset_enable = data_list[i]; 3689 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3690 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3691 "\n", values->sdt_lun_reset_enable); 3692 break; 3693 } 3694 } 3695 } 3696 3697 /* 3698 * Function: sd_process_sdconf_table 3699 * 3700 * Description: Search the static configuration table for a match on the 3701 * inquiry vid/pid and update the driver soft state structure 3702 * according to the table property values for the device. 3703 * 3704 * The form of a configuration table entry is: 3705 * <vid+pid>,<flags>,<property-data> 3706 * "SEAGATE ST42400N",1,63,0,0 (Fibre) 3707 * "SEAGATE ST42400N",1,63,0,0,0,0 (Sparc) 3708 * "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0 (Intel) 3709 * 3710 * Arguments: un - driver soft state (unit) structure 3711 */ 3712 3713 static void 3714 sd_process_sdconf_table(struct sd_lun *un) 3715 { 3716 char *id = NULL; 3717 int table_index; 3718 int idlen; 3719 3720 ASSERT(un != NULL); 3721 for (table_index = 0; table_index < sd_disk_table_size; 3722 table_index++) { 3723 id = sd_disk_table[table_index].device_id; 3724 idlen = strlen(id); 3725 if (idlen == 0) { 3726 continue; 3727 } 3728 3729 /* 3730 * The static configuration table currently does not 3731 * implement version 10 properties. 
Additionally, 3732 * multiple data-property-name entries are not 3733 * implemented in the static configuration table. 3734 */ 3735 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3736 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3737 "sd_process_sdconf_table: disk %s\n", id); 3738 sd_set_vers1_properties(un, 3739 sd_disk_table[table_index].flags, 3740 sd_disk_table[table_index].properties); 3741 break; 3742 } 3743 } 3744 } 3745 3746 3747 /* 3748 * Function: sd_sdconf_id_match 3749 * 3750 * Description: This local function implements a case-insensitive vid/pid 3751 * comparison as well as the boundary cases of wild card and 3752 * multiple blanks. 3753 * 3754 * Note: An implicit assumption made here is that the scsi 3755 * inquiry structure will always keep the vid, pid and 3756 * revision strings in consecutive sequence, so they can be 3757 * read as a single string. If this assumption is not the 3758 * case, a separate string, to be used for the check, needs 3759 * to be built with these strings concatenated. 3760 * 3761 * Arguments: un - driver soft state (unit) structure 3762 * id - table or config file vid/pid 3763 * idlen - length of the vid/pid (bytes) 3764 * 3765 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3766 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3767 */ 3768 3769 static int 3770 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3771 { 3772 struct scsi_inquiry *sd_inq; 3773 int rval = SD_SUCCESS; 3774 3775 ASSERT(un != NULL); 3776 sd_inq = un->un_sd->sd_inq; 3777 ASSERT(id != NULL); 3778 3779 /* 3780 * We use the inq_vid as a pointer to a buffer containing the 3781 * vid and pid and use the entire vid/pid length of the table 3782 * entry for the comparison. This works because the inq_pid 3783 * data member follows inq_vid in the scsi_inquiry structure. 3784 */ 3785 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3786 /* 3787 * The user id string is compared to the inquiry vid/pid 3788 * using a case insensitive comparison and ignoring 3789 * multiple spaces. 3790 */ 3791 rval = sd_blank_cmp(un, id, idlen); 3792 if (rval != SD_SUCCESS) { 3793 /* 3794 * User id strings that start and end with a "*" 3795 * are a special case. These do not have a 3796 * specific vendor, and the product string can 3797 * appear anywhere in the 16 byte PID portion of 3798 * the inquiry data. This is a simple strstr() 3799 * type search for the user id in the inquiry data. 3800 */ 3801 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3802 char *pidptr = &id[1]; 3803 int i; 3804 int j; 3805 int pidstrlen = idlen - 2; 3806 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3807 pidstrlen; 3808 3809 if (j < 0) { 3810 return (SD_FAILURE); 3811 } 3812 for (i = 0; i < j; i++) { 3813 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3814 pidptr, pidstrlen) == 0) { 3815 rval = SD_SUCCESS; 3816 break; 3817 } 3818 } 3819 } 3820 } 3821 } 3822 return (rval); 3823 } 3824 3825 3826 /* 3827 * Function: sd_blank_cmp 3828 * 3829 * Description: If the id string starts and ends with a space, treat 3830 * multiple consecutive spaces as equivalent to a single 3831 * space. For example, this causes an sd_disk_table entry 3832 * of " NEC CDROM " to match a device's id string of 3833 * "NEC CDROM". 3834 * 3835 * Note: This routine exits successfully when the table-entry 3836 * pointer has reached its terminating '\0' and the count (cnt) 3837 * of remaining inquiry bytes is zero.
This will happen if the inquiry 3838 * string returned by the device is padded with spaces to be 3839 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3840 * SCSI spec states that the inquiry string is to be padded with 3841 * spaces. 3842 * 3843 * Arguments: un - driver soft state (unit) structure 3844 * id - table or config file vid/pid 3845 * idlen - length of the vid/pid (bytes) 3846 * 3847 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3848 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3849 */ 3850 3851 static int 3852 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3853 { 3854 char *p1; 3855 char *p2; 3856 int cnt; 3857 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3858 sizeof (SD_INQUIRY(un)->inq_pid); 3859 3860 ASSERT(un != NULL); 3861 p2 = un->un_sd->sd_inq->inq_vid; 3862 ASSERT(id != NULL); 3863 p1 = id; 3864 3865 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3866 /* 3867 * Note: string p1 is terminated by a NUL but string p2 3868 * isn't. The end of p2 is determined by cnt. 3869 */ 3870 for (;;) { 3871 /* skip over any extra blanks in both strings */ 3872 while ((*p1 != '\0') && (*p1 == ' ')) { 3873 p1++; 3874 } 3875 while ((cnt != 0) && (*p2 == ' ')) { 3876 p2++; 3877 cnt--; 3878 } 3879 3880 /* compare the two strings */ 3881 if ((cnt == 0) || 3882 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3883 break; 3884 } 3885 while ((cnt > 0) && 3886 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3887 p1++; 3888 p2++; 3889 cnt--; 3890 } 3891 } 3892 } 3893 3894 /* return SD_SUCCESS if both strings match */ 3895 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE); 3896 } 3897 3898 3899 /* 3900 * Function: sd_chk_vers1_data 3901 * 3902 * Description: Verify the version 1 device properties provided by the 3903 * user via the configuration file 3904 * 3905 * Arguments: un - driver soft state (unit) structure 3906 * flags - integer mask indicating properties to be set 3907 * prop_list - integer list of property values 3908 * list_len - length of user provided data 3909 * 3910 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3911 * SD_FAILURE - Indicates the user provided data is invalid 3912 */ 3913 3914 static int 3915 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3916 int list_len, char *dataname_ptr) 3917 { 3918 int i; 3919 int mask = 1; 3920 int index = 0; 3921 3922 ASSERT(un != NULL); 3923 3924 /* Check for a NULL property name and list */ 3925 if (dataname_ptr == NULL) { 3926 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3927 "sd_chk_vers1_data: NULL data property name."); 3928 return (SD_FAILURE); 3929 } 3930 if (prop_list == NULL) { 3931 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3932 "sd_chk_vers1_data: %s NULL data property list.", 3933 dataname_ptr); 3934 return (SD_FAILURE); 3935 } 3936 3937 /* Display a warning if undefined bits are set in the flags */ 3938 if (flags & ~SD_CONF_BIT_MASK) { 3939 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3940 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3941 "Properties not set.", 3942 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3943 return (SD_FAILURE); 3944 } 3945 3946 /* 3947 * Verify the length of the list by identifying the highest bit set 3948 * in the flags and validating that the property list has a length 3949 * up to the index of this bit. 
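 *
 * (Worked example, for illustration: with flags = 0x3, bits 0 and 1
 * are set, so index ends up as 2 and the list must supply at least
 * index + 2 = 4 integers: the version word, the flag word, and one
 * value for each set bit.)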
3950 */ 3951 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3952 if (flags & mask) { 3953 index++; 3954 } 3955 mask = 1 << (i + 1); /* next flag bit */ 3956 } 3957 if ((list_len / sizeof (int)) < (index + 2)) { 3958 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3959 "sd_chk_vers1_data: " 3960 "Data property list %s size is incorrect. " 3961 "Properties not set.", dataname_ptr); 3962 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3963 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3964 return (SD_FAILURE); 3965 } 3966 return (SD_SUCCESS); 3967 } 3968 3969 3970 /* 3971 * Function: sd_set_vers1_properties 3972 * 3973 * Description: Set version 1 device properties based on a property list 3974 * retrieved from the driver configuration file or static 3975 * configuration table. Version 1 properties have the format: 3976 * 3977 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3978 * 3979 * where the prop0 value will be used to set prop0 if bit0 3980 * is set in the flags. 3981 * 3982 * Arguments: un - driver soft state (unit) structure 3983 * flags - integer mask indicating properties to be set 3984 * prop_list - integer list of property values 3985 */ 3986 3987 static void 3988 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 3989 { 3990 ASSERT(un != NULL); 3991 3992 /* 3993 * Set the flag to indicate cache is to be disabled. An attempt 3994 * to disable the cache via sd_cache_control() will be made 3995 * later during attach once the basic initialization is complete. 3996 */ 3997 if (flags & SD_CONF_BSET_NOCACHE) { 3998 un->un_f_opt_disable_cache = TRUE; 3999 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4000 "sd_set_vers1_properties: caching disabled flag set\n"); 4001 } 4002 4003 /* CD-specific configuration parameters */ 4004 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4005 un->un_f_cfg_playmsf_bcd = TRUE; 4006 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4007 "sd_set_vers1_properties: playmsf_bcd set\n"); 4008 } 4009 if (flags & SD_CONF_BSET_READSUB_BCD) { 4010 un->un_f_cfg_readsub_bcd = TRUE; 4011 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4012 "sd_set_vers1_properties: readsub_bcd set\n"); 4013 } 4014 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4015 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4016 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4017 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4018 } 4019 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4020 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4021 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4022 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4023 } 4024 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4025 un->un_f_cfg_no_read_header = TRUE; 4026 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4027 "sd_set_vers1_properties: no_read_header set\n"); 4028 } 4029 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4030 un->un_f_cfg_read_cd_xd4 = TRUE; 4031 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4032 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4033 } 4034 4035 /* Support for devices which do not have valid/unique serial numbers */ 4036 if (flags & SD_CONF_BSET_FAB_DEVID) { 4037 un->un_f_opt_fab_devid = TRUE; 4038 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4039 "sd_set_vers1_properties: fab_devid bit set\n"); 4040 } 4041 4042 /* Support for user throttle configuration */ 4043 if (flags & SD_CONF_BSET_THROTTLE) { 4044 ASSERT(prop_list != NULL); 4045 un->un_saved_throttle = un->un_throttle = 4046 prop_list->sdt_throttle; 4047 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4048 "sd_set_vers1_properties: throttle set to %d\n", 4049 prop_list->sdt_throttle); 4050 } 4051 4052 /* Set the per disk retry count
according to the conf file or table. */ 4053 if (flags & SD_CONF_BSET_NRR_COUNT) { 4054 ASSERT(prop_list != NULL); 4055 if (prop_list->sdt_not_rdy_retries) { 4056 un->un_notready_retry_count = 4057 prop_list->sdt_not_rdy_retries; 4058 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4059 "sd_set_vers1_properties: not ready retry count" 4060 " set to %d\n", un->un_notready_retry_count); 4061 } 4062 } 4063 4064 /* The controller type is reported for generic disk driver ioctls */ 4065 if (flags & SD_CONF_BSET_CTYPE) { 4066 ASSERT(prop_list != NULL); 4067 switch (prop_list->sdt_ctype) { 4068 case CTYPE_CDROM: 4069 un->un_ctype = prop_list->sdt_ctype; 4070 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4071 "sd_set_vers1_properties: ctype set to " 4072 "CTYPE_CDROM\n"); 4073 break; 4074 case CTYPE_CCS: 4075 un->un_ctype = prop_list->sdt_ctype; 4076 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4077 "sd_set_vers1_properties: ctype set to " 4078 "CTYPE_CCS\n"); 4079 break; 4080 case CTYPE_ROD: /* RW optical */ 4081 un->un_ctype = prop_list->sdt_ctype; 4082 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4083 "sd_set_vers1_properties: ctype set to " 4084 "CTYPE_ROD\n"); 4085 break; 4086 default: 4087 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4088 "sd_set_vers1_properties: Could not set " 4089 "invalid ctype value (%d)", 4090 prop_list->sdt_ctype); 4091 } 4092 } 4093 4094 /* Purple failover timeout */ 4095 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4096 ASSERT(prop_list != NULL); 4097 un->un_busy_retry_count = 4098 prop_list->sdt_busy_retries; 4099 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4100 "sd_set_vers1_properties: " 4101 "busy retry count set to %d\n", 4102 un->un_busy_retry_count); 4103 } 4104 4105 /* Purple reset retry count */ 4106 if (flags & SD_CONF_BSET_RST_RETRIES) { 4107 ASSERT(prop_list != NULL); 4108 un->un_reset_retry_count = 4109 prop_list->sdt_reset_retries; 4110 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4111 "sd_set_vers1_properties: " 4112 "reset retry count set to %d\n", 4113 un->un_reset_retry_count); 4114 } 4115 4116 /* Purple reservation release timeout */ 4117 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4118 ASSERT(prop_list != NULL); 4119 un->un_reserve_release_time = 4120 prop_list->sdt_reserv_rel_time; 4121 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4122 "sd_set_vers1_properties: " 4123 "reservation release timeout set to %d\n", 4124 un->un_reserve_release_time); 4125 } 4126 4127 /* 4128 * Driver flag telling the driver to verify that no commands are pending 4129 * for a device before issuing a Test Unit Ready. This is a workaround 4130 * for a firmware bug in some Seagate eliteI drives. 4131 */ 4132 if (flags & SD_CONF_BSET_TUR_CHECK) { 4133 un->un_f_cfg_tur_check = TRUE; 4134 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4135 "sd_set_vers1_properties: tur queue check set\n"); 4136 } 4137 4138 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4139 un->un_min_throttle = prop_list->sdt_min_throttle; 4140 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4141 "sd_set_vers1_properties: min throttle set to %d\n", 4142 un->un_min_throttle); 4143 } 4144 4145 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4146 un->un_f_disksort_disabled = 4147 (prop_list->sdt_disk_sort_dis != 0) ? 4148 TRUE : FALSE; 4149 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4150 "sd_set_vers1_properties: disksort disabled " 4151 "flag set to %d\n", 4152 prop_list->sdt_disk_sort_dis); 4153 } 4154 4155 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4156 un->un_f_lun_reset_enabled = 4157 (prop_list->sdt_lun_reset_enable != 0) ? 
4158 TRUE : FALSE; 4159 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4160 "sd_set_vers1_properties: lun reset enabled " 4161 "flag set to %d\n", 4162 prop_list->sdt_lun_reset_enable); 4163 } 4164 4165 /* 4166 * Validate the throttle values. 4167 * If any of the numbers are invalid, set everything to defaults. 4168 */ 4169 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4170 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4171 (un->un_min_throttle > un->un_throttle)) { 4172 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4173 un->un_min_throttle = sd_min_throttle; 4174 } 4175 } 4176 4177 /* 4178 * Function: sd_is_lsi() 4179 * 4180 * Description: Check for lsi devices, step through the static device 4181 * table to match vid/pid. 4182 * 4183 * Args: un - ptr to sd_lun 4184 * 4185 * Notes: When creating new LSI property, need to add the new LSI property 4186 * to this function. 4187 */ 4188 static void 4189 sd_is_lsi(struct sd_lun *un) 4190 { 4191 char *id = NULL; 4192 int table_index; 4193 int idlen; 4194 void *prop; 4195 4196 ASSERT(un != NULL); 4197 for (table_index = 0; table_index < sd_disk_table_size; 4198 table_index++) { 4199 id = sd_disk_table[table_index].device_id; 4200 idlen = strlen(id); 4201 if (idlen == 0) { 4202 continue; 4203 } 4204 4205 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4206 prop = sd_disk_table[table_index].properties; 4207 if (prop == &lsi_properties || 4208 prop == &lsi_oem_properties || 4209 prop == &lsi_properties_scsi || 4210 prop == &symbios_properties) { 4211 un->un_f_cfg_is_lsi = TRUE; 4212 } 4213 break; 4214 } 4215 } 4216 } 4217 4218 /* 4219 * Function: sd_get_physical_geometry 4220 * 4221 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4222 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4223 * target, and use this information to initialize the physical 4224 * geometry cache specified by pgeom_p. 4225 * 4226 * MODE SENSE is an optional command, so failure in this case 4227 * does not necessarily denote an error. We want to use the 4228 * MODE SENSE commands to derive the physical geometry of the 4229 * device, but if either command fails, the logical geometry is 4230 * used as the fallback for disk label geometry in cmlb. 4231 * 4232 * This requires that un->un_blockcount and un->un_tgt_blocksize 4233 * have already been initialized for the current target and 4234 * that the current values be passed as args so that we don't 4235 * end up ever trying to use -1 as a valid value. This could 4236 * happen if either value is reset while we're not holding 4237 * the mutex. 4238 * 4239 * Arguments: un - driver soft state (unit) structure 4240 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4241 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4242 * to use the USCSI "direct" chain and bypass the normal 4243 * command waitq. 4244 * 4245 * Context: Kernel thread only (can sleep). 
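 *
 * (Buffer layout sketch, for reference; the parsing below walks a
 * MODE SENSE reply laid out as:
 *
 *	+-------------------+  <- headerp
 *	| mode header       |  MODE_HEADER_LENGTH (or _GRP2 for ATAPI)
 *	+-------------------+
 *	| block descriptor  |  bd_len bytes; 0 for ATAPI devices
 *	+-------------------+
 *	| mode page data    |  page3p/page4p point here
 *	+-------------------+
 *
 * so each page pointer is computed as
 * (caddr_t)headerp + mode_header_length + bd_len.)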
4246 */ 4247 4248 static int 4249 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4250 diskaddr_t capacity, int lbasize, int path_flag) 4251 { 4252 struct mode_format *page3p; 4253 struct mode_geometry *page4p; 4254 struct mode_header *headerp; 4255 int sector_size; 4256 int nsect; 4257 int nhead; 4258 int ncyl; 4259 int intrlv; 4260 int spc; 4261 diskaddr_t modesense_capacity; 4262 int rpm; 4263 int bd_len; 4264 int mode_header_length; 4265 uchar_t *p3bufp; 4266 uchar_t *p4bufp; 4267 int cdbsize; 4268 int ret = EIO; 4269 4270 ASSERT(un != NULL); 4271 4272 if (lbasize == 0) { 4273 if (ISCD(un)) { 4274 lbasize = 2048; 4275 } else { 4276 lbasize = un->un_sys_blocksize; 4277 } 4278 } 4279 pgeom_p->g_secsize = (unsigned short)lbasize; 4280 4281 /* 4282 * If the unit is a cd/dvd drive MODE SENSE page three 4283 * and MODE SENSE page four are reserved (see SBC spec 4284 * and MMC spec). To prevent soft errors just return 4285 * using the default LBA size. 4286 */ 4287 if (ISCD(un)) 4288 return (ret); 4289 4290 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4291 4292 /* 4293 * Retrieve MODE SENSE page 3 - Format Device Page 4294 */ 4295 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4296 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4297 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4298 != 0) { 4299 SD_ERROR(SD_LOG_COMMON, un, 4300 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4301 goto page3_exit; 4302 } 4303 4304 /* 4305 * Determine size of Block Descriptors in order to locate the mode 4306 * page data. ATAPI devices return 0, SCSI devices should return 4307 * MODE_BLK_DESC_LENGTH. 4308 */ 4309 headerp = (struct mode_header *)p3bufp; 4310 if (un->un_f_cfg_is_atapi == TRUE) { 4311 struct mode_header_grp2 *mhp = 4312 (struct mode_header_grp2 *)headerp; 4313 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4314 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4315 } else { 4316 mode_header_length = MODE_HEADER_LENGTH; 4317 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4318 } 4319 4320 if (bd_len > MODE_BLK_DESC_LENGTH) { 4321 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4322 "received unexpected bd_len of %d, page3\n", bd_len); 4323 goto page3_exit; 4324 } 4325 4326 page3p = (struct mode_format *) 4327 ((caddr_t)headerp + mode_header_length + bd_len); 4328 4329 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4330 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4331 "mode sense pg3 code mismatch %d\n", 4332 page3p->mode_page.code); 4333 goto page3_exit; 4334 } 4335 4336 /* 4337 * Use this physical geometry data only if BOTH MODE SENSE commands 4338 * complete successfully; otherwise, revert to the logical geometry. 4339 * So, we need to save everything in temporary variables. 
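 *
 * (Note on the masking performed below: the statement
 *
 *	sector_size &= ~(un->un_sys_blocksize - 1);
 *
 * rounds the reported sector size down to a multiple of the system
 * block size, which is valid only because un_sys_blocksize is a
 * power of two.)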
4340 */ 4341 sector_size = BE_16(page3p->data_bytes_sect); 4342 4343 /* 4344 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4345 */ 4346 if (sector_size == 0) { 4347 sector_size = un->un_sys_blocksize; 4348 } else { 4349 sector_size &= ~(un->un_sys_blocksize - 1); 4350 } 4351 4352 nsect = BE_16(page3p->sect_track); 4353 intrlv = BE_16(page3p->interleave); 4354 4355 SD_INFO(SD_LOG_COMMON, un, 4356 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4357 SD_INFO(SD_LOG_COMMON, un, 4358 " mode page: %d; nsect: %d; sector size: %d;\n", 4359 page3p->mode_page.code, nsect, sector_size); 4360 SD_INFO(SD_LOG_COMMON, un, 4361 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4362 BE_16(page3p->track_skew), 4363 BE_16(page3p->cylinder_skew)); 4364 4365 4366 /* 4367 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4368 */ 4369 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4370 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4371 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4372 != 0) { 4373 SD_ERROR(SD_LOG_COMMON, un, 4374 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4375 goto page4_exit; 4376 } 4377 4378 /* 4379 * Determine size of Block Descriptors in order to locate the mode 4380 * page data. ATAPI devices return 0, SCSI devices should return 4381 * MODE_BLK_DESC_LENGTH. 4382 */ 4383 headerp = (struct mode_header *)p4bufp; 4384 if (un->un_f_cfg_is_atapi == TRUE) { 4385 struct mode_header_grp2 *mhp = 4386 (struct mode_header_grp2 *)headerp; 4387 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4388 } else { 4389 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4390 } 4391 4392 if (bd_len > MODE_BLK_DESC_LENGTH) { 4393 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4394 "received unexpected bd_len of %d, page4\n", bd_len); 4395 goto page4_exit; 4396 } 4397 4398 page4p = (struct mode_geometry *) 4399 ((caddr_t)headerp + mode_header_length + bd_len); 4400 4401 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4402 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4403 "mode sense pg4 code mismatch %d\n", 4404 page4p->mode_page.code); 4405 goto page4_exit; 4406 } 4407 4408 /* 4409 * Stash the data now, after we know that both commands completed. 4410 */ 4411 4412 4413 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4414 spc = nhead * nsect; 4415 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4416 rpm = BE_16(page4p->rpm); 4417 4418 modesense_capacity = spc * ncyl; 4419 4420 SD_INFO(SD_LOG_COMMON, un, 4421 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4422 SD_INFO(SD_LOG_COMMON, un, 4423 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4424 SD_INFO(SD_LOG_COMMON, un, 4425 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4426 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4427 (void *)pgeom_p, capacity); 4428 4429 /* 4430 * Compensate if the drive's geometry is not rectangular, i.e., 4431 * the product of C * H * S returned by MODE SENSE >= that returned 4432 * by read capacity. This is an idiosyncrasy of the original x86 4433 * disk subsystem. 
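 *
 * (Worked example with illustrative numbers: if MODE SENSE reports
 * ncyl = 1000, nhead = 16 and nsect = 63, then spc = 1008 and
 * modesense_capacity = 1008000 blocks.  If READ CAPACITY reports
 * 1000000 blocks, the code below computes
 *
 *	g_acyl = (1008000 - 1000000 + 1008 - 1) / 1008 = 8
 *	g_ncyl = 1000 - 8 = 992
 *
 * reserving the excess cylinders as alternates.)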
4434 */ 4435 if (modesense_capacity >= capacity) { 4436 SD_INFO(SD_LOG_COMMON, un, 4437 "sd_get_physical_geometry: adjusting acyl; " 4438 "old: %d; new: %d\n", pgeom_p->g_acyl, 4439 (modesense_capacity - capacity + spc - 1) / spc); 4440 if (sector_size != 0) { 4441 /* 1243403: NEC D38x7 drives don't support sec size */ 4442 pgeom_p->g_secsize = (unsigned short)sector_size; 4443 } 4444 pgeom_p->g_nsect = (unsigned short)nsect; 4445 pgeom_p->g_nhead = (unsigned short)nhead; 4446 pgeom_p->g_capacity = capacity; 4447 pgeom_p->g_acyl = 4448 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4449 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4450 } 4451 4452 pgeom_p->g_rpm = (unsigned short)rpm; 4453 pgeom_p->g_intrlv = (unsigned short)intrlv; 4454 ret = 0; 4455 4456 SD_INFO(SD_LOG_COMMON, un, 4457 "sd_get_physical_geometry: mode sense geometry:\n"); 4458 SD_INFO(SD_LOG_COMMON, un, 4459 " nsect: %d; sector size: %d; interlv: %d\n", 4460 nsect, sector_size, intrlv); 4461 SD_INFO(SD_LOG_COMMON, un, 4462 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4463 nhead, ncyl, rpm, modesense_capacity); 4464 SD_INFO(SD_LOG_COMMON, un, 4465 "sd_get_physical_geometry: (cached)\n"); 4466 SD_INFO(SD_LOG_COMMON, un, 4467 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4468 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4469 pgeom_p->g_nhead, pgeom_p->g_nsect); 4470 SD_INFO(SD_LOG_COMMON, un, 4471 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4472 pgeom_p->g_secsize, pgeom_p->g_capacity, 4473 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4474 4475 page4_exit: 4476 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4477 page3_exit: 4478 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4479 4480 return (ret); 4481 } 4482 4483 /* 4484 * Function: sd_get_virtual_geometry 4485 * 4486 * Description: Ask the controller to tell us about the target device. 4487 * 4488 * Arguments: un - pointer to softstate 4489 * capacity - disk capacity in #blocks 4490 * lbasize - disk block size in bytes 4491 * 4492 * Context: Kernel thread only 4493 */ 4494 4495 static int 4496 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4497 diskaddr_t capacity, int lbasize) 4498 { 4499 uint_t geombuf; 4500 int spc; 4501 4502 ASSERT(un != NULL); 4503 4504 /* Set sector size, and total number of sectors */ 4505 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4506 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4507 4508 /* Let the HBA tell us its geometry */ 4509 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4510 4511 /* A value of -1 indicates an undefined "geometry" property */ 4512 if (geombuf == (-1)) { 4513 return (EINVAL); 4514 } 4515 4516 /* Initialize the logical geometry cache. */ 4517 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4518 lgeom_p->g_nsect = geombuf & 0xffff; 4519 lgeom_p->g_secsize = un->un_sys_blocksize; 4520 4521 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4522 4523 /* 4524 * Note: The driver originally converted the capacity value from 4525 * target blocks to system blocks. However, the capacity value passed 4526 * to this routine is already in terms of system blocks (this scaling 4527 * is done when the READ CAPACITY command is issued and processed). 4528 * This 'error' may have gone undetected because the usage of g_ncyl 4529 * (which is based upon g_capacity) is very limited within the driver 4530 */ 4531 lgeom_p->g_capacity = capacity; 4532 4533 /* 4534 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4535 * hba may return zero values if the device has been removed. 4536 */ 4537 if (spc == 0) { 4538 lgeom_p->g_ncyl = 0; 4539 } else { 4540 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4541 } 4542 lgeom_p->g_acyl = 0; 4543 4544 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4545 return (0); 4546 4547 } 4548 /* 4549 * Function: sd_update_block_info 4550 * 4551 * Description: Record the new target sector size and capacity in the 4552 * soft state and mark them as valid. 4553 * 4554 * Arguments: un: unit struct. 4555 * lbasize: new target sector size 4556 * capacity: new target capacity, i.e. block count 4557 * 4558 * Context: Kernel thread context 4559 */ 4560 4561 static void 4562 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4563 { 4564 if (lbasize != 0) { 4565 un->un_tgt_blocksize = lbasize; 4566 un->un_f_tgt_blocksize_is_valid = TRUE; 4567 } 4568 4569 if (capacity != 0) { 4570 un->un_blockcount = capacity; 4571 un->un_f_blockcount_is_valid = TRUE; 4572 } 4573 } 4574 4575 4576 /* 4577 * Function: sd_register_devid 4578 * 4579 * Description: This routine will obtain the device id information from the 4580 * target, obtain the serial number, and register the device 4581 * id with the ddi framework. 4582 * 4583 * Arguments: devi - the system's dev_info_t for the device. 4584 * un - driver soft state (unit) structure 4585 * reservation_flag - indicates if a reservation conflict 4586 * occurred during attach 4587 * 4588 * Context: Kernel Thread 4589 */ 4590 static void 4591 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4592 { 4593 int rval = 0; 4594 uchar_t *inq80 = NULL; 4595 size_t inq80_len = MAX_INQUIRY_SIZE; 4596 size_t inq80_resid = 0; 4597 uchar_t *inq83 = NULL; 4598 size_t inq83_len = MAX_INQUIRY_SIZE; 4599 size_t inq83_resid = 0; 4600 4601 ASSERT(un != NULL); 4602 ASSERT(mutex_owned(SD_MUTEX(un))); 4603 ASSERT((SD_DEVINFO(un)) == devi); 4604 4605 /* 4606 * This is the case of antiquated Sun disk drives that have the 4607 * FAB_DEVID property set in the disk_table. These drives 4608 * manage the devids by storing them in the last 2 available sectors 4609 * on the drive and have them fabricated by the ddi layer by calling 4610 * ddi_devid_init and passing the DEVID_FAB flag. 4611 */ 4612 if (un->un_f_opt_fab_devid == TRUE) { 4613 /* 4614 * Depending on EINVAL isn't reliable, since a reserved disk 4615 * may result in invalid geometry, so check to make sure a 4616 * reservation conflict did not occur during attach. 4617 */ 4618 if ((sd_get_devid(un) == EINVAL) && 4619 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4620 /* 4621 * The devid is invalid AND there is no reservation 4622 * conflict. Fabricate a new devid. 4623 */ 4624 (void) sd_create_devid(un); 4625 } 4626 4627 /* Register the devid if it exists */ 4628 if (un->un_devid != NULL) { 4629 (void) ddi_devid_register(SD_DEVINFO(un), 4630 un->un_devid); 4631 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4632 "sd_register_devid: Devid Fabricated\n"); 4633 } 4634 return; 4635 } 4636 4637 /* 4638 * We check the availability of the World Wide Name (0x83) and Unit 4639 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 4640 * un_vpd_page_mask from them, we decide which way to get the WWN. If 4641 * 0x83 is available, that is the best choice. Our next choice is 4642 * 0x80. If neither is available, we munge the devid from the device 4643 * vid/pid/serial # for Sun-qualified disks, or use the ddi framework 4644 * to fabricate a devid for non-Sun-qualified disks.
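 *
 * (The resulting preference order, summarized for reference:
 *
 *	1. INQUIRY VPD page 0x83 (device identification / WWN)
 *	2. INQUIRY VPD page 0x80 (unit serial number)
 *	3. vid/pid/serial # munge for Sun-qualified disks
 *	4. fabricated devid via ddi_devid_init(..., DEVID_FAB, ...)
 *
 * Each VPD page is fetched with the EVPD bit set, e.g.
 *
 *	sd_send_scsi_INQUIRY(un, inq83, inq83_len, 0x01, 0x83,
 *	    &inq83_resid);
 *
 * where 0x01 is the EVPD bit and 0x83 the page code.)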
4645 */ 4646 if (sd_check_vpd_page_support(un) == 0) { 4647 /* collect page 80 data if available */ 4648 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 4649 4650 mutex_exit(SD_MUTEX(un)); 4651 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 4652 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 4653 0x01, 0x80, &inq80_resid); 4654 4655 if (rval != 0) { 4656 kmem_free(inq80, inq80_len); 4657 inq80 = NULL; 4658 inq80_len = 0; 4659 } 4660 mutex_enter(SD_MUTEX(un)); 4661 } 4662 4663 /* collect page 83 data if available */ 4664 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 4665 mutex_exit(SD_MUTEX(un)); 4666 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 4667 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 4668 0x01, 0x83, &inq83_resid); 4669 4670 if (rval != 0) { 4671 kmem_free(inq83, inq83_len); 4672 inq83 = NULL; 4673 inq83_len = 0; 4674 } 4675 mutex_enter(SD_MUTEX(un)); 4676 } 4677 } 4678 4679 /* encode best devid possible based on data available */ 4680 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 4681 (char *)ddi_driver_name(SD_DEVINFO(un)), 4682 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 4683 inq80, inq80_len - inq80_resid, inq83, inq83_len - 4684 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 4685 4686 /* devid successfully encoded, register devid */ 4687 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 4688 4689 } else { 4690 /* 4691 * Unable to encode a devid based on the data available. 4692 * This is not a Sun-qualified disk. Older Sun disk 4693 * drives that have the SD_FAB_DEVID property 4694 * set in the disk_table and non-Sun-qualified 4695 * disks are treated in the same manner. These 4696 * drives manage their devids by storing them in 4697 * the last 2 available sectors on the drive and 4698 * have them fabricated by the ddi layer by 4699 * calling ddi_devid_init and passing the 4700 * DEVID_FAB flag. 4701 * Create a fabricated devid only if one does 4702 * not already exist. 4703 */ 4704 if (sd_get_devid(un) == EINVAL) { 4705 (void) sd_create_devid(un); 4706 } 4707 un->un_f_opt_fab_devid = TRUE; 4708 4709 /* Register the devid if it exists */ 4710 if (un->un_devid != NULL) { 4711 (void) ddi_devid_register(SD_DEVINFO(un), 4712 un->un_devid); 4713 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4714 "sd_register_devid: devid fabricated using " 4715 "ddi framework\n"); 4716 } 4717 } 4718 4719 /* clean up resources */ 4720 if (inq80 != NULL) { 4721 kmem_free(inq80, inq80_len); 4722 } 4723 if (inq83 != NULL) { 4724 kmem_free(inq83, inq83_len); 4725 } 4726 } 4727 4728 4729 4730 /* 4731 * Function: sd_get_devid 4732 * 4733 * Description: This routine will return 0 if a valid device id has been 4734 * obtained from the target and stored in the soft state. If a 4735 * valid device id has not been previously read and stored, a 4736 * read attempt will be made.
4737 * 4738 * Arguments: un - driver soft state (unit) structure 4739 * 4740 * Return Code: 0 if we successfully get the device id 4741 * 4742 * Context: Kernel Thread 4743 */ 4744 4745 static int 4746 sd_get_devid(struct sd_lun *un) 4747 { 4748 struct dk_devid *dkdevid; 4749 ddi_devid_t tmpid; 4750 uint_t *ip; 4751 size_t sz; 4752 diskaddr_t blk; 4753 int status; 4754 int chksum; 4755 int i; 4756 size_t buffer_size; 4757 4758 ASSERT(un != NULL); 4759 ASSERT(mutex_owned(SD_MUTEX(un))); 4760 4761 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4762 un); 4763 4764 if (un->un_devid != NULL) { 4765 return (0); 4766 } 4767 4768 mutex_exit(SD_MUTEX(un)); 4769 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4770 (void *)SD_PATH_DIRECT) != 0) { 4771 mutex_enter(SD_MUTEX(un)); 4772 return (EINVAL); 4773 } 4774 4775 /* 4776 * Read and verify device id, stored in the reserved cylinders at the 4777 * end of the disk. Backup label is on the odd sectors of the last 4778 * track of the last cylinder. Device id will be on track of the next 4779 * to last cylinder. 4780 */ 4781 mutex_enter(SD_MUTEX(un)); 4782 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4783 mutex_exit(SD_MUTEX(un)); 4784 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4785 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4786 SD_PATH_DIRECT); 4787 if (status != 0) { 4788 goto error; 4789 } 4790 4791 /* Validate the revision */ 4792 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4793 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4794 status = EINVAL; 4795 goto error; 4796 } 4797 4798 /* Calculate the checksum */ 4799 chksum = 0; 4800 ip = (uint_t *)dkdevid; 4801 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4802 i++) { 4803 chksum ^= ip[i]; 4804 } 4805 4806 /* Compare the checksums */ 4807 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4808 status = EINVAL; 4809 goto error; 4810 } 4811 4812 /* Validate the device id */ 4813 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4814 status = EINVAL; 4815 goto error; 4816 } 4817 4818 /* 4819 * Store the device id in the driver soft state 4820 */ 4821 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4822 tmpid = kmem_alloc(sz, KM_SLEEP); 4823 4824 mutex_enter(SD_MUTEX(un)); 4825 4826 un->un_devid = tmpid; 4827 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4828 4829 kmem_free(dkdevid, buffer_size); 4830 4831 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4832 4833 return (status); 4834 error: 4835 mutex_enter(SD_MUTEX(un)); 4836 kmem_free(dkdevid, buffer_size); 4837 return (status); 4838 } 4839 4840 4841 /* 4842 * Function: sd_create_devid 4843 * 4844 * Description: This routine will fabricate the device id and write it 4845 * to the disk. 
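 *
 * (On-disk format sketch, for reference: the reserved sector holds a
 * struct dk_devid -- a two-byte revision, the encoded devid, and a
 * trailing XOR checksum computed over every int of the sector except
 * the checksum word itself:
 *
 *	chksum = 0;
 *	ip = (uint_t *)dkdevid;
 *	for (i = 0; i < (blocksize - sizeof (int)) / sizeof (int); i++)
 *		chksum ^= ip[i];
 *
 * where blocksize stands for un->un_sys_blocksize; sd_get_devid()
 * above and sd_write_deviceid() below share this loop.)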
4846 * 4847 * Arguments: un - driver soft state (unit) structure 4848 * 4849 * Return Code: value of the fabricated device id 4850 * 4851 * Context: Kernel Thread 4852 */ 4853 4854 static ddi_devid_t 4855 sd_create_devid(struct sd_lun *un) 4856 { 4857 ASSERT(un != NULL); 4858 4859 /* Fabricate the devid */ 4860 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 4861 == DDI_FAILURE) { 4862 return (NULL); 4863 } 4864 4865 /* Write the devid to disk */ 4866 if (sd_write_deviceid(un) != 0) { 4867 ddi_devid_free(un->un_devid); 4868 un->un_devid = NULL; 4869 } 4870 4871 return (un->un_devid); 4872 } 4873 4874 4875 /* 4876 * Function: sd_write_deviceid 4877 * 4878 * Description: This routine will write the device id to the disk 4879 * reserved sector. 4880 * 4881 * Arguments: un - driver soft state (unit) structure 4882 * 4883 * Return Code: EINVAL 4884 * value returned by sd_send_scsi_cmd 4885 * 4886 * Context: Kernel Thread 4887 */ 4888 4889 static int 4890 sd_write_deviceid(struct sd_lun *un) 4891 { 4892 struct dk_devid *dkdevid; 4893 diskaddr_t blk; 4894 uint_t *ip, chksum; 4895 int status; 4896 int i; 4897 4898 ASSERT(mutex_owned(SD_MUTEX(un))); 4899 4900 mutex_exit(SD_MUTEX(un)); 4901 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4902 (void *)SD_PATH_DIRECT) != 0) { 4903 mutex_enter(SD_MUTEX(un)); 4904 return (-1); 4905 } 4906 4907 4908 /* Allocate the buffer */ 4909 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 4910 4911 /* Fill in the revision */ 4912 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 4913 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 4914 4915 /* Copy in the device id */ 4916 mutex_enter(SD_MUTEX(un)); 4917 bcopy(un->un_devid, &dkdevid->dkd_devid, 4918 ddi_devid_sizeof(un->un_devid)); 4919 mutex_exit(SD_MUTEX(un)); 4920 4921 /* Calculate the checksum */ 4922 chksum = 0; 4923 ip = (uint_t *)dkdevid; 4924 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4925 i++) { 4926 chksum ^= ip[i]; 4927 } 4928 4929 /* Fill-in checksum */ 4930 DKD_FORMCHKSUM(chksum, dkdevid); 4931 4932 /* Write the reserved sector */ 4933 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 4934 SD_PATH_DIRECT); 4935 4936 kmem_free(dkdevid, un->un_sys_blocksize); 4937 4938 mutex_enter(SD_MUTEX(un)); 4939 return (status); 4940 } 4941 4942 4943 /* 4944 * Function: sd_check_vpd_page_support 4945 * 4946 * Description: This routine sends an inquiry command with the EVPD bit set and 4947 * a page code of 0x00 to the device. It is used to determine which 4948 * vital product pages are availible to find the devid. We are 4949 * looking for pages 0x83 or 0x80. If we return a negative 1, the 4950 * device does not support that command. 4951 * 4952 * Arguments: un - driver soft state (unit) structure 4953 * 4954 * Return Code: 0 - success 4955 * 1 - check condition 4956 * 4957 * Context: This routine can sleep. 4958 */ 4959 4960 static int 4961 sd_check_vpd_page_support(struct sd_lun *un) 4962 { 4963 uchar_t *page_list = NULL; 4964 uchar_t page_length = 0xff; /* Use max possible length */ 4965 uchar_t evpd = 0x01; /* Set the EVPD bit */ 4966 uchar_t page_code = 0x00; /* Supported VPD Pages */ 4967 int rval = 0; 4968 int counter; 4969 4970 ASSERT(un != NULL); 4971 ASSERT(mutex_owned(SD_MUTEX(un))); 4972 4973 mutex_exit(SD_MUTEX(un)); 4974 4975 /* 4976 * We'll set the page length to the maximum to save figuring it out 4977 * with an additional call. 
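 *
 * (Reply layout, per the SPC "Supported VPD Pages" page: byte 1 holds
 * the page code (0x00), byte 3 the page length, and the supported
 * page codes follow in ascending order starting at byte 4.  For
 * example, a reply beginning
 *
 *	00 00 00 03 00 80 83
 *
 * advertises pages 0x00, 0x80 and 0x83, which is exactly what the
 * loop below scans for.)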
4978 */ 4979 page_list = kmem_zalloc(page_length, KM_SLEEP); 4980 4981 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 4982 page_code, NULL); 4983 4984 mutex_enter(SD_MUTEX(un)); 4985 4986 /* 4987 * Now we must validate that the device accepted the command, as some 4988 * drives do not support it. If the drive does support it, we will 4989 * return 0, and the supported pages will be in un_vpd_page_mask. If 4990 * not, we return -1. 4991 */ 4992 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 4993 /* Loop to find one of the 2 pages we need */ 4994 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 4995 4996 /* 4997 * Pages are returned in ascending order, and 0x83 is what we 4998 * are hoping for. 4999 */ 5000 while ((page_list[counter] <= 0x83) && 5001 (counter <= (page_list[VPD_PAGE_LENGTH] + 5002 VPD_HEAD_OFFSET))) { 5003 /* 5004 * Add 3 because page_list[3] is the number of 5005 * pages minus 3 5006 */ 5007 5008 switch (page_list[counter]) { 5009 case 0x00: 5010 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5011 break; 5012 case 0x80: 5013 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5014 break; 5015 case 0x81: 5016 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5017 break; 5018 case 0x82: 5019 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5020 break; 5021 case 0x83: 5022 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5023 break; 5024 } 5025 counter++; 5026 } 5027 5028 } else { 5029 rval = -1; 5030 5031 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5032 "sd_check_vpd_page_support: This drive does not implement " 5033 "VPD pages.\n"); 5034 } 5035 5036 kmem_free(page_list, page_length); 5037 5038 return (rval); 5039 } 5040 5041 5042 /* 5043 * Function: sd_setup_pm 5044 * 5045 * Description: Initialize Power Management on the device 5046 * 5047 * Context: Kernel Thread 5048 */ 5049 5050 static void 5051 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5052 { 5053 uint_t log_page_size; 5054 uchar_t *log_page_data; 5055 int rval; 5056 5057 /* 5058 * Since we are called from attach, holding a mutex for 5059 * un is unnecessary. Because some of the routines called 5060 * from here require SD_MUTEX to not be held, assert this 5061 * right up front. 5062 */ 5063 ASSERT(!mutex_owned(SD_MUTEX(un))); 5064 /* 5065 * Since the sd device does not have the 'reg' property, 5066 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5067 * The following code is to tell cpr that this device 5068 * DOES need to be suspended and resumed. 5069 */ 5070 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5071 "pm-hardware-state", "needs-suspend-resume"); 5072 5073 /* 5074 * This complies with the new power management framework 5075 * for certain desktop machines. Create the pm_components 5076 * property as a string array property. 5077 */ 5078 if (un->un_f_pm_supported) { 5079 /* 5080 * not all devices have a motor, try it first. 5081 * some devices may return ILLEGAL REQUEST, some 5082 * will hang 5083 * The following START_STOP_UNIT is used to check if target 5084 * device has a motor. 
5085 */ 5086 un->un_f_start_stop_supported = TRUE; 5087 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5088 SD_PATH_DIRECT) != 0) { 5089 un->un_f_start_stop_supported = FALSE; 5090 } 5091 5092 /* 5093 * Create the pm properties anyway; otherwise the parent 5094 * can't go to sleep. 5095 */ 5096 (void) sd_create_pm_components(devi, un); 5097 un->un_f_pm_is_enabled = TRUE; 5098 return; 5099 } 5100 5101 if (!un->un_f_log_sense_supported) { 5102 un->un_power_level = SD_SPINDLE_ON; 5103 un->un_f_pm_is_enabled = FALSE; 5104 return; 5105 } 5106 5107 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5108 5109 #ifdef SDDEBUG 5110 if (sd_force_pm_supported) { 5111 /* Force a successful result */ 5112 rval = 1; 5113 } 5114 #endif 5115 5116 /* 5117 * If the start-stop cycle counter log page is not supported 5118 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5119 * then we should not create the pm_components property. 5120 */ 5121 if (rval == -1) { 5122 /* 5123 * Error. 5124 * Reading log sense failed, most likely this is 5125 * an older drive that does not support log sense. 5126 * If this fails auto-pm is not supported. 5127 */ 5128 un->un_power_level = SD_SPINDLE_ON; 5129 un->un_f_pm_is_enabled = FALSE; 5130 5131 } else if (rval == 0) { 5132 /* 5133 * Page not found. 5134 * The start stop cycle counter is implemented as page 5135 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For 5136 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5137 */ 5138 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5139 /* 5140 * Page found, use this one. 5141 */ 5142 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5143 un->un_f_pm_is_enabled = TRUE; 5144 } else { 5145 /* 5146 * Error or page not found. 5147 * auto-pm is not supported for this device. 5148 */ 5149 un->un_power_level = SD_SPINDLE_ON; 5150 un->un_f_pm_is_enabled = FALSE; 5151 } 5152 } else { 5153 /* 5154 * Page found, use it. 5155 */ 5156 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5157 un->un_f_pm_is_enabled = TRUE; 5158 } 5159 5160 5161 if (un->un_f_pm_is_enabled == TRUE) { 5162 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5163 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5164 5165 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5166 log_page_size, un->un_start_stop_cycle_page, 5167 0x01, 0, SD_PATH_DIRECT); 5168 #ifdef SDDEBUG 5169 if (sd_force_pm_supported) { 5170 /* Force a successful result */ 5171 rval = 0; 5172 } 5173 #endif 5174 5175 /* 5176 * If the LOG SENSE for the start/stop cycle counter page 5177 * succeeds, then power management is supported and we can 5178 * enable auto-pm. 5179 */ 5180 if (rval == 0) { 5181 (void) sd_create_pm_components(devi, un); 5182 } else { 5183 un->un_power_level = SD_SPINDLE_ON; 5184 un->un_f_pm_is_enabled = FALSE; 5185 } 5186 5187 kmem_free(log_page_data, log_page_size); 5188 } 5189 } 5190 5191 5192 /* 5193 * Function: sd_create_pm_components 5194 * 5195 * Description: Initialize PM property. 5196 * 5197 * Context: Kernel thread context 5198 */ 5199 5200 static void 5201 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5202 { 5203 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5204 5205 ASSERT(!mutex_owned(SD_MUTEX(un))); 5206 5207 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5208 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5209 /* 5210 * When components are initially created they are idle; 5211 * power up any non-removables.
5212 * Note: the return value of pm_raise_power can't be used 5213 * for determining if PM should be enabled for this device. 5214 * Even if you check the return values and remove this 5215 * property created above, the PM framework will not honor the 5216 * change after the first call to pm_raise_power. Hence, 5217 * removal of that property does not help if pm_raise_power 5218 * fails. In the case of removable media, the start/stop 5219 * will fail if the media is not present. 5220 */ 5221 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5222 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5223 mutex_enter(SD_MUTEX(un)); 5224 un->un_power_level = SD_SPINDLE_ON; 5225 mutex_enter(&un->un_pm_mutex); 5226 /* Set to on and not busy. */ 5227 un->un_pm_count = 0; 5228 } else { 5229 mutex_enter(SD_MUTEX(un)); 5230 un->un_power_level = SD_SPINDLE_OFF; 5231 mutex_enter(&un->un_pm_mutex); 5232 /* Set to off. */ 5233 un->un_pm_count = -1; 5234 } 5235 mutex_exit(&un->un_pm_mutex); 5236 mutex_exit(SD_MUTEX(un)); 5237 } else { 5238 un->un_power_level = SD_SPINDLE_ON; 5239 un->un_f_pm_is_enabled = FALSE; 5240 } 5241 } 5242 5243 5244 /* 5245 * Function: sd_ddi_suspend 5246 * 5247 * Description: Performs system power-down operations. This includes 5248 * setting the drive state to indicate its suspended so 5249 * that no new commands will be accepted. Also, wait for 5250 * all commands that are in transport or queued to a timer 5251 * for retry to complete. All timeout threads are cancelled. 5252 * 5253 * Return Code: DDI_FAILURE or DDI_SUCCESS 5254 * 5255 * Context: Kernel thread context 5256 */ 5257 5258 static int 5259 sd_ddi_suspend(dev_info_t *devi) 5260 { 5261 struct sd_lun *un; 5262 clock_t wait_cmds_complete; 5263 5264 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5265 if (un == NULL) { 5266 return (DDI_FAILURE); 5267 } 5268 5269 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5270 5271 mutex_enter(SD_MUTEX(un)); 5272 5273 /* Return success if the device is already suspended. */ 5274 if (un->un_state == SD_STATE_SUSPENDED) { 5275 mutex_exit(SD_MUTEX(un)); 5276 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5277 "device already suspended, exiting\n"); 5278 return (DDI_SUCCESS); 5279 } 5280 5281 /* Return failure if the device is being used by HA */ 5282 if (un->un_resvd_status & 5283 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5284 mutex_exit(SD_MUTEX(un)); 5285 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5286 "device in use by HA, exiting\n"); 5287 return (DDI_FAILURE); 5288 } 5289 5290 /* 5291 * Return failure if the device is in a resource wait 5292 * or power changing state. 5293 */ 5294 if ((un->un_state == SD_STATE_RWAIT) || 5295 (un->un_state == SD_STATE_PM_CHANGING)) { 5296 mutex_exit(SD_MUTEX(un)); 5297 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5298 "device in resource wait state, exiting\n"); 5299 return (DDI_FAILURE); 5300 } 5301 5302 5303 un->un_save_state = un->un_last_state; 5304 New_state(un, SD_STATE_SUSPENDED); 5305 5306 /* 5307 * Wait for all commands that are in transport or queued to a timer 5308 * for retry to complete. 5309 * 5310 * While waiting, no new commands will be accepted or sent because of 5311 * the new state we set above. 5312 * 5313 * Wait till current operation has completed. If we are in the resource 5314 * wait state (with an intr outstanding) then we need to wait till the 5315 * intr completes and starts the next cmd. We want to wait for 5316 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 
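*
* (Aside: the deadline computed below is an absolute lbolt tick
* value, computed once, so repeated wakeups of the cv_timedwait()
* loop cannot stretch the total wait. In outline, assuming the
* usual one-second-to-ticks conversion:
*
*	deadline = ddi_get_lbolt() + seconds * drv_usectohz(1000000);
*	while (un->un_ncmds_in_transport != 0)
*		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
*		    deadline) == -1)
*			give up and fail the DDI_SUSPEND;
*
* cv_timedwait(9F) returns -1 once the absolute deadline passes.)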
5317 */ 5318 wait_cmds_complete = ddi_get_lbolt() + 5319 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5320 5321 while (un->un_ncmds_in_transport != 0) { 5322 /* 5323 * Fail if commands do not finish in the specified time. 5324 */ 5325 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5326 wait_cmds_complete) == -1) { 5327 /* 5328 * Undo the state changes made above. Everything 5329 * must go back to it's original value. 5330 */ 5331 Restore_state(un); 5332 un->un_last_state = un->un_save_state; 5333 /* Wake up any threads that might be waiting. */ 5334 cv_broadcast(&un->un_suspend_cv); 5335 mutex_exit(SD_MUTEX(un)); 5336 SD_ERROR(SD_LOG_IO_PM, un, 5337 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5338 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5339 return (DDI_FAILURE); 5340 } 5341 } 5342 5343 /* 5344 * Cancel SCSI watch thread and timeouts, if any are active 5345 */ 5346 5347 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5348 opaque_t temp_token = un->un_swr_token; 5349 mutex_exit(SD_MUTEX(un)); 5350 scsi_watch_suspend(temp_token); 5351 mutex_enter(SD_MUTEX(un)); 5352 } 5353 5354 if (un->un_reset_throttle_timeid != NULL) { 5355 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5356 un->un_reset_throttle_timeid = NULL; 5357 mutex_exit(SD_MUTEX(un)); 5358 (void) untimeout(temp_id); 5359 mutex_enter(SD_MUTEX(un)); 5360 } 5361 5362 if (un->un_dcvb_timeid != NULL) { 5363 timeout_id_t temp_id = un->un_dcvb_timeid; 5364 un->un_dcvb_timeid = NULL; 5365 mutex_exit(SD_MUTEX(un)); 5366 (void) untimeout(temp_id); 5367 mutex_enter(SD_MUTEX(un)); 5368 } 5369 5370 mutex_enter(&un->un_pm_mutex); 5371 if (un->un_pm_timeid != NULL) { 5372 timeout_id_t temp_id = un->un_pm_timeid; 5373 un->un_pm_timeid = NULL; 5374 mutex_exit(&un->un_pm_mutex); 5375 mutex_exit(SD_MUTEX(un)); 5376 (void) untimeout(temp_id); 5377 mutex_enter(SD_MUTEX(un)); 5378 } else { 5379 mutex_exit(&un->un_pm_mutex); 5380 } 5381 5382 if (un->un_retry_timeid != NULL) { 5383 timeout_id_t temp_id = un->un_retry_timeid; 5384 un->un_retry_timeid = NULL; 5385 mutex_exit(SD_MUTEX(un)); 5386 (void) untimeout(temp_id); 5387 mutex_enter(SD_MUTEX(un)); 5388 } 5389 5390 if (un->un_direct_priority_timeid != NULL) { 5391 timeout_id_t temp_id = un->un_direct_priority_timeid; 5392 un->un_direct_priority_timeid = NULL; 5393 mutex_exit(SD_MUTEX(un)); 5394 (void) untimeout(temp_id); 5395 mutex_enter(SD_MUTEX(un)); 5396 } 5397 5398 if (un->un_f_is_fibre == TRUE) { 5399 /* 5400 * Remove callbacks for insert and remove events 5401 */ 5402 if (un->un_insert_event != NULL) { 5403 mutex_exit(SD_MUTEX(un)); 5404 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5405 mutex_enter(SD_MUTEX(un)); 5406 un->un_insert_event = NULL; 5407 } 5408 5409 if (un->un_remove_event != NULL) { 5410 mutex_exit(SD_MUTEX(un)); 5411 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5412 mutex_enter(SD_MUTEX(un)); 5413 un->un_remove_event = NULL; 5414 } 5415 } 5416 5417 mutex_exit(SD_MUTEX(un)); 5418 5419 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5420 5421 return (DDI_SUCCESS); 5422 } 5423 5424 5425 /* 5426 * Function: sd_ddi_pm_suspend 5427 * 5428 * Description: Set the drive state to low power. 5429 * Someone else is required to actually change the drive 5430 * power level. 
5431 * 5432 * Arguments: un - driver soft state (unit) structure 5433 * 5434 * Return Code: DDI_FAILURE or DDI_SUCCESS 5435 * 5436 * Context: Kernel thread context 5437 */ 5438 5439 static int 5440 sd_ddi_pm_suspend(struct sd_lun *un) 5441 { 5442 ASSERT(un != NULL); 5443 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5444 5445 ASSERT(!mutex_owned(SD_MUTEX(un))); 5446 mutex_enter(SD_MUTEX(un)); 5447 5448 /* 5449 * Exit if power management is not enabled for this device, or if 5450 * the device is being used by HA. 5451 */ 5452 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5453 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5454 mutex_exit(SD_MUTEX(un)); 5455 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5456 return (DDI_SUCCESS); 5457 } 5458 5459 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5460 un->un_ncmds_in_driver); 5461 5462 /* 5463 * See if the device is not busy, ie.: 5464 * - we have no commands in the driver for this device 5465 * - not waiting for resources 5466 */ 5467 if ((un->un_ncmds_in_driver == 0) && 5468 (un->un_state != SD_STATE_RWAIT)) { 5469 /* 5470 * The device is not busy, so it is OK to go to low power state. 5471 * Indicate low power, but rely on someone else to actually 5472 * change it. 5473 */ 5474 mutex_enter(&un->un_pm_mutex); 5475 un->un_pm_count = -1; 5476 mutex_exit(&un->un_pm_mutex); 5477 un->un_power_level = SD_SPINDLE_OFF; 5478 } 5479 5480 mutex_exit(SD_MUTEX(un)); 5481 5482 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5483 5484 return (DDI_SUCCESS); 5485 } 5486 5487 5488 /* 5489 * Function: sd_ddi_resume 5490 * 5491 * Description: Performs system power-up operations.. 5492 * 5493 * Return Code: DDI_SUCCESS 5494 * DDI_FAILURE 5495 * 5496 * Context: Kernel thread context 5497 */ 5498 5499 static int 5500 sd_ddi_resume(dev_info_t *devi) 5501 { 5502 struct sd_lun *un; 5503 5504 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5505 if (un == NULL) { 5506 return (DDI_FAILURE); 5507 } 5508 5509 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 5510 5511 mutex_enter(SD_MUTEX(un)); 5512 Restore_state(un); 5513 5514 /* 5515 * Restore the state which was saved to give the 5516 * the right state in un_last_state 5517 */ 5518 un->un_last_state = un->un_save_state; 5519 /* 5520 * Note: throttle comes back at full. 5521 * Also note: this MUST be done before calling pm_raise_power 5522 * otherwise the system can get hung in biowait. The scenario where 5523 * this'll happen is under cpr suspend. Writing of the system 5524 * state goes through sddump, which writes 0 to un_throttle. If 5525 * writing the system state then fails, example if the partition is 5526 * too small, then cpr attempts a resume. If throttle isn't restored 5527 * from the saved value until after calling pm_raise_power then 5528 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 5529 * in biowait. 5530 */ 5531 un->un_throttle = un->un_saved_throttle; 5532 5533 /* 5534 * The chance of failure is very rare as the only command done in power 5535 * entry point is START command when you transition from 0->1 or 5536 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 5537 * which suspend was done. Ignore the return value as the resume should 5538 * not be failed. In the case of removable media the media need not be 5539 * inserted and hence there is a chance that raise power will fail with 5540 * media not present. 
5541 */ 5542 if (un->un_f_attach_spinup) { 5543 mutex_exit(SD_MUTEX(un)); 5544 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5545 mutex_enter(SD_MUTEX(un)); 5546 } 5547 5548 /* 5549 * Don't broadcast to the suspend cv and therefore possibly 5550 * start I/O until after power has been restored. 5551 */ 5552 cv_broadcast(&un->un_suspend_cv); 5553 cv_broadcast(&un->un_state_cv); 5554 5555 /* restart thread */ 5556 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5557 scsi_watch_resume(un->un_swr_token); 5558 } 5559 5560 #if (defined(__fibre)) 5561 if (un->un_f_is_fibre == TRUE) { 5562 /* 5563 * Add callbacks for insert and remove events 5564 */ 5565 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5566 sd_init_event_callbacks(un); 5567 } 5568 } 5569 #endif 5570 5571 /* 5572 * Transport any pending commands to the target. 5573 * 5574 * If this is a low-activity device commands in queue will have to wait 5575 * until new commands come in, which may take awhile. Also, we 5576 * specifically don't check un_ncmds_in_transport because we know that 5577 * there really are no commands in progress after the unit was 5578 * suspended and we could have reached the throttle level, been 5579 * suspended, and have no new commands coming in for awhile. Highly 5580 * unlikely, but so is the low-activity disk scenario. 5581 */ 5582 ddi_xbuf_dispatch(un->un_xbuf_attr); 5583 5584 sd_start_cmds(un, NULL); 5585 mutex_exit(SD_MUTEX(un)); 5586 5587 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5588 5589 return (DDI_SUCCESS); 5590 } 5591 5592 5593 /* 5594 * Function: sd_ddi_pm_resume 5595 * 5596 * Description: Set the drive state to powered on. 5597 * Someone else is required to actually change the drive 5598 * power level. 5599 * 5600 * Arguments: un - driver soft state (unit) structure 5601 * 5602 * Return Code: DDI_SUCCESS 5603 * 5604 * Context: Kernel thread context 5605 */ 5606 5607 static int 5608 sd_ddi_pm_resume(struct sd_lun *un) 5609 { 5610 ASSERT(un != NULL); 5611 5612 ASSERT(!mutex_owned(SD_MUTEX(un))); 5613 mutex_enter(SD_MUTEX(un)); 5614 un->un_power_level = SD_SPINDLE_ON; 5615 5616 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5617 mutex_enter(&un->un_pm_mutex); 5618 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5619 un->un_pm_count++; 5620 ASSERT(un->un_pm_count == 0); 5621 /* 5622 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5623 * un_suspend_cv is for a system resume, not a power management 5624 * device resume. (4297749) 5625 * cv_broadcast(&un->un_suspend_cv); 5626 */ 5627 } 5628 mutex_exit(&un->un_pm_mutex); 5629 mutex_exit(SD_MUTEX(un)); 5630 5631 return (DDI_SUCCESS); 5632 } 5633 5634 5635 /* 5636 * Function: sd_pm_idletimeout_handler 5637 * 5638 * Description: A timer routine that's active only while a device is busy. 5639 * The purpose is to extend slightly the pm framework's busy 5640 * view of the device to prevent busy/idle thrashing for 5641 * back-to-back commands. Do this by comparing the current time 5642 * to the time at which the last command completed and when the 5643 * difference is greater than sd_pm_idletime, call 5644 * pm_idle_component. In addition to indicating idle to the pm 5645 * framework, update the chain type to again use the internal pm 5646 * layers of the driver. 
5647 * 5648 * Arguments: arg - driver soft state (unit) structure 5649 * 5650 * Context: Executes in a timeout(9F) thread context 5651 */ 5652 5653 static void 5654 sd_pm_idletimeout_handler(void *arg) 5655 { 5656 struct sd_lun *un = arg; 5657 5658 time_t now; 5659 5660 mutex_enter(&sd_detach_mutex); 5661 if (un->un_detach_count != 0) { 5662 /* Abort if the instance is detaching */ 5663 mutex_exit(&sd_detach_mutex); 5664 return; 5665 } 5666 mutex_exit(&sd_detach_mutex); 5667 5668 now = ddi_get_time(); 5669 /* 5670 * Grab both mutexes, in the proper order, since we're accessing 5671 * both PM and softstate variables. 5672 */ 5673 mutex_enter(SD_MUTEX(un)); 5674 mutex_enter(&un->un_pm_mutex); 5675 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5676 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5677 /* 5678 * Update the chain types. 5679 * This takes affect on the next new command received. 5680 */ 5681 if (un->un_f_non_devbsize_supported) { 5682 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5683 } else { 5684 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5685 } 5686 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 5687 5688 SD_TRACE(SD_LOG_IO_PM, un, 5689 "sd_pm_idletimeout_handler: idling device\n"); 5690 (void) pm_idle_component(SD_DEVINFO(un), 0); 5691 un->un_pm_idle_timeid = NULL; 5692 } else { 5693 un->un_pm_idle_timeid = 5694 timeout(sd_pm_idletimeout_handler, un, 5695 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 5696 } 5697 mutex_exit(&un->un_pm_mutex); 5698 mutex_exit(SD_MUTEX(un)); 5699 } 5700 5701 5702 /* 5703 * Function: sd_pm_timeout_handler 5704 * 5705 * Description: Callback to tell framework we are idle. 5706 * 5707 * Context: timeout(9f) thread context. 5708 */ 5709 5710 static void 5711 sd_pm_timeout_handler(void *arg) 5712 { 5713 struct sd_lun *un = arg; 5714 5715 (void) pm_idle_component(SD_DEVINFO(un), 0); 5716 mutex_enter(&un->un_pm_mutex); 5717 un->un_pm_timeid = NULL; 5718 mutex_exit(&un->un_pm_mutex); 5719 } 5720 5721 5722 /* 5723 * Function: sdpower 5724 * 5725 * Description: PM entry point. 5726 * 5727 * Return Code: DDI_SUCCESS 5728 * DDI_FAILURE 5729 * 5730 * Context: Kernel thread context 5731 */ 5732 5733 static int 5734 sdpower(dev_info_t *devi, int component, int level) 5735 { 5736 struct sd_lun *un; 5737 int instance; 5738 int rval = DDI_SUCCESS; 5739 uint_t i, log_page_size, maxcycles, ncycles; 5740 uchar_t *log_page_data; 5741 int log_sense_page; 5742 int medium_present; 5743 time_t intvlp; 5744 dev_t dev; 5745 struct pm_trans_data sd_pm_tran_data; 5746 uchar_t save_state; 5747 int sval; 5748 uchar_t state_before_pm; 5749 int got_semaphore_here; 5750 5751 instance = ddi_get_instance(devi); 5752 5753 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 5754 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 5755 component != 0) { 5756 return (DDI_FAILURE); 5757 } 5758 5759 dev = sd_make_device(SD_DEVINFO(un)); 5760 5761 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 5762 5763 /* 5764 * Must synchronize power down with close. 5765 * Attempt to decrement/acquire the open/close semaphore, 5766 * but do NOT wait on it. If it's not greater than zero, 5767 * ie. it can't be decremented without waiting, then 5768 * someone else, either open or close, already has it 5769 * and the try returns 0. Use that knowledge here to determine 5770 * if it's OK to change the device power level. 5771 * Also, only increment it on exit if it was decremented, ie. gotten, 5772 * here. 
5773 */ 5774 got_semaphore_here = sema_tryp(&un->un_semoclose); 5775 5776 mutex_enter(SD_MUTEX(un)); 5777 5778 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 5779 un->un_ncmds_in_driver); 5780 5781 /* 5782 * If un_ncmds_in_driver is non-zero it indicates commands are 5783 * already being processed in the driver, or if the semaphore was 5784 * not gotten here it indicates an open or close is being processed. 5785 * At the same time somebody is requesting to go low power which 5786 * can't happen, therefore we need to return failure. 5787 */ 5788 if ((level == SD_SPINDLE_OFF) && 5789 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 5790 mutex_exit(SD_MUTEX(un)); 5791 5792 if (got_semaphore_here != 0) { 5793 sema_v(&un->un_semoclose); 5794 } 5795 SD_TRACE(SD_LOG_IO_PM, un, 5796 "sdpower: exit, device has queued cmds.\n"); 5797 return (DDI_FAILURE); 5798 } 5799 5800 /* 5801 * if it is OFFLINE that means the disk is completely dead 5802 * in our case we have to put the disk in on or off by sending commands 5803 * Of course that will fail anyway so return back here. 5804 * 5805 * Power changes to a device that's OFFLINE or SUSPENDED 5806 * are not allowed. 5807 */ 5808 if ((un->un_state == SD_STATE_OFFLINE) || 5809 (un->un_state == SD_STATE_SUSPENDED)) { 5810 mutex_exit(SD_MUTEX(un)); 5811 5812 if (got_semaphore_here != 0) { 5813 sema_v(&un->un_semoclose); 5814 } 5815 SD_TRACE(SD_LOG_IO_PM, un, 5816 "sdpower: exit, device is off-line.\n"); 5817 return (DDI_FAILURE); 5818 } 5819 5820 /* 5821 * Change the device's state to indicate it's power level 5822 * is being changed. Do this to prevent a power off in the 5823 * middle of commands, which is especially bad on devices 5824 * that are really powered off instead of just spun down. 5825 */ 5826 state_before_pm = un->un_state; 5827 un->un_state = SD_STATE_PM_CHANGING; 5828 5829 mutex_exit(SD_MUTEX(un)); 5830 5831 /* 5832 * If "pm-capable" property is set to TRUE by HBA drivers, 5833 * bypass the following checking, otherwise, check the log 5834 * sense information for this device 5835 */ 5836 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 5837 /* 5838 * Get the log sense information to understand whether the 5839 * the powercycle counts have gone beyond the threshhold. 5840 */ 5841 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5842 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5843 5844 mutex_enter(SD_MUTEX(un)); 5845 log_sense_page = un->un_start_stop_cycle_page; 5846 mutex_exit(SD_MUTEX(un)); 5847 5848 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5849 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 5850 #ifdef SDDEBUG 5851 if (sd_force_pm_supported) { 5852 /* Force a successful result */ 5853 rval = 0; 5854 } 5855 #endif 5856 if (rval != 0) { 5857 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5858 "Log Sense Failed\n"); 5859 kmem_free(log_page_data, log_page_size); 5860 /* Cannot support power management on those drives */ 5861 5862 if (got_semaphore_here != 0) { 5863 sema_v(&un->un_semoclose); 5864 } 5865 /* 5866 * On exit put the state back to it's original value 5867 * and broadcast to anyone waiting for the power 5868 * change completion. 
5869 */ 5870 mutex_enter(SD_MUTEX(un)); 5871 un->un_state = state_before_pm; 5872 cv_broadcast(&un->un_suspend_cv); 5873 mutex_exit(SD_MUTEX(un)); 5874 SD_TRACE(SD_LOG_IO_PM, un, 5875 "sdpower: exit, Log Sense Failed.\n"); 5876 return (DDI_FAILURE); 5877 } 5878 5879 /* 5880 * From the page data - Convert the essential information to 5881 * pm_trans_data 5882 */ 5883 maxcycles = 5884 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 5885 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 5886 5887 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 5888 5889 ncycles = 5890 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 5891 (log_page_data[0x26] << 8) | log_page_data[0x27]; 5892 5893 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 5894 5895 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 5896 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 5897 log_page_data[8+i]; 5898 } 5899 5900 kmem_free(log_page_data, log_page_size); 5901 5902 /* 5903 * Call pm_trans_check routine to get the Ok from 5904 * the global policy 5905 */ 5906 5907 sd_pm_tran_data.format = DC_SCSI_FORMAT; 5908 sd_pm_tran_data.un.scsi_cycles.flag = 0; 5909 5910 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 5911 #ifdef SDDEBUG 5912 if (sd_force_pm_supported) { 5913 /* Force a successful result */ 5914 rval = 1; 5915 } 5916 #endif 5917 switch (rval) { 5918 case 0: 5919 /* 5920 * Not Ok to Power cycle or error in parameters passed 5921 * Would have given the advised time to consider power 5922 * cycle. Based on the new intvlp parameter we are 5923 * supposed to pretend we are busy so that pm framework 5924 * will never call our power entry point. Because of 5925 * that install a timeout handler and wait for the 5926 * recommended time to elapse so that power management 5927 * can be effective again. 5928 * 5929 * To effect this behavior, call pm_busy_component to 5930 * indicate to the framework this device is busy. 5931 * By not adjusting un_pm_count the rest of PM in 5932 * the driver will function normally, and independant 5933 * of this but because the framework is told the device 5934 * is busy it won't attempt powering down until it gets 5935 * a matching idle. The timeout handler sends this. 5936 * Note: sd_pm_entry can't be called here to do this 5937 * because sdpower may have been called as a result 5938 * of a call to pm_raise_power from within sd_pm_entry. 5939 * 5940 * If a timeout handler is already active then 5941 * don't install another. 5942 */ 5943 mutex_enter(&un->un_pm_mutex); 5944 if (un->un_pm_timeid == NULL) { 5945 un->un_pm_timeid = 5946 timeout(sd_pm_timeout_handler, 5947 un, intvlp * drv_usectohz(1000000)); 5948 mutex_exit(&un->un_pm_mutex); 5949 (void) pm_busy_component(SD_DEVINFO(un), 0); 5950 } else { 5951 mutex_exit(&un->un_pm_mutex); 5952 } 5953 if (got_semaphore_here != 0) { 5954 sema_v(&un->un_semoclose); 5955 } 5956 /* 5957 * On exit put the state back to it's original value 5958 * and broadcast to anyone waiting for the power 5959 * change completion. 5960 */ 5961 mutex_enter(SD_MUTEX(un)); 5962 un->un_state = state_before_pm; 5963 cv_broadcast(&un->un_suspend_cv); 5964 mutex_exit(SD_MUTEX(un)); 5965 5966 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 5967 "trans check Failed, not ok to power cycle.\n"); 5968 return (DDI_FAILURE); 5969 5970 case -1: 5971 if (got_semaphore_here != 0) { 5972 sema_v(&un->un_semoclose); 5973 } 5974 /* 5975 * On exit put the state back to it's original value 5976 * and broadcast to anyone waiting for the power 5977 * change completion. 
5978 */ 5979 mutex_enter(SD_MUTEX(un)); 5980 un->un_state = state_before_pm; 5981 cv_broadcast(&un->un_suspend_cv); 5982 mutex_exit(SD_MUTEX(un)); 5983 SD_TRACE(SD_LOG_IO_PM, un, 5984 "sdpower: exit, trans check command Failed.\n"); 5985 return (DDI_FAILURE); 5986 } 5987 } 5988 5989 if (level == SD_SPINDLE_OFF) { 5990 /* 5991 * Save the last state... if the STOP FAILS we need it 5992 * for restoring 5993 */ 5994 mutex_enter(SD_MUTEX(un)); 5995 save_state = un->un_last_state; 5996 /* 5997 * There must not be any cmds. getting processed 5998 * in the driver when we get here. Power to the 5999 * device is potentially going off. 6000 */ 6001 ASSERT(un->un_ncmds_in_driver == 0); 6002 mutex_exit(SD_MUTEX(un)); 6003 6004 /* 6005 * For now suspend the device completely before spindle is 6006 * turned off 6007 */ 6008 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 6009 if (got_semaphore_here != 0) { 6010 sema_v(&un->un_semoclose); 6011 } 6012 /* 6013 * On exit put the state back to it's original value 6014 * and broadcast to anyone waiting for the power 6015 * change completion. 6016 */ 6017 mutex_enter(SD_MUTEX(un)); 6018 un->un_state = state_before_pm; 6019 cv_broadcast(&un->un_suspend_cv); 6020 mutex_exit(SD_MUTEX(un)); 6021 SD_TRACE(SD_LOG_IO_PM, un, 6022 "sdpower: exit, PM suspend Failed.\n"); 6023 return (DDI_FAILURE); 6024 } 6025 } 6026 6027 /* 6028 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6029 * close, or strategy. Dump no long uses this routine, it uses it's 6030 * own code so it can be done in polled mode. 6031 */ 6032 6033 medium_present = TRUE; 6034 6035 /* 6036 * When powering up, issue a TUR in case the device is at unit 6037 * attention. Don't do retries. Bypass the PM layer, otherwise 6038 * a deadlock on un_pm_busy_cv will occur. 6039 */ 6040 if (level == SD_SPINDLE_ON) { 6041 (void) sd_send_scsi_TEST_UNIT_READY(un, 6042 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6043 } 6044 6045 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6046 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6047 6048 sval = sd_send_scsi_START_STOP_UNIT(un, 6049 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6050 SD_PATH_DIRECT); 6051 /* Command failed, check for media present. */ 6052 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6053 medium_present = FALSE; 6054 } 6055 6056 /* 6057 * The conditions of interest here are: 6058 * if a spindle off with media present fails, 6059 * then restore the state and return an error. 6060 * else if a spindle on fails, 6061 * then return an error (there's no state to restore). 6062 * In all other cases we setup for the new state 6063 * and return success. 6064 */ 6065 switch (level) { 6066 case SD_SPINDLE_OFF: 6067 if ((medium_present == TRUE) && (sval != 0)) { 6068 /* The stop command from above failed */ 6069 rval = DDI_FAILURE; 6070 /* 6071 * The stop command failed, and we have media 6072 * present. Put the level back by calling the 6073 * sd_pm_resume() and set the state back to 6074 * it's previous value. 6075 */ 6076 (void) sd_ddi_pm_resume(un); 6077 mutex_enter(SD_MUTEX(un)); 6078 un->un_last_state = save_state; 6079 mutex_exit(SD_MUTEX(un)); 6080 break; 6081 } 6082 /* 6083 * The stop command from above succeeded. 6084 */ 6085 if (un->un_f_monitor_media_state) { 6086 /* 6087 * Terminate watch thread in case of removable media 6088 * devices going into low power state. 
This is as per 6089 * the requirements of pm framework, otherwise commands 6090 * will be generated for the device (through watch 6091 * thread), even when the device is in low power state. 6092 */ 6093 mutex_enter(SD_MUTEX(un)); 6094 un->un_f_watcht_stopped = FALSE; 6095 if (un->un_swr_token != NULL) { 6096 opaque_t temp_token = un->un_swr_token; 6097 un->un_f_watcht_stopped = TRUE; 6098 un->un_swr_token = NULL; 6099 mutex_exit(SD_MUTEX(un)); 6100 (void) scsi_watch_request_terminate(temp_token, 6101 SCSI_WATCH_TERMINATE_WAIT); 6102 } else { 6103 mutex_exit(SD_MUTEX(un)); 6104 } 6105 } 6106 break; 6107 6108 default: /* The level requested is spindle on... */ 6109 /* 6110 * Legacy behavior: return success on a failed spinup 6111 * if there is no media in the drive. 6112 * Do this by looking at medium_present here. 6113 */ 6114 if ((sval != 0) && medium_present) { 6115 /* The start command from above failed */ 6116 rval = DDI_FAILURE; 6117 break; 6118 } 6119 /* 6120 * The start command from above succeeded 6121 * Resume the devices now that we have 6122 * started the disks 6123 */ 6124 (void) sd_ddi_pm_resume(un); 6125 6126 /* 6127 * Resume the watch thread since it was suspended 6128 * when the device went into low power mode. 6129 */ 6130 if (un->un_f_monitor_media_state) { 6131 mutex_enter(SD_MUTEX(un)); 6132 if (un->un_f_watcht_stopped == TRUE) { 6133 opaque_t temp_token; 6134 6135 un->un_f_watcht_stopped = FALSE; 6136 mutex_exit(SD_MUTEX(un)); 6137 temp_token = scsi_watch_request_submit( 6138 SD_SCSI_DEVP(un), 6139 sd_check_media_time, 6140 SENSE_LENGTH, sd_media_watch_cb, 6141 (caddr_t)dev); 6142 mutex_enter(SD_MUTEX(un)); 6143 un->un_swr_token = temp_token; 6144 } 6145 mutex_exit(SD_MUTEX(un)); 6146 } 6147 } 6148 if (got_semaphore_here != 0) { 6149 sema_v(&un->un_semoclose); 6150 } 6151 /* 6152 * On exit put the state back to it's original value 6153 * and broadcast to anyone waiting for the power 6154 * change completion. 6155 */ 6156 mutex_enter(SD_MUTEX(un)); 6157 un->un_state = state_before_pm; 6158 cv_broadcast(&un->un_suspend_cv); 6159 mutex_exit(SD_MUTEX(un)); 6160 6161 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6162 6163 return (rval); 6164 } 6165 6166 6167 6168 /* 6169 * Function: sdattach 6170 * 6171 * Description: Driver's attach(9e) entry point function. 6172 * 6173 * Arguments: devi - opaque device info handle 6174 * cmd - attach type 6175 * 6176 * Return Code: DDI_SUCCESS 6177 * DDI_FAILURE 6178 * 6179 * Context: Kernel thread context 6180 */ 6181 6182 static int 6183 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6184 { 6185 switch (cmd) { 6186 case DDI_ATTACH: 6187 return (sd_unit_attach(devi)); 6188 case DDI_RESUME: 6189 return (sd_ddi_resume(devi)); 6190 default: 6191 break; 6192 } 6193 return (DDI_FAILURE); 6194 } 6195 6196 6197 /* 6198 * Function: sddetach 6199 * 6200 * Description: Driver's detach(9E) entry point function. 
6201 * 6202 * Arguments: devi - opaque device info handle 6203 * cmd - detach type 6204 * 6205 * Return Code: DDI_SUCCESS 6206 * DDI_FAILURE 6207 * 6208 * Context: Kernel thread context 6209 */ 6210 6211 static int 6212 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6213 { 6214 switch (cmd) { 6215 case DDI_DETACH: 6216 return (sd_unit_detach(devi)); 6217 case DDI_SUSPEND: 6218 return (sd_ddi_suspend(devi)); 6219 default: 6220 break; 6221 } 6222 return (DDI_FAILURE); 6223 } 6224 6225 6226 /* 6227 * Function: sd_sync_with_callback 6228 * 6229 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6230 * state while the callback routine is active. 6231 * 6232 * Arguments: un: softstate structure for the instance 6233 * 6234 * Context: Kernel thread context 6235 */ 6236 6237 static void 6238 sd_sync_with_callback(struct sd_lun *un) 6239 { 6240 ASSERT(un != NULL); 6241 6242 mutex_enter(SD_MUTEX(un)); 6243 6244 ASSERT(un->un_in_callback >= 0); 6245 6246 while (un->un_in_callback > 0) { 6247 mutex_exit(SD_MUTEX(un)); 6248 delay(2); 6249 mutex_enter(SD_MUTEX(un)); 6250 } 6251 6252 mutex_exit(SD_MUTEX(un)); 6253 } 6254 6255 /* 6256 * Function: sd_unit_attach 6257 * 6258 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6259 * the soft state structure for the device and performs 6260 * all necessary structure and device initializations. 6261 * 6262 * Arguments: devi: the system's dev_info_t for the device. 6263 * 6264 * Return Code: DDI_SUCCESS if attach is successful. 6265 * DDI_FAILURE if any part of the attach fails. 6266 * 6267 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6268 * Kernel thread context only. Can sleep. 6269 */ 6270 6271 static int 6272 sd_unit_attach(dev_info_t *devi) 6273 { 6274 struct scsi_device *devp; 6275 struct sd_lun *un; 6276 char *variantp; 6277 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6278 int instance; 6279 int rval; 6280 int wc_enabled; 6281 int tgt; 6282 uint64_t capacity; 6283 uint_t lbasize = 0; 6284 dev_info_t *pdip = ddi_get_parent(devi); 6285 int offbyone = 0; 6286 int geom_label_valid = 0; 6287 6288 /* 6289 * Retrieve the target driver's private data area. This was set 6290 * up by the HBA. 6291 */ 6292 devp = ddi_get_driver_private(devi); 6293 6294 /* 6295 * Retrieve the target ID of the device. 6296 */ 6297 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6298 SCSI_ADDR_PROP_TARGET, -1); 6299 6300 /* 6301 * Since we have no idea what state things were left in by the last 6302 * user of the device, set up some 'default' settings, ie. turn 'em 6303 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6304 * Do this before the scsi_probe, which sends an inquiry. 6305 * This is a fix for bug (4430280). 6306 * Of special importance is wide-xfer. The drive could have been left 6307 * in wide transfer mode by the last driver to communicate with it, 6308 * this includes us. If that's the case, and if the following is not 6309 * setup properly or we don't re-negotiate with the drive prior to 6310 * transferring data to/from the drive, it causes bus parity errors, 6311 * data overruns, and unexpected interrupts. This first occurred when 6312 * the fix for bug (4378686) was made. 
6313 */ 6314 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6315 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6316 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6317 6318 /* 6319 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6320 * on a target. Setting it per lun instance actually sets the 6321 * capability of this target, which affects those luns already 6322 * attached on the same target. So during attach, we can only disable 6323 * this capability only when no other lun has been attached on this 6324 * target. By doing this, we assume a target has the same tagged-qing 6325 * capability for every lun. The condition can be removed when HBA 6326 * is changed to support per lun based tagged-qing capability. 6327 */ 6328 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6329 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6330 } 6331 6332 /* 6333 * Use scsi_probe() to issue an INQUIRY command to the device. 6334 * This call will allocate and fill in the scsi_inquiry structure 6335 * and point the sd_inq member of the scsi_device structure to it. 6336 * If the attach succeeds, then this memory will not be de-allocated 6337 * (via scsi_unprobe()) until the instance is detached. 6338 */ 6339 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6340 goto probe_failed; 6341 } 6342 6343 /* 6344 * Check the device type as specified in the inquiry data and 6345 * claim it if it is of a type that we support. 6346 */ 6347 switch (devp->sd_inq->inq_dtype) { 6348 case DTYPE_DIRECT: 6349 break; 6350 case DTYPE_RODIRECT: 6351 break; 6352 case DTYPE_OPTICAL: 6353 break; 6354 case DTYPE_NOTPRESENT: 6355 default: 6356 /* Unsupported device type; fail the attach. */ 6357 goto probe_failed; 6358 } 6359 6360 /* 6361 * Allocate the soft state structure for this unit. 6362 * 6363 * We rely upon this memory being set to all zeroes by 6364 * ddi_soft_state_zalloc(). We assume that any member of the 6365 * soft state structure that is not explicitly initialized by 6366 * this routine will have a value of zero. 6367 */ 6368 instance = ddi_get_instance(devp->sd_dev); 6369 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6370 goto probe_failed; 6371 } 6372 6373 /* 6374 * Retrieve a pointer to the newly-allocated soft state. 6375 * 6376 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6377 * was successful, unless something has gone horribly wrong and the 6378 * ddi's soft state internals are corrupt (in which case it is 6379 * probably better to halt here than just fail the attach....) 6380 */ 6381 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6382 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6383 instance); 6384 /*NOTREACHED*/ 6385 } 6386 6387 /* 6388 * Link the back ptr of the driver soft state to the scsi_device 6389 * struct for this lun. 6390 * Save a pointer to the softstate in the driver-private area of 6391 * the scsi_device struct. 6392 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6393 * we first set un->un_sd below. 6394 */ 6395 un->un_sd = devp; 6396 devp->sd_private = (opaque_t)un; 6397 6398 /* 6399 * The following must be after devp is stored in the soft state struct. 6400 */ 6401 #ifdef SDDEBUG 6402 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6403 "%s_unit_attach: un:0x%p instance:%d\n", 6404 ddi_driver_name(devi), un, instance); 6405 #endif 6406 6407 /* 6408 * Set up the device type and node type (for the minor nodes). 
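* The switch below applies this mapping (summarized here for
* reference; the table is derived from the code that follows):
*
*	inq_dtype		un_node_type		un_ctype
*	---------		------------		--------
*	DTYPE_RODIRECT		DDI_NT_CD_CHAN		CTYPE_CDROM
*	DTYPE_OPTICAL		DDI_NT_BLOCK_CHAN	CTYPE_ROD
*	(all others)		DDI_NT_BLOCK_CHAN	CTYPE_CCS
*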
* By default we assume that the device can at least support the 6410 * Common Command Set. Call it a CD-ROM if it reports itself 6411 * as a RODIRECT device. 6412 */ 6413 switch (devp->sd_inq->inq_dtype) { 6414 case DTYPE_RODIRECT: 6415 un->un_node_type = DDI_NT_CD_CHAN; 6416 un->un_ctype = CTYPE_CDROM; 6417 break; 6418 case DTYPE_OPTICAL: 6419 un->un_node_type = DDI_NT_BLOCK_CHAN; 6420 un->un_ctype = CTYPE_ROD; 6421 break; 6422 default: 6423 un->un_node_type = DDI_NT_BLOCK_CHAN; 6424 un->un_ctype = CTYPE_CCS; 6425 break; 6426 } 6427 6428 /* 6429 * Try to read the interconnect type from the HBA. 6430 * 6431 * Note: This driver is currently compiled as two binaries, a parallel 6432 * scsi version (sd) and a fibre channel version (ssd). All functional 6433 * differences are determined at compile time. In the future a single 6434 * binary will be provided and the interconnect type will be used to 6435 * differentiate between fibre and parallel scsi behaviors. At that time 6436 * it will be necessary for all fibre channel HBAs to support this 6437 * property. 6438 * 6439 * Set un_f_is_fibre to TRUE (default to fibre). 6440 */ 6441 un->un_f_is_fibre = TRUE; 6442 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6443 case INTERCONNECT_SSA: 6444 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6445 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6446 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6447 break; 6448 case INTERCONNECT_PARALLEL: 6449 un->un_f_is_fibre = FALSE; 6450 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6451 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6452 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 6453 break; 6454 case INTERCONNECT_SATA: 6455 un->un_f_is_fibre = FALSE; 6456 un->un_interconnect_type = SD_INTERCONNECT_SATA; 6457 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6458 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 6459 break; 6460 case INTERCONNECT_FIBRE: 6461 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 6462 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6463 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 6464 break; 6465 case INTERCONNECT_FABRIC: 6466 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 6467 un->un_node_type = DDI_NT_BLOCK_FABRIC; 6468 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6469 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 6470 break; 6471 default: 6472 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 6473 /* 6474 * The HBA does not support the "interconnect-type" property 6475 * (or did not provide a recognized type). 6476 * 6477 * Note: This will be obsoleted when a single fibre channel 6478 * and parallel scsi driver is delivered. In the meantime the 6479 * interconnect type will be set to the platform default. If that 6480 * type is not parallel SCSI, it means that we should be 6481 * assuming "ssd" semantics. However, here this also means that 6482 * the FC HBA is not supporting the "interconnect-type" property 6483 * like we expect it to, so log this occurrence. 6484 */ 6485 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 6486 if (!SD_IS_PARALLEL_SCSI(un)) { 6487 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6488 "sd_unit_attach: un:0x%p Assuming " 6489 "INTERCONNECT_FIBRE\n", un); 6490 } else { 6491 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6492 "sd_unit_attach: un:0x%p Assuming " 6493 "INTERCONNECT_PARALLEL\n", un); 6494 un->un_f_is_fibre = FALSE; 6495 } 6496 #else 6497 /* 6498 * Note: This source will be implemented when a single fibre 6499 * channel and parallel scsi driver is delivered.
The default 6500 * will be to assume that if a device does not support the 6501 * "interconnect-type" property it is a parallel SCSI HBA and 6502 * we will set the interconnect type for parallel scsi. 6503 */ 6504 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6505 un->un_f_is_fibre = FALSE; 6506 #endif 6507 break; 6508 } 6509 6510 if (un->un_f_is_fibre == TRUE) { 6511 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 6512 SCSI_VERSION_3) { 6513 switch (un->un_interconnect_type) { 6514 case SD_INTERCONNECT_FIBRE: 6515 case SD_INTERCONNECT_SSA: 6516 un->un_node_type = DDI_NT_BLOCK_WWN; 6517 break; 6518 default: 6519 break; 6520 } 6521 } 6522 } 6523 6524 /* 6525 * Initialize the Request Sense command for the target 6526 */ 6527 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 6528 goto alloc_rqs_failed; 6529 } 6530 6531 /* 6532 * Set un_retry_count with SD_RETRY_COUNT; this is OK for SPARC 6533 * with separate binaries for sd and ssd. 6534 * 6535 * x86 has one binary, and un_retry_count is set based on connection 6536 * type. The hardcoded values will go away when SPARC uses one binary 6537 * for sd and ssd. These hardcoded values need to match 6538 * SD_RETRY_COUNT in sddef.h. 6539 * The value used is based on interconnect type: 6540 * fibre = 3, parallel = 5. 6541 */ 6542 #if defined(__i386) || defined(__amd64) 6543 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 6544 #else 6545 un->un_retry_count = SD_RETRY_COUNT; 6546 #endif 6547 6548 /* 6549 * Set the per disk retry count to the default number of retries 6550 * for disks and CDROMs. This value can be overridden by the 6551 * disk property list or an entry in sd.conf. 6552 */ 6553 un->un_notready_retry_count = 6554 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 6555 : DISK_NOT_READY_RETRY_COUNT(un); 6556 6557 /* 6558 * Set the busy retry count to the default value of un_retry_count. 6559 * This can be overridden by entries in sd.conf or the device 6560 * config table. 6561 */ 6562 un->un_busy_retry_count = un->un_retry_count; 6563 6564 /* 6565 * Init the reset threshold for retries. This number determines 6566 * how many retries must be performed before a reset can be issued 6567 * (for certain error conditions). This can be overridden by entries 6568 * in sd.conf or the device config table. 6569 */ 6570 un->un_reset_retry_count = (un->un_retry_count / 2); 6571 6572 /* 6573 * Set the victim_retry_count to twice the default un_retry_count. 6574 */ 6575 un->un_victim_retry_count = (2 * un->un_retry_count); 6576 6577 /* 6578 * Set the reservation release timeout to the default value of 6579 * 5 seconds. This can be overridden by entries in ssd.conf or the 6580 * device config table. 6581 */ 6582 un->un_reserve_release_time = 5; 6583 6584 /* 6585 * Set up the default maximum transfer size. Note that this may 6586 * get updated later in the attach, when setting up default wide 6587 * operations for disks. 6588 */ 6589 #if defined(__i386) || defined(__amd64) 6590 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 6591 #else 6592 un->un_max_xfer_size = (uint_t)maxphys; 6593 #endif 6594 6595 /* 6596 * Get "allow bus device reset" property (defaults to "enabled" if 6597 * the property was not defined). This is to disable bus resets for 6598 * certain kinds of error recovery. Note: In the future when a run-time 6599 * fibre check is available the soft state flag should default to 6600 * enabled.
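*
* (Aside: the lookup below calls ddi_getprop(9F) with a default of
* 1, so bus device reset is enabled unless the property is
* explicitly set to 0, e.g. via a driver.conf line such as
*
*	allow-bus-device-reset=0;
*
* -- syntax shown for illustration only.)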
6601 */ 6602 if (un->un_f_is_fibre == TRUE) { 6603 un->un_f_allow_bus_device_reset = TRUE; 6604 } else { 6605 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6606 "allow-bus-device-reset", 1) != 0) { 6607 un->un_f_allow_bus_device_reset = TRUE; 6608 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6609 "sd_unit_attach: un:0x%p Bus device reset " 6610 "enabled\n", un); 6611 } else { 6612 un->un_f_allow_bus_device_reset = FALSE; 6613 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6614 "sd_unit_attach: un:0x%p Bus device reset " 6615 "disabled\n", un); 6616 } 6617 } 6618 6619 /* 6620 * Check if this is an ATAPI device. ATAPI devices use Group 1 6621 * Read/Write commands and Group 2 Mode Sense/Select commands. 6622 * 6623 * Note: The "obsolete" way of doing this is to check for the "atapi" 6624 * property. The new "variant" property with a value of "atapi" has been 6625 * introduced so that future 'variants' of standard SCSI behavior (like 6626 * atapi) could be specified by the underlying HBA drivers by supplying 6627 * a new value for the "variant" property, instead of having to define a 6628 * new property. 6629 */ 6630 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6631 un->un_f_cfg_is_atapi = TRUE; 6632 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6633 "sd_unit_attach: un:0x%p Atapi device\n", un); 6634 } 6635 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6636 &variantp) == DDI_PROP_SUCCESS) { 6637 if (strcmp(variantp, "atapi") == 0) { 6638 un->un_f_cfg_is_atapi = TRUE; 6639 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6640 "sd_unit_attach: un:0x%p Atapi device\n", un); 6641 } 6642 ddi_prop_free(variantp); 6643 } 6644 6645 un->un_cmd_timeout = SD_IO_TIME; 6646 6647 /* Info on current states, statuses, etc. (Updated frequently) */ 6648 un->un_state = SD_STATE_NORMAL; 6649 un->un_last_state = SD_STATE_NORMAL; 6650 6651 /* Control & status info for command throttling */ 6652 un->un_throttle = sd_max_throttle; 6653 un->un_saved_throttle = sd_max_throttle; 6654 un->un_min_throttle = sd_min_throttle; 6655 6656 if (un->un_f_is_fibre == TRUE) { 6657 un->un_f_use_adaptive_throttle = TRUE; 6658 } else { 6659 un->un_f_use_adaptive_throttle = FALSE; 6660 } 6661 6662 /* Removable media support. */ 6663 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6664 un->un_mediastate = DKIO_NONE; 6665 un->un_specified_mediastate = DKIO_NONE; 6666 6667 /* CVs for suspend/resume (PM or DR) */ 6668 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6669 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6670 6671 /* Power management support. */ 6672 un->un_power_level = SD_SPINDLE_UNINIT; 6673 6674 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6675 un->un_f_wcc_inprog = 0; 6676 6677 /* 6678 * The open/close semaphore is used to serialize threads executing 6679 * in the driver's open & close entry point routines for a given 6680 * instance. 6681 */ 6682 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 6683 6684 /* 6685 * The conf file entry and softstate variable is a forceful override, 6686 * meaning a non-zero value must be entered to change the default. 6687 */ 6688 un->un_f_disksort_disabled = FALSE; 6689 6690 /* 6691 * Retrieve the properties from the static driver table or the driver 6692 * configuration file (.conf) for this unit and update the soft state 6693 * for the device as needed for the indicated properties. 
* Note: the property configuration needs to occur here as some of the 6695 * following routines may have dependencies on soft state flags set 6696 * as part of the driver property configuration. 6697 */ 6698 sd_read_unit_properties(un); 6699 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6700 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 6701 6702 /* 6703 * A device is treated as hotpluggable only if it has the 6704 * "hotpluggable" property; otherwise it is regarded as 6705 * non-hotpluggable. 6706 */ 6707 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 6708 -1) != -1) { 6709 un->un_f_is_hotpluggable = TRUE; 6710 } 6711 6712 /* 6713 * Set the unit's attributes (flags) according to the "hotpluggable" 6714 * property and the RMB bit in the INQUIRY data. 6715 */ 6716 sd_set_unit_attributes(un, devi); 6717 6718 /* 6719 * By default, we mark the capacity, lbasize, and geometry 6720 * as invalid. Only if we successfully read a valid capacity 6721 * will we update the un_blockcount and un_tgt_blocksize with the 6722 * valid values (the geometry will be validated later). 6723 */ 6724 un->un_f_blockcount_is_valid = FALSE; 6725 un->un_f_tgt_blocksize_is_valid = FALSE; 6726 6727 /* 6728 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 6729 * otherwise. 6730 */ 6731 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 6732 un->un_blockcount = 0; 6733 6734 /* 6735 * Set up the per-instance info needed to determine the correct 6736 * CDBs and other info for issuing commands to the target. 6737 */ 6738 sd_init_cdb_limits(un); 6739 6740 /* 6741 * Set up the IO chains to use, based upon the target type. 6742 */ 6743 if (un->un_f_non_devbsize_supported) { 6744 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6745 } else { 6746 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6747 } 6748 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6749 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 6750 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 6751 6752 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 6753 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 6754 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 6755 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 6756 6757 6758 if (ISCD(un)) { 6759 un->un_additional_codes = sd_additional_codes; 6760 } else { 6761 un->un_additional_codes = NULL; 6762 } 6763 6764 /* 6765 * Create the kstats here so they can be available for attach-time 6766 * routines that send commands to the unit (either polled or via 6767 * sd_send_scsi_cmd). 6768 * 6769 * Note: This is a critical sequence that needs to be maintained: 6770 * 1) Instantiate the kstats here, before any routines using the 6771 * iopath (i.e. sd_send_scsi_cmd). 6772 * 2) Instantiate and initialize the partition stats 6773 * (sd_set_pstats). 6774 * 3) Initialize the error stats (sd_set_errstats), following 6775 * sd_validate_geometry(), sd_register_devid(), 6776 * and sd_cache_control().
6777 */ 6778 6779 un->un_stats = kstat_create(sd_label, instance, 6780 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 6781 if (un->un_stats != NULL) { 6782 un->un_stats->ks_lock = SD_MUTEX(un); 6783 kstat_install(un->un_stats); 6784 } 6785 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6786 "sd_unit_attach: un:0x%p un_stats created\n", un); 6787 6788 sd_create_errstats(un, instance); 6789 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6790 "sd_unit_attach: un:0x%p errstats created\n", un); 6791 6792 /* 6793 * The following if/else code was relocated here from below as part 6794 * of the fix for bug (4430280). However with the default setup added 6795 * on entry to this routine, it's no longer absolutely necessary for 6796 * this to be before the call to sd_spin_up_unit. 6797 */ 6798 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 6799 /* 6800 * If SCSI-2 tagged queueing is supported by the target 6801 * and by the host adapter then we will enable it. 6802 */ 6803 un->un_tagflags = 0; 6804 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 6805 (devp->sd_inq->inq_cmdque) && 6806 (un->un_f_arq_enabled == TRUE)) { 6807 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 6808 1, 1) == 1) { 6809 un->un_tagflags = FLAG_STAG; 6810 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6811 "sd_unit_attach: un:0x%p tag queueing " 6812 "enabled\n", un); 6813 } else if (scsi_ifgetcap(SD_ADDRESS(un), 6814 "untagged-qing", 0) == 1) { 6815 un->un_f_opt_queueing = TRUE; 6816 un->un_saved_throttle = un->un_throttle = 6817 min(un->un_throttle, 3); 6818 } else { 6819 un->un_f_opt_queueing = FALSE; 6820 un->un_saved_throttle = un->un_throttle = 1; 6821 } 6822 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 6823 == 1) && (un->un_f_arq_enabled == TRUE)) { 6824 /* The Host Adapter supports internal queueing. */ 6825 un->un_f_opt_queueing = TRUE; 6826 un->un_saved_throttle = un->un_throttle = 6827 min(un->un_throttle, 3); 6828 } else { 6829 un->un_f_opt_queueing = FALSE; 6830 un->un_saved_throttle = un->un_throttle = 1; 6831 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6832 "sd_unit_attach: un:0x%p no tag queueing\n", un); 6833 } 6834 6835 /* 6836 * Enable large transfers for SATA/SAS drives 6837 */ 6838 if (SD_IS_SERIAL(un)) { 6839 un->un_max_xfer_size = 6840 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6841 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6842 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6843 "sd_unit_attach: un:0x%p max transfer " 6844 "size=0x%x\n", un, un->un_max_xfer_size); 6845 6846 } 6847 6848 /* Setup or tear down default wide operations for disks */ 6849 6850 /* 6851 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 6852 * and "ssd_max_xfer_size" to exist simultaneously on the same 6853 * system and be set to different values. In the future this 6854 * code may need to be updated when the ssd module is 6855 * obsoleted and removed from the system. 
(4299588) 6856 */ 6857 if (SD_IS_PARALLEL_SCSI(un) && 6858 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6859 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6860 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6861 1, 1) == 1) { 6862 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6863 "sd_unit_attach: un:0x%p Wide Transfer " 6864 "enabled\n", un); 6865 } 6866 6867 /* 6868 * If tagged queuing has also been enabled, then 6869 * enable large xfers 6870 */ 6871 if (un->un_saved_throttle == sd_max_throttle) { 6872 un->un_max_xfer_size = 6873 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6874 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6875 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6876 "sd_unit_attach: un:0x%p max transfer " 6877 "size=0x%x\n", un, un->un_max_xfer_size); 6878 } 6879 } else { 6880 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6881 0, 1) == 1) { 6882 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6883 "sd_unit_attach: un:0x%p " 6884 "Wide Transfer disabled\n", un); 6885 } 6886 } 6887 } else { 6888 un->un_tagflags = FLAG_STAG; 6889 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 6890 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 6891 } 6892 6893 /* 6894 * If this target supports LUN reset, try to enable it. 6895 */ 6896 if (un->un_f_lun_reset_enabled) { 6897 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 6898 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6899 "un:0x%p lun_reset capability set\n", un); 6900 } else { 6901 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6902 "un:0x%p lun-reset capability not set\n", un); 6903 } 6904 } 6905 6906 /* 6907 * At this point in the attach, we have enough info in the 6908 * soft state to be able to issue commands to the target. 6909 * 6910 * All command paths used below MUST issue their commands as 6911 * SD_PATH_DIRECT. This is important as intermediate layers 6912 * are not all initialized yet (such as PM). 6913 */ 6914 6915 /* 6916 * Send a TEST UNIT READY command to the device. This should clear 6917 * any outstanding UNIT ATTENTION that may be present. 6918 * 6919 * Note: Don't check for success, just track if there is a reservation, 6920 * this is a throw away command to clear any unit attentions. 6921 * 6922 * Note: This MUST be the first command issued to the target during 6923 * attach to ensure power on UNIT ATTENTIONS are cleared. 6924 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 6925 * with attempts at spinning up a device with no media. 6926 */ 6927 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 6928 reservation_flag = SD_TARGET_IS_RESERVED; 6929 } 6930 6931 /* 6932 * If the device is NOT a removable media device, attempt to spin 6933 * it up (using the START_STOP_UNIT command) and read its capacity 6934 * (using the READ CAPACITY command). Note, however, that either 6935 * of these could fail and in some cases we would continue with 6936 * the attach despite the failure (see below). 6937 */ 6938 if (un->un_f_descr_format_supported) { 6939 switch (sd_spin_up_unit(un)) { 6940 case 0: 6941 /* 6942 * Spin-up was successful; now try to read the 6943 * capacity. If successful then save the results 6944 * and mark the capacity & lbasize as valid. 
6945 */ 6946 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6947 "sd_unit_attach: un:0x%p spin-up successful\n", un); 6948 6949 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 6950 &lbasize, SD_PATH_DIRECT)) { 6951 case 0: { 6952 if (capacity > DK_MAX_BLOCKS) { 6953 #ifdef _LP64 6954 if (capacity + 1 > 6955 SD_GROUP1_MAX_ADDRESS) { 6956 /* 6957 * Enable descriptor format 6958 * sense data so that we can 6959 * get 64 bit sense data 6960 * fields. 6961 */ 6962 sd_enable_descr_sense(un); 6963 } 6964 #else 6965 /* 32-bit kernels can't handle this */ 6966 scsi_log(SD_DEVINFO(un), 6967 sd_label, CE_WARN, 6968 "disk has %llu blocks, which " 6969 "is too large for a 32-bit " 6970 "kernel", capacity); 6971 6972 #if defined(__i386) || defined(__amd64) 6973 /* 6974 * 1TB disk was treated as (1T - 512)B 6975 * in the past, so that it might have 6976 * valid VTOC and solaris partitions, 6977 * we have to allow it to continue to 6978 * work. 6979 */ 6980 if (capacity -1 > DK_MAX_BLOCKS) 6981 #endif 6982 goto spinup_failed; 6983 #endif 6984 } 6985 6986 /* 6987 * Here it's not necessary to check the case: 6988 * the capacity of the device is bigger than 6989 * what the max hba cdb can support. Because 6990 * sd_send_scsi_READ_CAPACITY will retrieve 6991 * the capacity by sending USCSI command, which 6992 * is constrained by the max hba cdb. Actually, 6993 * sd_send_scsi_READ_CAPACITY will return 6994 * EINVAL when using bigger cdb than required 6995 * cdb length. Will handle this case in 6996 * "case EINVAL". 6997 */ 6998 6999 /* 7000 * The following relies on 7001 * sd_send_scsi_READ_CAPACITY never 7002 * returning 0 for capacity and/or lbasize. 7003 */ 7004 sd_update_block_info(un, lbasize, capacity); 7005 7006 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7007 "sd_unit_attach: un:0x%p capacity = %ld " 7008 "blocks; lbasize= %ld.\n", un, 7009 un->un_blockcount, un->un_tgt_blocksize); 7010 7011 break; 7012 } 7013 case EINVAL: 7014 /* 7015 * In the case where the max-cdb-length property 7016 * is smaller than the required CDB length for 7017 * a SCSI device, a target driver can fail to 7018 * attach to that device. 7019 */ 7020 scsi_log(SD_DEVINFO(un), 7021 sd_label, CE_WARN, 7022 "disk capacity is too large " 7023 "for current cdb length"); 7024 goto spinup_failed; 7025 case EACCES: 7026 /* 7027 * Should never get here if the spin-up 7028 * succeeded, but code it in anyway. 7029 * From here, just continue with the attach... 7030 */ 7031 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7032 "sd_unit_attach: un:0x%p " 7033 "sd_send_scsi_READ_CAPACITY " 7034 "returned reservation conflict\n", un); 7035 reservation_flag = SD_TARGET_IS_RESERVED; 7036 break; 7037 default: 7038 /* 7039 * Likewise, should never get here if the 7040 * spin-up succeeded. Just continue with 7041 * the attach... 7042 */ 7043 break; 7044 } 7045 break; 7046 case EACCES: 7047 /* 7048 * Device is reserved by another host. In this case 7049 * we could not spin it up or read the capacity, but 7050 * we continue with the attach anyway. 7051 */ 7052 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7053 "sd_unit_attach: un:0x%p spin-up reservation " 7054 "conflict.\n", un); 7055 reservation_flag = SD_TARGET_IS_RESERVED; 7056 break; 7057 default: 7058 /* Fail the attach if the spin-up failed. 
 */
7059        SD_INFO(SD_LOG_ATTACH_DETACH, un,
7060            "sd_unit_attach: un:0x%p spin-up failed.", un);
7061        goto spinup_failed;
7062    }
7063    }
7064
7065    /*
7066     * Check to see if this is an MMC drive
7067     */
7068    if (ISCD(un)) {
7069        sd_set_mmc_caps(un);
7070    }
7071
7072
7073    /*
7074     * Add a zero-length attribute to tell the world we support
7075     * kernel ioctls (for layered drivers)
7076     */
7077    (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
7078        DDI_KERNEL_IOCTL, NULL, 0);
7079
7080    /*
7081     * Add a boolean property to tell the world we support
7082     * the B_FAILFAST flag (for layered drivers)
7083     */
7084    (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
7085        "ddi-failfast-supported", NULL, 0);
7086
7087    /*
7088     * Initialize power management
7089     */
7090    mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
7091    cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
7092    sd_setup_pm(un, devi);
7093    if (un->un_f_pm_is_enabled == FALSE) {
7094        /*
7095         * For performance, point to a jump table that does
7096         * not include pm.
7097         * The direct and priority chains don't change with PM.
7098         *
7099         * Note: this is currently done based on individual device
7100         * capabilities. When an interface for determining system
7101         * power enabled state becomes available, or when additional
7102         * layers are added to the command chain, these values will
7103         * have to be re-evaluated for correctness.
7104         */
7105        if (un->un_f_non_devbsize_supported) {
7106            un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
7107        } else {
7108            un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
7109        }
7110        un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
7111    }
7112
7113    /*
7114     * This property is set to 0 by HA software to avoid retries
7115     * on a reserved disk. (The preferred property name is
7116     * "retry-on-reservation-conflict") (1189689)
7117     *
7118     * Note: The use of a global here can have unintended consequences. A
7119     * per-instance variable is preferable for matching the capabilities
7120     * of different underlying HBAs (4402600)
7121     */
7122    sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
7123        DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
7124        sd_retry_on_reservation_conflict);
7125    if (sd_retry_on_reservation_conflict != 0) {
7126        sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
7127            devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
7128            sd_retry_on_reservation_conflict);
7129    }
7130
7131    /* Set up options for QFULL handling. */
7132    if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7133        "qfull-retries", -1)) != -1) {
7134        (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
7135            rval, 1);
7136    }
7137    if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7138        "qfull-retry-interval", -1)) != -1) {
7139        (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
7140            rval, 1);
7141    }
7142
7143    /*
7144     * This just prints a message announcing the existence of the
7145     * device. The message is always printed in the system logfile, but
7146     * only appears on the console if the system is booted with the
7147     * -v (verbose) argument.
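 *
 * For example, on a verbose boot the announcement for an instance
 * typically looks something like (illustrative; the parent nexus
 * name and the address format depend on the HBA):
 *
 *	sd1 at mpt0: target 1 lun 0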
7148 */ 7149 ddi_report_dev(devi); 7150 7151 un->un_mediastate = DKIO_NONE; 7152 7153 cmlb_alloc_handle(&un->un_cmlbhandle); 7154 7155 #if defined(__i386) || defined(__amd64) 7156 /* 7157 * On x86, compensate for off-by-1 legacy error 7158 */ 7159 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7160 (lbasize == un->un_sys_blocksize)) 7161 offbyone = CMLB_OFF_BY_ONE; 7162 #endif 7163 7164 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7165 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7166 un->un_node_type, offbyone, un->un_cmlbhandle, 7167 (void *)SD_PATH_DIRECT) != 0) { 7168 goto cmlb_attach_failed; 7169 } 7170 7171 7172 /* 7173 * Read and validate the device's geometry (ie, disk label) 7174 * A new unformatted drive will not have a valid geometry, but 7175 * the driver needs to successfully attach to this device so 7176 * the drive can be formatted via ioctls. 7177 */ 7178 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7179 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7180 7181 mutex_enter(SD_MUTEX(un)); 7182 7183 /* 7184 * Read and initialize the devid for the unit. 7185 */ 7186 ASSERT(un->un_errstats != NULL); 7187 if (un->un_f_devid_supported) { 7188 sd_register_devid(un, devi, reservation_flag); 7189 } 7190 mutex_exit(SD_MUTEX(un)); 7191 7192 #if (defined(__fibre)) 7193 /* 7194 * Register callbacks for fibre only. You can't do this soley 7195 * on the basis of the devid_type because this is hba specific. 7196 * We need to query our hba capabilities to find out whether to 7197 * register or not. 7198 */ 7199 if (un->un_f_is_fibre) { 7200 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7201 sd_init_event_callbacks(un); 7202 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7203 "sd_unit_attach: un:0x%p event callbacks inserted", 7204 un); 7205 } 7206 } 7207 #endif 7208 7209 if (un->un_f_opt_disable_cache == TRUE) { 7210 /* 7211 * Disable both read cache and write cache. This is 7212 * the historic behavior of the keywords in the config file. 7213 */ 7214 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7215 0) { 7216 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7217 "sd_unit_attach: un:0x%p Could not disable " 7218 "caching", un); 7219 goto devid_failed; 7220 } 7221 } 7222 7223 /* 7224 * Check the value of the WCE bit now and 7225 * set un_f_write_cache_enabled accordingly. 7226 */ 7227 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7228 mutex_enter(SD_MUTEX(un)); 7229 un->un_f_write_cache_enabled = (wc_enabled != 0); 7230 mutex_exit(SD_MUTEX(un)); 7231 7232 /* 7233 * Find out what type of reservation this disk supports. 7234 */ 7235 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7236 case 0: 7237 /* 7238 * SCSI-3 reservations are supported. 7239 */ 7240 un->un_reservation_type = SD_SCSI3_RESERVATION; 7241 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7242 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7243 break; 7244 case ENOTSUP: 7245 /* 7246 * The PERSISTENT RESERVE IN command would not be recognized by 7247 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
7248 */ 7249 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7250 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7251 un->un_reservation_type = SD_SCSI2_RESERVATION; 7252 break; 7253 default: 7254 /* 7255 * default to SCSI-3 reservations 7256 */ 7257 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7258 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7259 un->un_reservation_type = SD_SCSI3_RESERVATION; 7260 break; 7261 } 7262 7263 /* 7264 * Set the pstat and error stat values here, so data obtained during the 7265 * previous attach-time routines is available. 7266 * 7267 * Note: This is a critical sequence that needs to be maintained: 7268 * 1) Instantiate the kstats before any routines using the iopath 7269 * (i.e. sd_send_scsi_cmd). 7270 * 2) Initialize the error stats (sd_set_errstats) and partition 7271 * stats (sd_set_pstats)here, following 7272 * cmlb_validate_geometry(), sd_register_devid(), and 7273 * sd_cache_control(). 7274 */ 7275 7276 if (un->un_f_pkstats_enabled && geom_label_valid) { 7277 sd_set_pstats(un); 7278 SD_TRACE(SD_LOG_IO_PARTITION, un, 7279 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7280 } 7281 7282 sd_set_errstats(un); 7283 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7284 "sd_unit_attach: un:0x%p errstats set\n", un); 7285 7286 7287 /* 7288 * After successfully attaching an instance, we record the information 7289 * of how many luns have been attached on the relative target and 7290 * controller for parallel SCSI. This information is used when sd tries 7291 * to set the tagged queuing capability in HBA. 7292 */ 7293 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7294 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7295 } 7296 7297 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7298 "sd_unit_attach: un:0x%p exit success\n", un); 7299 7300 return (DDI_SUCCESS); 7301 7302 /* 7303 * An error occurred during the attach; clean up & return failure. 7304 */ 7305 7306 devid_failed: 7307 7308 setup_pm_failed: 7309 ddi_remove_minor_node(devi, NULL); 7310 7311 cmlb_attach_failed: 7312 /* 7313 * Cleanup from the scsi_ifsetcap() calls (437868) 7314 */ 7315 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7316 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7317 7318 /* 7319 * Refer to the comments of setting tagged-qing in the beginning of 7320 * sd_unit_attach. We can only disable tagged queuing when there is 7321 * no lun attached on the target. 7322 */ 7323 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7324 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7325 } 7326 7327 if (un->un_f_is_fibre == FALSE) { 7328 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7329 } 7330 7331 spinup_failed: 7332 7333 mutex_enter(SD_MUTEX(un)); 7334 7335 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7336 if (un->un_direct_priority_timeid != NULL) { 7337 timeout_id_t temp_id = un->un_direct_priority_timeid; 7338 un->un_direct_priority_timeid = NULL; 7339 mutex_exit(SD_MUTEX(un)); 7340 (void) untimeout(temp_id); 7341 mutex_enter(SD_MUTEX(un)); 7342 } 7343 7344 /* Cancel any pending start/stop timeouts */ 7345 if (un->un_startstop_timeid != NULL) { 7346 timeout_id_t temp_id = un->un_startstop_timeid; 7347 un->un_startstop_timeid = NULL; 7348 mutex_exit(SD_MUTEX(un)); 7349 (void) untimeout(temp_id); 7350 mutex_enter(SD_MUTEX(un)); 7351 } 7352 7353 /* Cancel any pending reset-throttle timeouts */ 7354 if (un->un_reset_throttle_timeid != NULL) { 7355 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7356 un->un_reset_throttle_timeid = NULL; 7357 mutex_exit(SD_MUTEX(un)); 7358 (void) untimeout(temp_id); 7359 mutex_enter(SD_MUTEX(un)); 7360 } 7361 7362 /* Cancel any pending retry timeouts */ 7363 if (un->un_retry_timeid != NULL) { 7364 timeout_id_t temp_id = un->un_retry_timeid; 7365 un->un_retry_timeid = NULL; 7366 mutex_exit(SD_MUTEX(un)); 7367 (void) untimeout(temp_id); 7368 mutex_enter(SD_MUTEX(un)); 7369 } 7370 7371 /* Cancel any pending delayed cv broadcast timeouts */ 7372 if (un->un_dcvb_timeid != NULL) { 7373 timeout_id_t temp_id = un->un_dcvb_timeid; 7374 un->un_dcvb_timeid = NULL; 7375 mutex_exit(SD_MUTEX(un)); 7376 (void) untimeout(temp_id); 7377 mutex_enter(SD_MUTEX(un)); 7378 } 7379 7380 mutex_exit(SD_MUTEX(un)); 7381 7382 /* There should not be any in-progress I/O so ASSERT this check */ 7383 ASSERT(un->un_ncmds_in_transport == 0); 7384 ASSERT(un->un_ncmds_in_driver == 0); 7385 7386 /* Do not free the softstate if the callback routine is active */ 7387 sd_sync_with_callback(un); 7388 7389 /* 7390 * Partition stats apparently are not used with removables. These would 7391 * not have been created during attach, so no need to clean them up... 7392 */ 7393 if (un->un_stats != NULL) { 7394 kstat_delete(un->un_stats); 7395 un->un_stats = NULL; 7396 } 7397 if (un->un_errstats != NULL) { 7398 kstat_delete(un->un_errstats); 7399 un->un_errstats = NULL; 7400 } 7401 7402 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7403 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7404 7405 ddi_prop_remove_all(devi); 7406 sema_destroy(&un->un_semoclose); 7407 cv_destroy(&un->un_state_cv); 7408 7409 getrbuf_failed: 7410 7411 sd_free_rqs(un); 7412 7413 alloc_rqs_failed: 7414 7415 devp->sd_private = NULL; 7416 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7417 7418 get_softstate_failed: 7419 /* 7420 * Note: the man pages are unclear as to whether or not doing a 7421 * ddi_soft_state_free(sd_state, instance) is the right way to 7422 * clean up after the ddi_soft_state_zalloc() if the subsequent 7423 * ddi_get_soft_state() fails. The implication seems to be 7424 * that the get_soft_state cannot fail if the zalloc succeeds. 7425 */ 7426 ddi_soft_state_free(sd_state, instance); 7427 7428 probe_failed: 7429 scsi_unprobe(devp); 7430 #ifdef SDDEBUG 7431 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 7432 (sd_level_mask & SD_LOGMASK_TRACE)) { 7433 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 7434 (void *)un); 7435 } 7436 #endif 7437 return (DDI_FAILURE); 7438 } 7439 7440 7441 /* 7442 * Function: sd_unit_detach 7443 * 7444 * Description: Performs DDI_DETACH processing for sddetach(). 
7445 * 7446 * Return Code: DDI_SUCCESS 7447 * DDI_FAILURE 7448 * 7449 * Context: Kernel thread context 7450 */ 7451 7452 static int 7453 sd_unit_detach(dev_info_t *devi) 7454 { 7455 struct scsi_device *devp; 7456 struct sd_lun *un; 7457 int i; 7458 int tgt; 7459 dev_t dev; 7460 dev_info_t *pdip = ddi_get_parent(devi); 7461 int instance = ddi_get_instance(devi); 7462 7463 mutex_enter(&sd_detach_mutex); 7464 7465 /* 7466 * Fail the detach for any of the following: 7467 * - Unable to get the sd_lun struct for the instance 7468 * - A layered driver has an outstanding open on the instance 7469 * - Another thread is already detaching this instance 7470 * - Another thread is currently performing an open 7471 */ 7472 devp = ddi_get_driver_private(devi); 7473 if ((devp == NULL) || 7474 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7475 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7476 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7477 mutex_exit(&sd_detach_mutex); 7478 return (DDI_FAILURE); 7479 } 7480 7481 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7482 7483 /* 7484 * Mark this instance as currently in a detach, to inhibit any 7485 * opens from a layered driver. 7486 */ 7487 un->un_detach_count++; 7488 mutex_exit(&sd_detach_mutex); 7489 7490 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7491 SCSI_ADDR_PROP_TARGET, -1); 7492 7493 dev = sd_make_device(SD_DEVINFO(un)); 7494 7495 #ifndef lint 7496 _NOTE(COMPETING_THREADS_NOW); 7497 #endif 7498 7499 mutex_enter(SD_MUTEX(un)); 7500 7501 /* 7502 * Fail the detach if there are any outstanding layered 7503 * opens on this device. 7504 */ 7505 for (i = 0; i < NDKMAP; i++) { 7506 if (un->un_ocmap.lyropen[i] != 0) { 7507 goto err_notclosed; 7508 } 7509 } 7510 7511 /* 7512 * Verify there are NO outstanding commands issued to this device. 7513 * ie, un_ncmds_in_transport == 0. 7514 * It's possible to have outstanding commands through the physio 7515 * code path, even though everything's closed. 7516 */ 7517 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7518 (un->un_direct_priority_timeid != NULL) || 7519 (un->un_state == SD_STATE_RWAIT)) { 7520 mutex_exit(SD_MUTEX(un)); 7521 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7522 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7523 goto err_stillbusy; 7524 } 7525 7526 /* 7527 * If we have the device reserved, release the reservation. 7528 */ 7529 if ((un->un_resvd_status & SD_RESERVE) && 7530 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7531 mutex_exit(SD_MUTEX(un)); 7532 /* 7533 * Note: sd_reserve_release sends a command to the device 7534 * via the sd_ioctlcmd() path, and can sleep. 7535 */ 7536 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7537 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7538 "sd_dr_detach: Cannot release reservation \n"); 7539 } 7540 } else { 7541 mutex_exit(SD_MUTEX(un)); 7542 } 7543 7544 /* 7545 * Untimeout any reserve recover, throttle reset, restart unit 7546 * and delayed broadcast timeout threads. Protect the timeout pointer 7547 * from getting nulled by their callback functions. 
7548 */ 7549 mutex_enter(SD_MUTEX(un)); 7550 if (un->un_resvd_timeid != NULL) { 7551 timeout_id_t temp_id = un->un_resvd_timeid; 7552 un->un_resvd_timeid = NULL; 7553 mutex_exit(SD_MUTEX(un)); 7554 (void) untimeout(temp_id); 7555 mutex_enter(SD_MUTEX(un)); 7556 } 7557 7558 if (un->un_reset_throttle_timeid != NULL) { 7559 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7560 un->un_reset_throttle_timeid = NULL; 7561 mutex_exit(SD_MUTEX(un)); 7562 (void) untimeout(temp_id); 7563 mutex_enter(SD_MUTEX(un)); 7564 } 7565 7566 if (un->un_startstop_timeid != NULL) { 7567 timeout_id_t temp_id = un->un_startstop_timeid; 7568 un->un_startstop_timeid = NULL; 7569 mutex_exit(SD_MUTEX(un)); 7570 (void) untimeout(temp_id); 7571 mutex_enter(SD_MUTEX(un)); 7572 } 7573 7574 if (un->un_dcvb_timeid != NULL) { 7575 timeout_id_t temp_id = un->un_dcvb_timeid; 7576 un->un_dcvb_timeid = NULL; 7577 mutex_exit(SD_MUTEX(un)); 7578 (void) untimeout(temp_id); 7579 } else { 7580 mutex_exit(SD_MUTEX(un)); 7581 } 7582 7583 /* Remove any pending reservation reclaim requests for this device */ 7584 sd_rmv_resv_reclaim_req(dev); 7585 7586 mutex_enter(SD_MUTEX(un)); 7587 7588 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7589 if (un->un_direct_priority_timeid != NULL) { 7590 timeout_id_t temp_id = un->un_direct_priority_timeid; 7591 un->un_direct_priority_timeid = NULL; 7592 mutex_exit(SD_MUTEX(un)); 7593 (void) untimeout(temp_id); 7594 mutex_enter(SD_MUTEX(un)); 7595 } 7596 7597 /* Cancel any active multi-host disk watch thread requests */ 7598 if (un->un_mhd_token != NULL) { 7599 mutex_exit(SD_MUTEX(un)); 7600 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7601 if (scsi_watch_request_terminate(un->un_mhd_token, 7602 SCSI_WATCH_TERMINATE_NOWAIT)) { 7603 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7604 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7605 /* 7606 * Note: We are returning here after having removed 7607 * some driver timeouts above. This is consistent with 7608 * the legacy implementation but perhaps the watch 7609 * terminate call should be made with the wait flag set. 7610 */ 7611 goto err_stillbusy; 7612 } 7613 mutex_enter(SD_MUTEX(un)); 7614 un->un_mhd_token = NULL; 7615 } 7616 7617 if (un->un_swr_token != NULL) { 7618 mutex_exit(SD_MUTEX(un)); 7619 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7620 if (scsi_watch_request_terminate(un->un_swr_token, 7621 SCSI_WATCH_TERMINATE_NOWAIT)) { 7622 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7623 "sd_dr_detach: Cannot cancel swr watch request\n"); 7624 /* 7625 * Note: We are returning here after having removed 7626 * some driver timeouts above. This is consistent with 7627 * the legacy implementation but perhaps the watch 7628 * terminate call should be made with the wait flag set. 7629 */ 7630 goto err_stillbusy; 7631 } 7632 mutex_enter(SD_MUTEX(un)); 7633 un->un_swr_token = NULL; 7634 } 7635 7636 mutex_exit(SD_MUTEX(un)); 7637 7638 /* 7639 * Clear any scsi_reset_notifies. We clear the reset notifies 7640 * if we have not registered one. 7641 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7642 */ 7643 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7644 sd_mhd_reset_notify_cb, (caddr_t)un); 7645 7646 /* 7647 * protect the timeout pointers from getting nulled by 7648 * their callback functions during the cancellation process. 7649 * In such a scenario untimeout can be invoked with a null value. 
7650 */ 7651 _NOTE(NO_COMPETING_THREADS_NOW); 7652 7653 mutex_enter(&un->un_pm_mutex); 7654 if (un->un_pm_idle_timeid != NULL) { 7655 timeout_id_t temp_id = un->un_pm_idle_timeid; 7656 un->un_pm_idle_timeid = NULL; 7657 mutex_exit(&un->un_pm_mutex); 7658 7659 /* 7660 * Timeout is active; cancel it. 7661 * Note that it'll never be active on a device 7662 * that does not support PM therefore we don't 7663 * have to check before calling pm_idle_component. 7664 */ 7665 (void) untimeout(temp_id); 7666 (void) pm_idle_component(SD_DEVINFO(un), 0); 7667 mutex_enter(&un->un_pm_mutex); 7668 } 7669 7670 /* 7671 * Check whether there is already a timeout scheduled for power 7672 * management. If yes then don't lower the power here, that's. 7673 * the timeout handler's job. 7674 */ 7675 if (un->un_pm_timeid != NULL) { 7676 timeout_id_t temp_id = un->un_pm_timeid; 7677 un->un_pm_timeid = NULL; 7678 mutex_exit(&un->un_pm_mutex); 7679 /* 7680 * Timeout is active; cancel it. 7681 * Note that it'll never be active on a device 7682 * that does not support PM therefore we don't 7683 * have to check before calling pm_idle_component. 7684 */ 7685 (void) untimeout(temp_id); 7686 (void) pm_idle_component(SD_DEVINFO(un), 0); 7687 7688 } else { 7689 mutex_exit(&un->un_pm_mutex); 7690 if ((un->un_f_pm_is_enabled == TRUE) && 7691 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 7692 DDI_SUCCESS)) { 7693 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7694 "sd_dr_detach: Lower power request failed, ignoring.\n"); 7695 /* 7696 * Fix for bug: 4297749, item # 13 7697 * The above test now includes a check to see if PM is 7698 * supported by this device before call 7699 * pm_lower_power(). 7700 * Note, the following is not dead code. The call to 7701 * pm_lower_power above will generate a call back into 7702 * our sdpower routine which might result in a timeout 7703 * handler getting activated. Therefore the following 7704 * code is valid and necessary. 7705 */ 7706 mutex_enter(&un->un_pm_mutex); 7707 if (un->un_pm_timeid != NULL) { 7708 timeout_id_t temp_id = un->un_pm_timeid; 7709 un->un_pm_timeid = NULL; 7710 mutex_exit(&un->un_pm_mutex); 7711 (void) untimeout(temp_id); 7712 (void) pm_idle_component(SD_DEVINFO(un), 0); 7713 } else { 7714 mutex_exit(&un->un_pm_mutex); 7715 } 7716 } 7717 } 7718 7719 /* 7720 * Cleanup from the scsi_ifsetcap() calls (437868) 7721 * Relocated here from above to be after the call to 7722 * pm_lower_power, which was getting errors. 7723 */ 7724 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7725 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7726 7727 /* 7728 * Currently, tagged queuing is supported per target based by HBA. 7729 * Setting this per lun instance actually sets the capability of this 7730 * target in HBA, which affects those luns already attached on the 7731 * same target. So during detach, we can only disable this capability 7732 * only when this is the only lun left on this target. By doing 7733 * this, we assume a target has the same tagged queuing capability 7734 * for every lun. The condition can be removed when HBA is changed to 7735 * support per lun based tagged queuing capability. 
7736 */ 7737 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7738 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7739 } 7740 7741 if (un->un_f_is_fibre == FALSE) { 7742 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7743 } 7744 7745 /* 7746 * Remove any event callbacks, fibre only 7747 */ 7748 if (un->un_f_is_fibre == TRUE) { 7749 if ((un->un_insert_event != NULL) && 7750 (ddi_remove_event_handler(un->un_insert_cb_id) != 7751 DDI_SUCCESS)) { 7752 /* 7753 * Note: We are returning here after having done 7754 * substantial cleanup above. This is consistent 7755 * with the legacy implementation but this may not 7756 * be the right thing to do. 7757 */ 7758 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7759 "sd_dr_detach: Cannot cancel insert event\n"); 7760 goto err_remove_event; 7761 } 7762 un->un_insert_event = NULL; 7763 7764 if ((un->un_remove_event != NULL) && 7765 (ddi_remove_event_handler(un->un_remove_cb_id) != 7766 DDI_SUCCESS)) { 7767 /* 7768 * Note: We are returning here after having done 7769 * substantial cleanup above. This is consistent 7770 * with the legacy implementation but this may not 7771 * be the right thing to do. 7772 */ 7773 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7774 "sd_dr_detach: Cannot cancel remove event\n"); 7775 goto err_remove_event; 7776 } 7777 un->un_remove_event = NULL; 7778 } 7779 7780 /* Do not free the softstate if the callback routine is active */ 7781 sd_sync_with_callback(un); 7782 7783 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7784 cmlb_free_handle(&un->un_cmlbhandle); 7785 7786 /* 7787 * Hold the detach mutex here, to make sure that no other threads ever 7788 * can access a (partially) freed soft state structure. 7789 */ 7790 mutex_enter(&sd_detach_mutex); 7791 7792 /* 7793 * Clean up the soft state struct. 7794 * Cleanup is done in reverse order of allocs/inits. 7795 * At this point there should be no competing threads anymore. 7796 */ 7797 7798 /* Unregister and free device id. */ 7799 ddi_devid_unregister(devi); 7800 if (un->un_devid) { 7801 ddi_devid_free(un->un_devid); 7802 un->un_devid = NULL; 7803 } 7804 7805 /* 7806 * Destroy wmap cache if it exists. 7807 */ 7808 if (un->un_wm_cache != NULL) { 7809 kmem_cache_destroy(un->un_wm_cache); 7810 un->un_wm_cache = NULL; 7811 } 7812 7813 /* 7814 * kstat cleanup is done in detach for all device types (4363169). 7815 * We do not want to fail detach if the device kstats are not deleted 7816 * since there is a confusion about the devo_refcnt for the device. 7817 * We just delete the kstats and let detach complete successfully. 7818 */ 7819 if (un->un_stats != NULL) { 7820 kstat_delete(un->un_stats); 7821 un->un_stats = NULL; 7822 } 7823 if (un->un_errstats != NULL) { 7824 kstat_delete(un->un_errstats); 7825 un->un_errstats = NULL; 7826 } 7827 7828 /* Remove partition stats */ 7829 if (un->un_f_pkstats_enabled) { 7830 for (i = 0; i < NSDMAP; i++) { 7831 if (un->un_pstats[i] != NULL) { 7832 kstat_delete(un->un_pstats[i]); 7833 un->un_pstats[i] = NULL; 7834 } 7835 } 7836 } 7837 7838 /* Remove xbuf registration */ 7839 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7840 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7841 7842 /* Remove driver properties */ 7843 ddi_prop_remove_all(devi); 7844 7845 mutex_destroy(&un->un_pm_mutex); 7846 cv_destroy(&un->un_pm_busy_cv); 7847 7848 cv_destroy(&un->un_wcc_cv); 7849 7850 /* Open/close semaphore */ 7851 sema_destroy(&un->un_semoclose); 7852 7853 /* Removable media condvar. 
*/ 7854 cv_destroy(&un->un_state_cv); 7855 7856 /* Suspend/resume condvar. */ 7857 cv_destroy(&un->un_suspend_cv); 7858 cv_destroy(&un->un_disk_busy_cv); 7859 7860 sd_free_rqs(un); 7861 7862 /* Free up soft state */ 7863 devp->sd_private = NULL; 7864 7865 bzero(un, sizeof (struct sd_lun)); 7866 ddi_soft_state_free(sd_state, instance); 7867 7868 mutex_exit(&sd_detach_mutex); 7869 7870 /* This frees up the INQUIRY data associated with the device. */ 7871 scsi_unprobe(devp); 7872 7873 /* 7874 * After successfully detaching an instance, we update the information 7875 * of how many luns have been attached in the relative target and 7876 * controller for parallel SCSI. This information is used when sd tries 7877 * to set the tagged queuing capability in HBA. 7878 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 7879 * check if the device is parallel SCSI. However, we don't need to 7880 * check here because we've already checked during attach. No device 7881 * that is not parallel SCSI is in the chain. 7882 */ 7883 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7884 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 7885 } 7886 7887 return (DDI_SUCCESS); 7888 7889 err_notclosed: 7890 mutex_exit(SD_MUTEX(un)); 7891 7892 err_stillbusy: 7893 _NOTE(NO_COMPETING_THREADS_NOW); 7894 7895 err_remove_event: 7896 mutex_enter(&sd_detach_mutex); 7897 un->un_detach_count--; 7898 mutex_exit(&sd_detach_mutex); 7899 7900 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 7901 return (DDI_FAILURE); 7902 } 7903 7904 7905 /* 7906 * Function: sd_create_errstats 7907 * 7908 * Description: This routine instantiates the device error stats. 7909 * 7910 * Note: During attach the stats are instantiated first so they are 7911 * available for attach-time routines that utilize the driver 7912 * iopath to send commands to the device. The stats are initialized 7913 * separately so data obtained during some attach-time routines is 7914 * available. 
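 *
 *		The named error kstat created here is the one consumed by
 *		iostat(1M) -E. For illustration, a userland reader might
 *		fetch it with libkstat roughly as follows (a hedged sketch;
 *		for the sd binary the module "sderr", name "sd<instance>,err"
 *		and class "device_error" match the kstat_create() call below):
 *
 *			kstat_ctl_t *kc = kstat_open();
 *			kstat_t *ksp = kstat_lookup(kc, "sderr", 0, "sd0,err");
 *			kstat_named_t *kn;
 *			if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
 *				kn = kstat_data_lookup(ksp, "Hard Errors");
 *			}
 *			(void) kstat_close(kc);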
(4362483) 7915 * 7916 * Arguments: un - driver soft state (unit) structure 7917 * instance - driver instance 7918 * 7919 * Context: Kernel thread context 7920 */ 7921 7922 static void 7923 sd_create_errstats(struct sd_lun *un, int instance) 7924 { 7925 struct sd_errstats *stp; 7926 char kstatmodule_err[KSTAT_STRLEN]; 7927 char kstatname[KSTAT_STRLEN]; 7928 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 7929 7930 ASSERT(un != NULL); 7931 7932 if (un->un_errstats != NULL) { 7933 return; 7934 } 7935 7936 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 7937 "%serr", sd_label); 7938 (void) snprintf(kstatname, sizeof (kstatname), 7939 "%s%d,err", sd_label, instance); 7940 7941 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 7942 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 7943 7944 if (un->un_errstats == NULL) { 7945 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7946 "sd_create_errstats: Failed kstat_create\n"); 7947 return; 7948 } 7949 7950 stp = (struct sd_errstats *)un->un_errstats->ks_data; 7951 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 7952 KSTAT_DATA_UINT32); 7953 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 7954 KSTAT_DATA_UINT32); 7955 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 7956 KSTAT_DATA_UINT32); 7957 kstat_named_init(&stp->sd_vid, "Vendor", 7958 KSTAT_DATA_CHAR); 7959 kstat_named_init(&stp->sd_pid, "Product", 7960 KSTAT_DATA_CHAR); 7961 kstat_named_init(&stp->sd_revision, "Revision", 7962 KSTAT_DATA_CHAR); 7963 kstat_named_init(&stp->sd_serial, "Serial No", 7964 KSTAT_DATA_CHAR); 7965 kstat_named_init(&stp->sd_capacity, "Size", 7966 KSTAT_DATA_ULONGLONG); 7967 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 7968 KSTAT_DATA_UINT32); 7969 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 7970 KSTAT_DATA_UINT32); 7971 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 7972 KSTAT_DATA_UINT32); 7973 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 7974 KSTAT_DATA_UINT32); 7975 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 7976 KSTAT_DATA_UINT32); 7977 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 7978 KSTAT_DATA_UINT32); 7979 7980 un->un_errstats->ks_private = un; 7981 un->un_errstats->ks_update = nulldev; 7982 7983 kstat_install(un->un_errstats); 7984 } 7985 7986 7987 /* 7988 * Function: sd_set_errstats 7989 * 7990 * Description: This routine sets the value of the vendor id, product id, 7991 * revision, serial number, and capacity device error stats. 7992 * 7993 * Note: During attach the stats are instantiated first so they are 7994 * available for attach-time routines that utilize the driver 7995 * iopath to send commands to the device. The stats are initialized 7996 * separately so data obtained during some attach-time routines is 7997 * available. 
(4362483) 7998 * 7999 * Arguments: un - driver soft state (unit) structure 8000 * 8001 * Context: Kernel thread context 8002 */ 8003 8004 static void 8005 sd_set_errstats(struct sd_lun *un) 8006 { 8007 struct sd_errstats *stp; 8008 8009 ASSERT(un != NULL); 8010 ASSERT(un->un_errstats != NULL); 8011 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8012 ASSERT(stp != NULL); 8013 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8014 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8015 (void) strncpy(stp->sd_revision.value.c, 8016 un->un_sd->sd_inq->inq_revision, 4); 8017 8018 /* 8019 * All the errstats are persistent across detach/attach, 8020 * so reset all the errstats here in case of the hot 8021 * replacement of disk drives, except for not changed 8022 * Sun qualified drives. 8023 */ 8024 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8025 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8026 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8027 stp->sd_softerrs.value.ui32 = 0; 8028 stp->sd_harderrs.value.ui32 = 0; 8029 stp->sd_transerrs.value.ui32 = 0; 8030 stp->sd_rq_media_err.value.ui32 = 0; 8031 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8032 stp->sd_rq_nodev_err.value.ui32 = 0; 8033 stp->sd_rq_recov_err.value.ui32 = 0; 8034 stp->sd_rq_illrq_err.value.ui32 = 0; 8035 stp->sd_rq_pfa_err.value.ui32 = 0; 8036 } 8037 8038 /* 8039 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8040 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8041 * (4376302)) 8042 */ 8043 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8044 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8045 sizeof (SD_INQUIRY(un)->inq_serial)); 8046 } 8047 8048 if (un->un_f_blockcount_is_valid != TRUE) { 8049 /* 8050 * Set capacity error stat to 0 for no media. This ensures 8051 * a valid capacity is displayed in response to 'iostat -E' 8052 * when no media is present in the device. 8053 */ 8054 stp->sd_capacity.value.ui64 = 0; 8055 } else { 8056 /* 8057 * Multiply un_blockcount by un->un_sys_blocksize to get 8058 * capacity. 8059 * 8060 * Note: for non-512 blocksize devices "un_blockcount" has been 8061 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8062 * (un_tgt_blocksize / un->un_sys_blocksize). 8063 */ 8064 stp->sd_capacity.value.ui64 = (uint64_t) 8065 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8066 } 8067 } 8068 8069 8070 /* 8071 * Function: sd_set_pstats 8072 * 8073 * Description: This routine instantiates and initializes the partition 8074 * stats for each partition with more than zero blocks. 8075 * (4363169) 8076 * 8077 * Arguments: un - driver soft state (unit) structure 8078 * 8079 * Context: Kernel thread context 8080 */ 8081 8082 static void 8083 sd_set_pstats(struct sd_lun *un) 8084 { 8085 char kstatname[KSTAT_STRLEN]; 8086 int instance; 8087 int i; 8088 diskaddr_t nblks = 0; 8089 char *partname = NULL; 8090 8091 ASSERT(un != NULL); 8092 8093 instance = ddi_get_instance(SD_DEVINFO(un)); 8094 8095 /* Note:x86: is this a VTOC8/VTOC16 difference? 
 */
8096    for (i = 0; i < NSDMAP; i++) {
8097
8098        if (cmlb_partinfo(un->un_cmlbhandle, i,
8099            &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
8100            continue;
8101        mutex_enter(SD_MUTEX(un));
8102
8103        if ((un->un_pstats[i] == NULL) &&
8104            (nblks != 0)) {
8105
8106            (void) snprintf(kstatname, sizeof (kstatname),
8107                "%s%d,%s", sd_label, instance,
8108                partname);
8109
8110            un->un_pstats[i] = kstat_create(sd_label,
8111                instance, kstatname, "partition", KSTAT_TYPE_IO,
8112                1, KSTAT_FLAG_PERSISTENT);
8113            if (un->un_pstats[i] != NULL) {
8114                un->un_pstats[i]->ks_lock = SD_MUTEX(un);
8115                kstat_install(un->un_pstats[i]);
8116            }
8117        }
8118        mutex_exit(SD_MUTEX(un));
8119    }
8120 }
8121
8122
8123 #if (defined(__fibre))
8124 /*
8125  * Function: sd_init_event_callbacks
8126  *
8127  * Description: This routine initializes the insertion and removal event
8128  *		callbacks. (fibre only)
8129  *
8130  * Arguments: un - driver soft state (unit) structure
8131  *
8132  * Context: Kernel thread context
8133  */
8134
8135 static void
8136 sd_init_event_callbacks(struct sd_lun *un)
8137 {
8138    ASSERT(un != NULL);
8139
8140    if ((un->un_insert_event == NULL) &&
8141        (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
8142        &un->un_insert_event) == DDI_SUCCESS)) {
8143        /*
8144         * Add the callback for an insertion event
8145         */
8146        (void) ddi_add_event_handler(SD_DEVINFO(un),
8147            un->un_insert_event, sd_event_callback, (void *)un,
8148            &(un->un_insert_cb_id));
8149    }
8150
8151    if ((un->un_remove_event == NULL) &&
8152        (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
8153        &un->un_remove_event) == DDI_SUCCESS)) {
8154        /*
8155         * Add the callback for a removal event
8156         */
8157        (void) ddi_add_event_handler(SD_DEVINFO(un),
8158            un->un_remove_event, sd_event_callback, (void *)un,
8159            &(un->un_remove_cb_id));
8160    }
8161 }
8162
8163
8164 /*
8165  * Function: sd_event_callback
8166  *
8167  * Description: This routine handles insert/remove events (photon). The
8168  *		state is changed to OFFLINE, which can be used to suppress
8169  *		error msgs. (fibre only)
8170  *
8171  * Arguments: un - driver soft state (unit) structure
8172  *
8173  * Context: Callout thread context
8174  */
8175 /* ARGSUSED */
8176 static void
8177 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
8178     void *bus_impldata)
8179 {
8180    struct sd_lun *un = (struct sd_lun *)arg;
8181
8182    _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
8183    if (event == un->un_insert_event) {
8184        SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
8185        mutex_enter(SD_MUTEX(un));
8186        if (un->un_state == SD_STATE_OFFLINE) {
8187            if (un->un_last_state != SD_STATE_SUSPENDED) {
8188                un->un_state = un->un_last_state;
8189            } else {
8190                /*
8191                 * We have gone through SUSPEND/RESUME while
8192                 * we were offline. Restore the last state.
8193                 */
8194                un->un_state = un->un_save_state;
8195            }
8196        }
8197        mutex_exit(SD_MUTEX(un));
8198
8199        _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
8200    } else if (event == un->un_remove_event) {
8201        SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
8202        mutex_enter(SD_MUTEX(un));
8203        /*
8204         * We need to handle an event callback that occurs during
8205         * the suspend operation, since we don't prevent it.
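         *
         * For illustration, the remove-event handling below reduces
         * to this small state table:
         *
         *	current un_state        action taken
         *	--------------------    ----------------------------------
         *	SD_STATE_OFFLINE        none (already offline)
         *	SD_STATE_SUSPENDED      un_last_state = SD_STATE_OFFLINE,
         *	                        so the resume path restores OFFLINE
         *	anything else           New_state(un, SD_STATE_OFFLINE)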
8206         */
8207        if (un->un_state != SD_STATE_OFFLINE) {
8208            if (un->un_state != SD_STATE_SUSPENDED) {
8209                New_state(un, SD_STATE_OFFLINE);
8210            } else {
8211                un->un_last_state = SD_STATE_OFFLINE;
8212            }
8213        }
8214        mutex_exit(SD_MUTEX(un));
8215    } else {
8216        scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
8217            "!Unknown event\n");
8218    }
8219
8220 }
8221 #endif
8222
8223 /*
8224  * Function: sd_cache_control()
8225  *
8226  * Description: This routine is the driver entry point for setting
8227  *		read and write caching by modifying the WCE (write cache
8228  *		enable) and RCD (read cache disable) bits of mode
8229  *		page 8 (MODEPAGE_CACHING).
8230  *
8231  * Arguments: un - driver soft state (unit) structure
8232  *		rcd_flag - flag for controlling the read cache
8233  *		wce_flag - flag for controlling the write cache
8234  *
8235  * Return Code: EIO
8236  *		code returned by sd_send_scsi_MODE_SENSE and
8237  *		sd_send_scsi_MODE_SELECT
8238  *
8239  * Context: Kernel Thread
8240  */
8241
8242 static int
8243 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag)
8244 {
8245    struct mode_caching *mode_caching_page;
8246    uchar_t *header;
8247    size_t buflen;
8248    int hdrlen;
8249    int bd_len;
8250    int rval = 0;
8251    struct mode_header_grp2 *mhp;
8252
8253    ASSERT(un != NULL);
8254
8255    /*
8256     * Do a test unit ready; otherwise a mode sense may not work if this
8257     * is the first command sent to the device after boot.
8258     */
8259    (void) sd_send_scsi_TEST_UNIT_READY(un, 0);
8260
8261    if (un->un_f_cfg_is_atapi == TRUE) {
8262        hdrlen = MODE_HEADER_LENGTH_GRP2;
8263    } else {
8264        hdrlen = MODE_HEADER_LENGTH;
8265    }
8266
8267    /*
8268     * Allocate memory for the retrieved mode page and its headers. Set
8269     * a pointer to the page itself. Use mode_cache_scsi3 to ensure
8270     * we get all of the mode sense data; otherwise the mode select
8271     * will fail. mode_cache_scsi3 is a superset of mode_caching.
8272     */
8273    buflen = hdrlen + MODE_BLK_DESC_LENGTH +
8274        sizeof (struct mode_cache_scsi3);
8275
8276    header = kmem_zalloc(buflen, KM_SLEEP);
8277
8278    /* Get the information from the device. */
8279    if (un->un_f_cfg_is_atapi == TRUE) {
8280        rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen,
8281            MODEPAGE_CACHING, SD_PATH_DIRECT);
8282    } else {
8283        rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
8284            MODEPAGE_CACHING, SD_PATH_DIRECT);
8285    }
8286    if (rval != 0) {
8287        SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
8288            "sd_cache_control: Mode Sense Failed\n");
8289        kmem_free(header, buflen);
8290        return (rval);
8291    }
8292
8293    /*
8294     * Determine the size of the Block Descriptors in order to locate
8295     * the mode page data. ATAPI devices return 0, SCSI devices
8296     * should return MODE_BLK_DESC_LENGTH.
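     *
     * For illustration, the buffer returned by the MODE SENSE above
     * is laid out as:
     *
     *	offset 0:                mode header (hdrlen bytes)
     *	offset hdrlen:           block descriptor(s) (bd_len bytes,
     *	                         0 for ATAPI)
     *	offset hdrlen + bd_len:  the caching mode page itself
     *
     * which is why the page pointer below is computed as
     * (header + hdrlen + bd_len).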
8297 */ 8298 if (un->un_f_cfg_is_atapi == TRUE) { 8299 mhp = (struct mode_header_grp2 *)header; 8300 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8301 } else { 8302 bd_len = ((struct mode_header *)header)->bdesc_length; 8303 } 8304 8305 if (bd_len > MODE_BLK_DESC_LENGTH) { 8306 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8307 "sd_cache_control: Mode Sense returned invalid " 8308 "block descriptor length\n"); 8309 kmem_free(header, buflen); 8310 return (EIO); 8311 } 8312 8313 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8314 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8315 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8316 " caching page code mismatch %d\n", 8317 mode_caching_page->mode_page.code); 8318 kmem_free(header, buflen); 8319 return (EIO); 8320 } 8321 8322 /* Check the relevant bits on successful mode sense. */ 8323 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8324 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8325 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8326 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8327 8328 size_t sbuflen; 8329 uchar_t save_pg; 8330 8331 /* 8332 * Construct select buffer length based on the 8333 * length of the sense data returned. 8334 */ 8335 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8336 sizeof (struct mode_page) + 8337 (int)mode_caching_page->mode_page.length; 8338 8339 /* 8340 * Set the caching bits as requested. 8341 */ 8342 if (rcd_flag == SD_CACHE_ENABLE) 8343 mode_caching_page->rcd = 0; 8344 else if (rcd_flag == SD_CACHE_DISABLE) 8345 mode_caching_page->rcd = 1; 8346 8347 if (wce_flag == SD_CACHE_ENABLE) 8348 mode_caching_page->wce = 1; 8349 else if (wce_flag == SD_CACHE_DISABLE) 8350 mode_caching_page->wce = 0; 8351 8352 /* 8353 * Save the page if the mode sense says the 8354 * drive supports it. 8355 */ 8356 save_pg = mode_caching_page->mode_page.ps ? 8357 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8358 8359 /* Clear reserved bits before mode select. */ 8360 mode_caching_page->mode_page.ps = 0; 8361 8362 /* 8363 * Clear out mode header for mode select. 8364 * The rest of the retrieved page will be reused. 8365 */ 8366 bzero(header, hdrlen); 8367 8368 if (un->un_f_cfg_is_atapi == TRUE) { 8369 mhp = (struct mode_header_grp2 *)header; 8370 mhp->bdesc_length_hi = bd_len >> 8; 8371 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8372 } else { 8373 ((struct mode_header *)header)->bdesc_length = bd_len; 8374 } 8375 8376 /* Issue mode select to change the cache settings */ 8377 if (un->un_f_cfg_is_atapi == TRUE) { 8378 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8379 sbuflen, save_pg, SD_PATH_DIRECT); 8380 } else { 8381 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8382 sbuflen, save_pg, SD_PATH_DIRECT); 8383 } 8384 } 8385 8386 kmem_free(header, buflen); 8387 return (rval); 8388 } 8389 8390 8391 /* 8392 * Function: sd_get_write_cache_enabled() 8393 * 8394 * Description: This routine is the driver entry point for determining if 8395 * write caching is enabled. It examines the WCE (write cache 8396 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8397 * 8398 * Arguments: un - driver soft state (unit) structure 8399 * is_enabled - pointer to int where write cache enabled state 8400 * is returned (non-zero -> write cache enabled) 8401 * 8402 * 8403 * Return Code: EIO 8404 * code returned by sd_send_scsi_MODE_SENSE 8405 * 8406 * Context: Kernel Thread 8407 * 8408 * NOTE: If ioctl is added to disable write cache, this sequence should 8409 * be followed so that no locking is required for accesses to 8410 * un->un_f_write_cache_enabled: 8411 * do mode select to clear wce 8412 * do synchronize cache to flush cache 8413 * set un->un_f_write_cache_enabled = FALSE 8414 * 8415 * Conversely, an ioctl to enable the write cache should be done 8416 * in this order: 8417 * set un->un_f_write_cache_enabled = TRUE 8418 * do mode select to set wce 8419 */ 8420 8421 static int 8422 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8423 { 8424 struct mode_caching *mode_caching_page; 8425 uchar_t *header; 8426 size_t buflen; 8427 int hdrlen; 8428 int bd_len; 8429 int rval = 0; 8430 8431 ASSERT(un != NULL); 8432 ASSERT(is_enabled != NULL); 8433 8434 /* in case of error, flag as enabled */ 8435 *is_enabled = TRUE; 8436 8437 /* 8438 * Do a test unit ready, otherwise a mode sense may not work if this 8439 * is the first command sent to the device after boot. 8440 */ 8441 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8442 8443 if (un->un_f_cfg_is_atapi == TRUE) { 8444 hdrlen = MODE_HEADER_LENGTH_GRP2; 8445 } else { 8446 hdrlen = MODE_HEADER_LENGTH; 8447 } 8448 8449 /* 8450 * Allocate memory for the retrieved mode page and its headers. Set 8451 * a pointer to the page itself. 8452 */ 8453 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8454 header = kmem_zalloc(buflen, KM_SLEEP); 8455 8456 /* Get the information from the device. */ 8457 if (un->un_f_cfg_is_atapi == TRUE) { 8458 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8459 MODEPAGE_CACHING, SD_PATH_DIRECT); 8460 } else { 8461 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8462 MODEPAGE_CACHING, SD_PATH_DIRECT); 8463 } 8464 if (rval != 0) { 8465 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8466 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8467 kmem_free(header, buflen); 8468 return (rval); 8469 } 8470 8471 /* 8472 * Determine size of Block Descriptors in order to locate 8473 * the mode page data. ATAPI devices return 0, SCSI devices 8474 * should return MODE_BLK_DESC_LENGTH. 
8475 */ 8476 if (un->un_f_cfg_is_atapi == TRUE) { 8477 struct mode_header_grp2 *mhp; 8478 mhp = (struct mode_header_grp2 *)header; 8479 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8480 } else { 8481 bd_len = ((struct mode_header *)header)->bdesc_length; 8482 } 8483 8484 if (bd_len > MODE_BLK_DESC_LENGTH) { 8485 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8486 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8487 "block descriptor length\n"); 8488 kmem_free(header, buflen); 8489 return (EIO); 8490 } 8491 8492 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8493 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8494 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8495 " caching page code mismatch %d\n", 8496 mode_caching_page->mode_page.code); 8497 kmem_free(header, buflen); 8498 return (EIO); 8499 } 8500 *is_enabled = mode_caching_page->wce; 8501 8502 kmem_free(header, buflen); 8503 return (0); 8504 } 8505 8506 8507 /* 8508 * Function: sd_make_device 8509 * 8510 * Description: Utility routine to return the Solaris device number from 8511 * the data in the device's dev_info structure. 8512 * 8513 * Return Code: The Solaris device number 8514 * 8515 * Context: Any 8516 */ 8517 8518 static dev_t 8519 sd_make_device(dev_info_t *devi) 8520 { 8521 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8522 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8523 } 8524 8525 8526 /* 8527 * Function: sd_pm_entry 8528 * 8529 * Description: Called at the start of a new command to manage power 8530 * and busy status of a device. This includes determining whether 8531 * the current power state of the device is sufficient for 8532 * performing the command or whether it must be changed. 8533 * The PM framework is notified appropriately. 8534 * Only with a return status of DDI_SUCCESS will the 8535 * component be busy to the framework. 8536 * 8537 * All callers of sd_pm_entry must check the return status 8538 * and only call sd_pm_exit it it was DDI_SUCCESS. A status 8539 * of DDI_FAILURE indicates the device failed to power up. 8540 * In this case un_pm_count has been adjusted so the result 8541 * on exit is still powered down, ie. count is less than 0. 8542 * Calling sd_pm_exit with this count value hits an ASSERT. 8543 * 8544 * Return Code: DDI_SUCCESS or DDI_FAILURE 8545 * 8546 * Context: Kernel thread context. 8547 */ 8548 8549 static int 8550 sd_pm_entry(struct sd_lun *un) 8551 { 8552 int return_status = DDI_SUCCESS; 8553 8554 ASSERT(!mutex_owned(SD_MUTEX(un))); 8555 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8556 8557 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8558 8559 if (un->un_f_pm_is_enabled == FALSE) { 8560 SD_TRACE(SD_LOG_IO_PM, un, 8561 "sd_pm_entry: exiting, PM not enabled\n"); 8562 return (return_status); 8563 } 8564 8565 /* 8566 * Just increment a counter if PM is enabled. On the transition from 8567 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8568 * the count with each IO and mark the device as idle when the count 8569 * hits 0. 8570 * 8571 * If the count is less than 0 the device is powered down. If a powered 8572 * down device is successfully powered up then the count must be 8573 * incremented to reflect the power up. Note that it'll get incremented 8574 * a second time to become busy. 8575 * 8576 * Because the following has the potential to change the device state 8577 * and must release the un_pm_mutex to do so, only one thread can be 8578 * allowed through at a time. 
8579 */ 8580 8581 mutex_enter(&un->un_pm_mutex); 8582 while (un->un_pm_busy == TRUE) { 8583 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8584 } 8585 un->un_pm_busy = TRUE; 8586 8587 if (un->un_pm_count < 1) { 8588 8589 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8590 8591 /* 8592 * Indicate we are now busy so the framework won't attempt to 8593 * power down the device. This call will only fail if either 8594 * we passed a bad component number or the device has no 8595 * components. Neither of these should ever happen. 8596 */ 8597 mutex_exit(&un->un_pm_mutex); 8598 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8599 ASSERT(return_status == DDI_SUCCESS); 8600 8601 mutex_enter(&un->un_pm_mutex); 8602 8603 if (un->un_pm_count < 0) { 8604 mutex_exit(&un->un_pm_mutex); 8605 8606 SD_TRACE(SD_LOG_IO_PM, un, 8607 "sd_pm_entry: power up component\n"); 8608 8609 /* 8610 * pm_raise_power will cause sdpower to be called 8611 * which brings the device power level to the 8612 * desired state, ON in this case. If successful, 8613 * un_pm_count and un_power_level will be updated 8614 * appropriately. 8615 */ 8616 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8617 SD_SPINDLE_ON); 8618 8619 mutex_enter(&un->un_pm_mutex); 8620 8621 if (return_status != DDI_SUCCESS) { 8622 /* 8623 * Power up failed. 8624 * Idle the device and adjust the count 8625 * so the result on exit is that we're 8626 * still powered down, ie. count is less than 0. 8627 */ 8628 SD_TRACE(SD_LOG_IO_PM, un, 8629 "sd_pm_entry: power up failed," 8630 " idle the component\n"); 8631 8632 (void) pm_idle_component(SD_DEVINFO(un), 0); 8633 un->un_pm_count--; 8634 } else { 8635 /* 8636 * Device is powered up, verify the 8637 * count is non-negative. 8638 * This is debug only. 8639 */ 8640 ASSERT(un->un_pm_count == 0); 8641 } 8642 } 8643 8644 if (return_status == DDI_SUCCESS) { 8645 /* 8646 * For performance, now that the device has been tagged 8647 * as busy, and it's known to be powered up, update the 8648 * chain types to use jump tables that do not include 8649 * pm. This significantly lowers the overhead and 8650 * therefore improves performance. 8651 */ 8652 8653 mutex_exit(&un->un_pm_mutex); 8654 mutex_enter(SD_MUTEX(un)); 8655 SD_TRACE(SD_LOG_IO_PM, un, 8656 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8657 un->un_uscsi_chain_type); 8658 8659 if (un->un_f_non_devbsize_supported) { 8660 un->un_buf_chain_type = 8661 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8662 } else { 8663 un->un_buf_chain_type = 8664 SD_CHAIN_INFO_DISK_NO_PM; 8665 } 8666 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8667 8668 SD_TRACE(SD_LOG_IO_PM, un, 8669 " changed uscsi_chain_type to %d\n", 8670 un->un_uscsi_chain_type); 8671 mutex_exit(SD_MUTEX(un)); 8672 mutex_enter(&un->un_pm_mutex); 8673 8674 if (un->un_pm_idle_timeid == NULL) { 8675 /* 300 ms. */ 8676 un->un_pm_idle_timeid = 8677 timeout(sd_pm_idletimeout_handler, un, 8678 (drv_usectohz((clock_t)300000))); 8679 /* 8680 * Include an extra call to busy which keeps the 8681 * device busy with-respect-to the PM layer 8682 * until the timer fires, at which time it'll 8683 * get the extra idle call. 8684 */ 8685 (void) pm_busy_component(SD_DEVINFO(un), 0); 8686 } 8687 } 8688 } 8689 un->un_pm_busy = FALSE; 8690 /* Next... 
 */
8691    cv_signal(&un->un_pm_busy_cv);
8692
8693    un->un_pm_count++;
8694
8695    SD_TRACE(SD_LOG_IO_PM, un,
8696        "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);
8697
8698    mutex_exit(&un->un_pm_mutex);
8699
8700    return (return_status);
8701 }
8702
8703
8704 /*
8705  * Function: sd_pm_exit
8706  *
8707  * Description: Called at the completion of a command to manage busy
8708  *		status for the device. If the device becomes idle the
8709  *		PM framework is notified.
8710  *
8711  * Context: Kernel thread context
8712  */
8713
8714 static void
8715 sd_pm_exit(struct sd_lun *un)
8716 {
8717    ASSERT(!mutex_owned(SD_MUTEX(un)));
8718    ASSERT(!mutex_owned(&un->un_pm_mutex));
8719
8720    SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");
8721
8722    /*
8723     * After attach the following flag is only read, so don't
8724     * take the penalty of acquiring a mutex for it.
8725     */
8726    if (un->un_f_pm_is_enabled == TRUE) {
8727
8728        mutex_enter(&un->un_pm_mutex);
8729        un->un_pm_count--;
8730
8731        SD_TRACE(SD_LOG_IO_PM, un,
8732            "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);
8733
8734        ASSERT(un->un_pm_count >= 0);
8735        if (un->un_pm_count == 0) {
8736            mutex_exit(&un->un_pm_mutex);
8737
8738            SD_TRACE(SD_LOG_IO_PM, un,
8739                "sd_pm_exit: idle component\n");
8740
8741            (void) pm_idle_component(SD_DEVINFO(un), 0);
8742
8743        } else {
8744            mutex_exit(&un->un_pm_mutex);
8745        }
8746    }
8747
8748    SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
8749 }
8750
8751
8752 /*
8753  * Function: sdopen
8754  *
8755  * Description: Driver's open(9e) entry point function.
8756  *
8757  * Arguments: dev_p - pointer to device number
8758  *		flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
8759  *		otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
8760  *		cred_p - user credential pointer
8761  *
8762  * Return Code: EINVAL
8763  *		ENXIO
8764  *		EIO
8765  *		EROFS
8766  *		EBUSY
8767  *
8768  * Context: Kernel thread context
8769  */
8770 /* ARGSUSED */
8771 static int
8772 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
8773 {
8774    struct sd_lun *un;
8775    int nodelay;
8776    int part;
8777    uint64_t partmask;
8778    int instance;
8779    dev_t dev;
8780    int rval = EIO;
8781    diskaddr_t nblks = 0;
8782
8783    /* Validate the open type */
8784    if (otyp >= OTYPCNT) {
8785        return (EINVAL);
8786    }
8787
8788    dev = *dev_p;
8789    instance = SDUNIT(dev);
8790    mutex_enter(&sd_detach_mutex);
8791
8792    /*
8793     * Fail the open if there is no softstate for the instance, or
8794     * if another thread somewhere is trying to detach the instance.
8795     */
8796    if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
8797        (un->un_detach_count != 0)) {
8798        mutex_exit(&sd_detach_mutex);
8799        /*
8800         * The probe cache only needs to be cleared when open (9e)
8801         * fails with ENXIO (4238046).
8802         */
8803        /*
8804         * Unconditionally clearing the probe cache is fine with
8805         * separate sd/ssd binaries; on x86, where both parallel
8806         * and fibre can be handled by one binary, this can be
8807         * an issue.
8808         */
8809        sd_scsi_clear_probe_cache();
8810        return (ENXIO);
8811    }
8812
8813    /*
8814     * The un_layer_count is to prevent another thread in specfs from
8815     * trying to detach the instance, which can happen when we are
8816     * called from a higher-layer driver instead of thru specfs.
8817     * This will not be needed when DDI provides a layered driver
8818     * interface that allows specfs to know that an instance is in
8819     * use by a layered driver & should not be detached.
8820     *
8821     * Note: the semantics for layered driver opens are exactly one
8822     * close for every open.
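     *
     * For example (illustrative): three OTYP_LYR opens of partition 0
     * leave un_ocmap.lyropen[0] == 3 and require three matching
     * closes, whereas any number of OTYP_BLK opens of partition 0
     * just set one bit (partmask) in un_ocmap.regopen[OTYP_BLK],
     * which a single close clears.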
8823 */ 8824 if (otyp == OTYP_LYR) { 8825 un->un_layer_count++; 8826 } 8827 8828 /* 8829 * Keep a count of the current # of opens in progress. This is because 8830 * some layered drivers try to call us as a regular open. This can 8831 * cause problems that we cannot prevent, however by keeping this count 8832 * we can at least keep our open and detach routines from racing against 8833 * each other under such conditions. 8834 */ 8835 un->un_opens_in_progress++; 8836 mutex_exit(&sd_detach_mutex); 8837 8838 nodelay = (flag & (FNDELAY | FNONBLOCK)); 8839 part = SDPART(dev); 8840 partmask = 1 << part; 8841 8842 /* 8843 * We use a semaphore here in order to serialize 8844 * open and close requests on the device. 8845 */ 8846 sema_p(&un->un_semoclose); 8847 8848 mutex_enter(SD_MUTEX(un)); 8849 8850 /* 8851 * All device accesses go thru sdstrategy() where we check 8852 * on suspend status but there could be a scsi_poll command, 8853 * which bypasses sdstrategy(), so we need to check pm 8854 * status. 8855 */ 8856 8857 if (!nodelay) { 8858 while ((un->un_state == SD_STATE_SUSPENDED) || 8859 (un->un_state == SD_STATE_PM_CHANGING)) { 8860 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 8861 } 8862 8863 mutex_exit(SD_MUTEX(un)); 8864 if (sd_pm_entry(un) != DDI_SUCCESS) { 8865 rval = EIO; 8866 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 8867 "sdopen: sd_pm_entry failed\n"); 8868 goto open_failed_with_pm; 8869 } 8870 mutex_enter(SD_MUTEX(un)); 8871 } 8872 8873 /* check for previous exclusive open */ 8874 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 8875 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 8876 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 8877 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 8878 8879 if (un->un_exclopen & (partmask)) { 8880 goto excl_open_fail; 8881 } 8882 8883 if (flag & FEXCL) { 8884 int i; 8885 if (un->un_ocmap.lyropen[part]) { 8886 goto excl_open_fail; 8887 } 8888 for (i = 0; i < (OTYPCNT - 1); i++) { 8889 if (un->un_ocmap.regopen[i] & (partmask)) { 8890 goto excl_open_fail; 8891 } 8892 } 8893 } 8894 8895 /* 8896 * Check the write permission if this is a removable media device, 8897 * NDELAY has not been set, and writable permission is requested. 8898 * 8899 * Note: If NDELAY was set and this is write-protected media the WRITE 8900 * attempt will fail with EIO as part of the I/O processing. This is a 8901 * more permissive implementation that allows the open to succeed and 8902 * WRITE attempts to fail when appropriate. 8903 */ 8904 if (un->un_f_chk_wp_open) { 8905 if ((flag & FWRITE) && (!nodelay)) { 8906 mutex_exit(SD_MUTEX(un)); 8907 /* 8908 * Defer the check for write permission on writable 8909 * DVD drive till sdstrategy and will not fail open even 8910 * if FWRITE is set as the device can be writable 8911 * depending upon the media and the media can change 8912 * after the call to open(). 8913 */ 8914 if (un->un_f_dvdram_writable_device == FALSE) { 8915 if (ISCD(un) || sr_check_wp(dev)) { 8916 rval = EROFS; 8917 mutex_enter(SD_MUTEX(un)); 8918 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8919 "write to cd or write protected media\n"); 8920 goto open_fail; 8921 } 8922 } 8923 mutex_enter(SD_MUTEX(un)); 8924 } 8925 } 8926 8927 /* 8928 * If opening in NDELAY/NONBLOCK mode, just return. 8929 * Check if disk is ready and has a valid geometry later. 
8930 */ 8931 if (!nodelay) { 8932 mutex_exit(SD_MUTEX(un)); 8933 rval = sd_ready_and_valid(un); 8934 mutex_enter(SD_MUTEX(un)); 8935 /* 8936 * Fail if device is not ready or if the number of disk 8937 * blocks is zero or negative for non CD devices. 8938 */ 8939 8940 nblks = 0; 8941 8942 if (rval == SD_READY_VALID && (!ISCD(un))) { 8943 /* if cmlb_partinfo fails, nblks remains 0 */ 8944 mutex_exit(SD_MUTEX(un)); 8945 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 8946 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 8947 mutex_enter(SD_MUTEX(un)); 8948 } 8949 8950 if ((rval != SD_READY_VALID) || 8951 (!ISCD(un) && nblks <= 0)) { 8952 rval = un->un_f_has_removable_media ? ENXIO : EIO; 8953 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8954 "device not ready or invalid disk block value\n"); 8955 goto open_fail; 8956 } 8957 #if defined(__i386) || defined(__amd64) 8958 } else { 8959 uchar_t *cp; 8960 /* 8961 * x86 requires special nodelay handling, so that p0 is 8962 * always defined and accessible. 8963 * Invalidate geometry only if device is not already open. 8964 */ 8965 cp = &un->un_ocmap.chkd[0]; 8966 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 8967 if (*cp != (uchar_t)0) { 8968 break; 8969 } 8970 cp++; 8971 } 8972 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 8973 mutex_exit(SD_MUTEX(un)); 8974 cmlb_invalidate(un->un_cmlbhandle, 8975 (void *)SD_PATH_DIRECT); 8976 mutex_enter(SD_MUTEX(un)); 8977 } 8978 8979 #endif 8980 } 8981 8982 if (otyp == OTYP_LYR) { 8983 un->un_ocmap.lyropen[part]++; 8984 } else { 8985 un->un_ocmap.regopen[otyp] |= partmask; 8986 } 8987 8988 /* Set up open and exclusive open flags */ 8989 if (flag & FEXCL) { 8990 un->un_exclopen |= (partmask); 8991 } 8992 8993 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8994 "open of part %d type %d\n", part, otyp); 8995 8996 mutex_exit(SD_MUTEX(un)); 8997 if (!nodelay) { 8998 sd_pm_exit(un); 8999 } 9000 9001 sema_v(&un->un_semoclose); 9002 9003 mutex_enter(&sd_detach_mutex); 9004 un->un_opens_in_progress--; 9005 mutex_exit(&sd_detach_mutex); 9006 9007 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9008 return (DDI_SUCCESS); 9009 9010 excl_open_fail: 9011 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9012 rval = EBUSY; 9013 9014 open_fail: 9015 mutex_exit(SD_MUTEX(un)); 9016 9017 /* 9018 * On a failed open we must exit the pm management. 9019 */ 9020 if (!nodelay) { 9021 sd_pm_exit(un); 9022 } 9023 open_failed_with_pm: 9024 sema_v(&un->un_semoclose); 9025 9026 mutex_enter(&sd_detach_mutex); 9027 un->un_opens_in_progress--; 9028 if (otyp == OTYP_LYR) { 9029 un->un_layer_count--; 9030 } 9031 mutex_exit(&sd_detach_mutex); 9032 9033 return (rval); 9034 } 9035 9036 9037 /* 9038 * Function: sdclose 9039 * 9040 * Description: Driver's close(9e) entry point function. 
9041 * 9042 * Arguments: dev - device number 9043 * flag - file status flag, informational only 9044 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9045 * cred_p - user credential pointer 9046 * 9047 * Return Code: ENXIO 9048 * 9049 * Context: Kernel thread context 9050 */ 9051 /* ARGSUSED */ 9052 static int 9053 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9054 { 9055 struct sd_lun *un; 9056 uchar_t *cp; 9057 int part; 9058 int nodelay; 9059 int rval = 0; 9060 9061 /* Validate the open type */ 9062 if (otyp >= OTYPCNT) { 9063 return (ENXIO); 9064 } 9065 9066 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9067 return (ENXIO); 9068 } 9069 9070 part = SDPART(dev); 9071 nodelay = flag & (FNDELAY | FNONBLOCK); 9072 9073 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9074 "sdclose: close of part %d type %d\n", part, otyp); 9075 9076 /* 9077 * We use a semaphore here in order to serialize 9078 * open and close requests on the device. 9079 */ 9080 sema_p(&un->un_semoclose); 9081 9082 mutex_enter(SD_MUTEX(un)); 9083 9084 /* Don't proceed if power is being changed. */ 9085 while (un->un_state == SD_STATE_PM_CHANGING) { 9086 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9087 } 9088 9089 if (un->un_exclopen & (1 << part)) { 9090 un->un_exclopen &= ~(1 << part); 9091 } 9092 9093 /* Update the open partition map */ 9094 if (otyp == OTYP_LYR) { 9095 un->un_ocmap.lyropen[part] -= 1; 9096 } else { 9097 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9098 } 9099 9100 cp = &un->un_ocmap.chkd[0]; 9101 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9102 if (*cp != (uchar_t)0) { 9103 break; 9104 } 9105 cp++; 9106 } 9107 9108 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9109 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9110 9111 /* 9112 * We avoid persistence upon the last close, and set 9113 * the throttle back to the maximum. 9114 */ 9115 un->un_throttle = un->un_saved_throttle; 9116 9117 if (un->un_state == SD_STATE_OFFLINE) { 9118 if (un->un_f_is_fibre == FALSE) { 9119 scsi_log(SD_DEVINFO(un), sd_label, 9120 CE_WARN, "offline\n"); 9121 } 9122 mutex_exit(SD_MUTEX(un)); 9123 cmlb_invalidate(un->un_cmlbhandle, 9124 (void *)SD_PATH_DIRECT); 9125 mutex_enter(SD_MUTEX(un)); 9126 9127 } else { 9128 /* 9129 * Flush any outstanding writes in NVRAM cache. 9130 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9131 * command; it may not work for non-Pluto devices. 9132 * SYNCHRONIZE CACHE is not required for removables, 9133 * except DVD-RAM drives. 9134 * 9135 * Also note: because SYNCHRONIZE CACHE is currently 9136 * the only command issued here that requires the 9137 * drive be powered up, only do the power up before 9138 * sending the Sync Cache command. If additional 9139 * commands are added which require a powered up 9140 * drive, the following sequence may have to change. 9141 * 9142 * And finally, note that parallel SCSI on SPARC 9143 * only issues a Sync Cache to DVD-RAM, a newly 9144 * supported device.
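 *
 * The code below follows the driver's usual power-management
 * bracket: raise power and mark the device busy before issuing a
 * command, then drop the busy count afterwards. As a minimal
 * sketch (illustrative only):
 *
 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
 *		... issue command(s) that need a powered-up drive ...
 *		sd_pm_exit(un);
 *	} else {
 *		rval = EIO;
 *	}
 *
 * SD_MUTEX(un) must not be held across sd_pm_entry()/sd_pm_exit(),
 * which is why it is dropped and re-taken around each use below.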
9145 */ 9146 #if defined(__i386) || defined(__amd64) 9147 if (un->un_f_sync_cache_supported || 9148 un->un_f_dvdram_writable_device == TRUE) { 9149 #else 9150 if (un->un_f_dvdram_writable_device == TRUE) { 9151 #endif 9152 mutex_exit(SD_MUTEX(un)); 9153 if (sd_pm_entry(un) == DDI_SUCCESS) { 9154 rval = 9155 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9156 NULL); 9157 /* ignore error if not supported */ 9158 if (rval == ENOTSUP) { 9159 rval = 0; 9160 } else if (rval != 0) { 9161 rval = EIO; 9162 } 9163 sd_pm_exit(un); 9164 } else { 9165 rval = EIO; 9166 } 9167 mutex_enter(SD_MUTEX(un)); 9168 } 9169 9170 /* 9171 * For devices which supports DOOR_LOCK, send an ALLOW 9172 * MEDIA REMOVAL command, but don't get upset if it 9173 * fails. We need to raise the power of the drive before 9174 * we can call sd_send_scsi_DOORLOCK() 9175 */ 9176 if (un->un_f_doorlock_supported) { 9177 mutex_exit(SD_MUTEX(un)); 9178 if (sd_pm_entry(un) == DDI_SUCCESS) { 9179 rval = sd_send_scsi_DOORLOCK(un, 9180 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9181 9182 sd_pm_exit(un); 9183 if (ISCD(un) && (rval != 0) && 9184 (nodelay != 0)) { 9185 rval = ENXIO; 9186 } 9187 } else { 9188 rval = EIO; 9189 } 9190 mutex_enter(SD_MUTEX(un)); 9191 } 9192 9193 /* 9194 * If a device has removable media, invalidate all 9195 * parameters related to media, such as geometry, 9196 * blocksize, and blockcount. 9197 */ 9198 if (un->un_f_has_removable_media) { 9199 sr_ejected(un); 9200 } 9201 9202 /* 9203 * Destroy the cache (if it exists) which was 9204 * allocated for the write maps since this is 9205 * the last close for this media. 9206 */ 9207 if (un->un_wm_cache) { 9208 /* 9209 * Check if there are pending commands. 9210 * and if there are give a warning and 9211 * do not destroy the cache. 9212 */ 9213 if (un->un_ncmds_in_driver > 0) { 9214 scsi_log(SD_DEVINFO(un), 9215 sd_label, CE_WARN, 9216 "Unable to clean up memory " 9217 "because of pending I/O\n"); 9218 } else { 9219 kmem_cache_destroy( 9220 un->un_wm_cache); 9221 un->un_wm_cache = NULL; 9222 } 9223 } 9224 } 9225 } 9226 9227 mutex_exit(SD_MUTEX(un)); 9228 sema_v(&un->un_semoclose); 9229 9230 if (otyp == OTYP_LYR) { 9231 mutex_enter(&sd_detach_mutex); 9232 /* 9233 * The detach routine may run when the layer count 9234 * drops to zero. 9235 */ 9236 un->un_layer_count--; 9237 mutex_exit(&sd_detach_mutex); 9238 } 9239 9240 return (rval); 9241 } 9242 9243 9244 /* 9245 * Function: sd_ready_and_valid 9246 * 9247 * Description: Test if device is ready and has a valid geometry. 9248 * 9249 * Arguments: dev - device number 9250 * un - driver soft state (unit) structure 9251 * 9252 * Return Code: SD_READY_VALID ready and valid label 9253 * SD_NOT_READY_VALID not ready, no label 9254 * SD_RESERVED_BY_OTHERS reservation conflict 9255 * 9256 * Context: Never called at interrupt context. 9257 */ 9258 9259 static int 9260 sd_ready_and_valid(struct sd_lun *un) 9261 { 9262 struct sd_errstats *stp; 9263 uint64_t capacity; 9264 uint_t lbasize; 9265 int rval = SD_READY_VALID; 9266 char name_str[48]; 9267 int is_valid; 9268 9269 ASSERT(un != NULL); 9270 ASSERT(!mutex_owned(SD_MUTEX(un))); 9271 9272 mutex_enter(SD_MUTEX(un)); 9273 /* 9274 * If a device has removable media, we must check if media is 9275 * ready when checking if this device is ready and valid. 
9276 */ 9277 if (un->un_f_has_removable_media) { 9278 mutex_exit(SD_MUTEX(un)); 9279 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9280 rval = SD_NOT_READY_VALID; 9281 mutex_enter(SD_MUTEX(un)); 9282 goto done; 9283 } 9284 9285 is_valid = SD_IS_VALID_LABEL(un); 9286 mutex_enter(SD_MUTEX(un)); 9287 if (!is_valid || 9288 (un->un_f_blockcount_is_valid == FALSE) || 9289 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9290 9291 /* capacity has to be read every open. */ 9292 mutex_exit(SD_MUTEX(un)); 9293 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9294 &lbasize, SD_PATH_DIRECT) != 0) { 9295 cmlb_invalidate(un->un_cmlbhandle, 9296 (void *)SD_PATH_DIRECT); 9297 mutex_enter(SD_MUTEX(un)); 9298 rval = SD_NOT_READY_VALID; 9299 goto done; 9300 } else { 9301 mutex_enter(SD_MUTEX(un)); 9302 sd_update_block_info(un, lbasize, capacity); 9303 } 9304 } 9305 9306 /* 9307 * Check if the media in the device is writable or not. 9308 */ 9309 if (!is_valid && ISCD(un)) { 9310 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9311 } 9312 9313 } else { 9314 /* 9315 * Do a test unit ready to clear any unit attention from non-cd 9316 * devices. 9317 */ 9318 mutex_exit(SD_MUTEX(un)); 9319 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9320 mutex_enter(SD_MUTEX(un)); 9321 } 9322 9323 9324 /* 9325 * If this is a non 512 block device, allocate space for 9326 * the wmap cache. This is being done here since every time 9327 * a media is changed this routine will be called and the 9328 * block size is a function of media rather than device. 9329 */ 9330 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9331 if (!(un->un_wm_cache)) { 9332 (void) snprintf(name_str, sizeof (name_str), 9333 "%s%d_cache", 9334 ddi_driver_name(SD_DEVINFO(un)), 9335 ddi_get_instance(SD_DEVINFO(un))); 9336 un->un_wm_cache = kmem_cache_create( 9337 name_str, sizeof (struct sd_w_map), 9338 8, sd_wm_cache_constructor, 9339 sd_wm_cache_destructor, NULL, 9340 (void *)un, NULL, 0); 9341 if (!(un->un_wm_cache)) { 9342 rval = ENOMEM; 9343 goto done; 9344 } 9345 } 9346 } 9347 9348 if (un->un_state == SD_STATE_NORMAL) { 9349 /* 9350 * If the target is not yet ready here (defined by a TUR 9351 * failure), invalidate the geometry and print an 'offline' 9352 * message. This is a legacy message, as the state of the 9353 * target is not actually changed to SD_STATE_OFFLINE. 9354 * 9355 * If the TUR fails for EACCES (Reservation Conflict), 9356 * SD_RESERVED_BY_OTHERS will be returned to indicate 9357 * reservation conflict. If the TUR fails for other 9358 * reasons, SD_NOT_READY_VALID will be returned. 
9359 */ 9360 int err; 9361 9362 mutex_exit(SD_MUTEX(un)); 9363 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9364 mutex_enter(SD_MUTEX(un)); 9365 9366 if (err != 0) { 9367 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9368 "offline or reservation conflict\n"); 9369 mutex_exit(SD_MUTEX(un)); 9370 cmlb_invalidate(un->un_cmlbhandle, 9371 (void *)SD_PATH_DIRECT); 9372 mutex_enter(SD_MUTEX(un)); 9373 if (err == EACCES) { 9374 rval = SD_RESERVED_BY_OTHERS; 9375 } else { 9376 rval = SD_NOT_READY_VALID; 9377 } 9378 goto done; 9379 } 9380 } 9381 9382 if (un->un_f_format_in_progress == FALSE) { 9383 mutex_exit(SD_MUTEX(un)); 9384 if (cmlb_validate(un->un_cmlbhandle, 0, 9385 (void *)SD_PATH_DIRECT) != 0) { 9386 rval = SD_NOT_READY_VALID; 9387 mutex_enter(SD_MUTEX(un)); 9388 goto done; 9389 } 9390 if (un->un_f_pkstats_enabled) { 9391 sd_set_pstats(un); 9392 SD_TRACE(SD_LOG_IO_PARTITION, un, 9393 "sd_ready_and_valid: un:0x%p pstats created and " 9394 "set\n", un); 9395 } 9396 mutex_enter(SD_MUTEX(un)); 9397 } 9398 9399 /* 9400 * If this device supports DOOR_LOCK command, try and send 9401 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9402 * if it fails. For a CD, however, it is an error 9403 */ 9404 if (un->un_f_doorlock_supported) { 9405 mutex_exit(SD_MUTEX(un)); 9406 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9407 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9408 rval = SD_NOT_READY_VALID; 9409 mutex_enter(SD_MUTEX(un)); 9410 goto done; 9411 } 9412 mutex_enter(SD_MUTEX(un)); 9413 } 9414 9415 /* The state has changed, inform the media watch routines */ 9416 un->un_mediastate = DKIO_INSERTED; 9417 cv_broadcast(&un->un_state_cv); 9418 rval = SD_READY_VALID; 9419 9420 done: 9421 9422 /* 9423 * Initialize the capacity kstat value, if no media previously 9424 * (capacity kstat is 0) and a media has been inserted 9425 * (un_blockcount > 0). 9426 */ 9427 if (un->un_errstats != NULL) { 9428 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9429 if ((stp->sd_capacity.value.ui64 == 0) && 9430 (un->un_f_blockcount_is_valid == TRUE)) { 9431 stp->sd_capacity.value.ui64 = 9432 (uint64_t)((uint64_t)un->un_blockcount * 9433 un->un_sys_blocksize); 9434 } 9435 } 9436 9437 mutex_exit(SD_MUTEX(un)); 9438 return (rval); 9439 } 9440 9441 9442 /* 9443 * Function: sdmin 9444 * 9445 * Description: Routine to limit the size of a data transfer. Used in 9446 * conjunction with physio(9F). 9447 * 9448 * Arguments: bp - pointer to the indicated buf(9S) struct. 9449 * 9450 * Context: Kernel thread context. 9451 */ 9452 9453 static void 9454 sdmin(struct buf *bp) 9455 { 9456 struct sd_lun *un; 9457 int instance; 9458 9459 instance = SDUNIT(bp->b_edev); 9460 9461 un = ddi_get_soft_state(sd_state, instance); 9462 ASSERT(un != NULL); 9463 9464 if (bp->b_bcount > un->un_max_xfer_size) { 9465 bp->b_bcount = un->un_max_xfer_size; 9466 } 9467 } 9468 9469 9470 /* 9471 * Function: sdread 9472 * 9473 * Description: Driver's read(9e) entry point function. 9474 * 9475 * Arguments: dev - device number 9476 * uio - structure pointer describing where data is to be stored 9477 * in user's space 9478 * cred_p - user credential pointer 9479 * 9480 * Return Code: ENXIO 9481 * EIO 9482 * EINVAL 9483 * value returned by physio 9484 * 9485 * Context: Kernel thread context. 
9486 */ 9487 /* ARGSUSED */ 9488 static int 9489 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9490 { 9491 struct sd_lun *un = NULL; 9492 int secmask; 9493 int err; 9494 9495 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9496 return (ENXIO); 9497 } 9498 9499 ASSERT(!mutex_owned(SD_MUTEX(un))); 9500 9501 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9502 mutex_enter(SD_MUTEX(un)); 9503 /* 9504 * Because the call to sd_ready_and_valid will issue I/O we 9505 * must wait here if either the device is suspended or 9506 * if it's power level is changing. 9507 */ 9508 while ((un->un_state == SD_STATE_SUSPENDED) || 9509 (un->un_state == SD_STATE_PM_CHANGING)) { 9510 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9511 } 9512 un->un_ncmds_in_driver++; 9513 mutex_exit(SD_MUTEX(un)); 9514 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9515 mutex_enter(SD_MUTEX(un)); 9516 un->un_ncmds_in_driver--; 9517 ASSERT(un->un_ncmds_in_driver >= 0); 9518 mutex_exit(SD_MUTEX(un)); 9519 return (EIO); 9520 } 9521 mutex_enter(SD_MUTEX(un)); 9522 un->un_ncmds_in_driver--; 9523 ASSERT(un->un_ncmds_in_driver >= 0); 9524 mutex_exit(SD_MUTEX(un)); 9525 } 9526 9527 /* 9528 * Read requests are restricted to multiples of the system block size. 9529 */ 9530 secmask = un->un_sys_blocksize - 1; 9531 9532 if (uio->uio_loffset & ((offset_t)(secmask))) { 9533 SD_ERROR(SD_LOG_READ_WRITE, un, 9534 "sdread: file offset not modulo %d\n", 9535 un->un_sys_blocksize); 9536 err = EINVAL; 9537 } else if (uio->uio_iov->iov_len & (secmask)) { 9538 SD_ERROR(SD_LOG_READ_WRITE, un, 9539 "sdread: transfer length not modulo %d\n", 9540 un->un_sys_blocksize); 9541 err = EINVAL; 9542 } else { 9543 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9544 } 9545 return (err); 9546 } 9547 9548 9549 /* 9550 * Function: sdwrite 9551 * 9552 * Description: Driver's write(9e) entry point function. 9553 * 9554 * Arguments: dev - device number 9555 * uio - structure pointer describing where data is stored in 9556 * user's space 9557 * cred_p - user credential pointer 9558 * 9559 * Return Code: ENXIO 9560 * EIO 9561 * EINVAL 9562 * value returned by physio 9563 * 9564 * Context: Kernel thread context. 9565 */ 9566 /* ARGSUSED */ 9567 static int 9568 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9569 { 9570 struct sd_lun *un = NULL; 9571 int secmask; 9572 int err; 9573 9574 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9575 return (ENXIO); 9576 } 9577 9578 ASSERT(!mutex_owned(SD_MUTEX(un))); 9579 9580 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9581 mutex_enter(SD_MUTEX(un)); 9582 /* 9583 * Because the call to sd_ready_and_valid will issue I/O we 9584 * must wait here if either the device is suspended or 9585 * if it's power level is changing. 9586 */ 9587 while ((un->un_state == SD_STATE_SUSPENDED) || 9588 (un->un_state == SD_STATE_PM_CHANGING)) { 9589 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9590 } 9591 un->un_ncmds_in_driver++; 9592 mutex_exit(SD_MUTEX(un)); 9593 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9594 mutex_enter(SD_MUTEX(un)); 9595 un->un_ncmds_in_driver--; 9596 ASSERT(un->un_ncmds_in_driver >= 0); 9597 mutex_exit(SD_MUTEX(un)); 9598 return (EIO); 9599 } 9600 mutex_enter(SD_MUTEX(un)); 9601 un->un_ncmds_in_driver--; 9602 ASSERT(un->un_ncmds_in_driver >= 0); 9603 mutex_exit(SD_MUTEX(un)); 9604 } 9605 9606 /* 9607 * Write requests are restricted to multiples of the system block size. 
9608 */ 9609 secmask = un->un_sys_blocksize - 1; 9610 9611 if (uio->uio_loffset & ((offset_t)(secmask))) { 9612 SD_ERROR(SD_LOG_READ_WRITE, un, 9613 "sdwrite: file offset not modulo %d\n", 9614 un->un_sys_blocksize); 9615 err = EINVAL; 9616 } else if (uio->uio_iov->iov_len & (secmask)) { 9617 SD_ERROR(SD_LOG_READ_WRITE, un, 9618 "sdwrite: transfer length not modulo %d\n", 9619 un->un_sys_blocksize); 9620 err = EINVAL; 9621 } else { 9622 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9623 } 9624 return (err); 9625 } 9626 9627 9628 /* 9629 * Function: sdaread 9630 * 9631 * Description: Driver's aread(9e) entry point function. 9632 * 9633 * Arguments: dev - device number 9634 * aio - structure pointer describing where data is to be stored 9635 * cred_p - user credential pointer 9636 * 9637 * Return Code: ENXIO 9638 * EIO 9639 * EINVAL 9640 * value returned by aphysio 9641 * 9642 * Context: Kernel thread context. 9643 */ 9644 /* ARGSUSED */ 9645 static int 9646 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9647 { 9648 struct sd_lun *un = NULL; 9649 struct uio *uio = aio->aio_uio; 9650 int secmask; 9651 int err; 9652 9653 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9654 return (ENXIO); 9655 } 9656 9657 ASSERT(!mutex_owned(SD_MUTEX(un))); 9658 9659 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9660 mutex_enter(SD_MUTEX(un)); 9661 /* 9662 * Because the call to sd_ready_and_valid will issue I/O we 9663 * must wait here if either the device is suspended or 9664 * if it's power level is changing. 9665 */ 9666 while ((un->un_state == SD_STATE_SUSPENDED) || 9667 (un->un_state == SD_STATE_PM_CHANGING)) { 9668 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9669 } 9670 un->un_ncmds_in_driver++; 9671 mutex_exit(SD_MUTEX(un)); 9672 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9673 mutex_enter(SD_MUTEX(un)); 9674 un->un_ncmds_in_driver--; 9675 ASSERT(un->un_ncmds_in_driver >= 0); 9676 mutex_exit(SD_MUTEX(un)); 9677 return (EIO); 9678 } 9679 mutex_enter(SD_MUTEX(un)); 9680 un->un_ncmds_in_driver--; 9681 ASSERT(un->un_ncmds_in_driver >= 0); 9682 mutex_exit(SD_MUTEX(un)); 9683 } 9684 9685 /* 9686 * Read requests are restricted to multiples of the system block size. 9687 */ 9688 secmask = un->un_sys_blocksize - 1; 9689 9690 if (uio->uio_loffset & ((offset_t)(secmask))) { 9691 SD_ERROR(SD_LOG_READ_WRITE, un, 9692 "sdaread: file offset not modulo %d\n", 9693 un->un_sys_blocksize); 9694 err = EINVAL; 9695 } else if (uio->uio_iov->iov_len & (secmask)) { 9696 SD_ERROR(SD_LOG_READ_WRITE, un, 9697 "sdaread: transfer length not modulo %d\n", 9698 un->un_sys_blocksize); 9699 err = EINVAL; 9700 } else { 9701 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 9702 } 9703 return (err); 9704 } 9705 9706 9707 /* 9708 * Function: sdawrite 9709 * 9710 * Description: Driver's awrite(9e) entry point function. 9711 * 9712 * Arguments: dev - device number 9713 * aio - structure pointer describing where data is stored 9714 * cred_p - user credential pointer 9715 * 9716 * Return Code: ENXIO 9717 * EIO 9718 * EINVAL 9719 * value returned by aphysio 9720 * 9721 * Context: Kernel thread context. 
9722 */ 9723 /* ARGSUSED */ 9724 static int 9725 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9726 { 9727 struct sd_lun *un = NULL; 9728 struct uio *uio = aio->aio_uio; 9729 int secmask; 9730 int err; 9731 9732 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9733 return (ENXIO); 9734 } 9735 9736 ASSERT(!mutex_owned(SD_MUTEX(un))); 9737 9738 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9739 mutex_enter(SD_MUTEX(un)); 9740 /* 9741 * Because the call to sd_ready_and_valid will issue I/O we 9742 * must wait here if either the device is suspended or 9743 * if it's power level is changing. 9744 */ 9745 while ((un->un_state == SD_STATE_SUSPENDED) || 9746 (un->un_state == SD_STATE_PM_CHANGING)) { 9747 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9748 } 9749 un->un_ncmds_in_driver++; 9750 mutex_exit(SD_MUTEX(un)); 9751 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9752 mutex_enter(SD_MUTEX(un)); 9753 un->un_ncmds_in_driver--; 9754 ASSERT(un->un_ncmds_in_driver >= 0); 9755 mutex_exit(SD_MUTEX(un)); 9756 return (EIO); 9757 } 9758 mutex_enter(SD_MUTEX(un)); 9759 un->un_ncmds_in_driver--; 9760 ASSERT(un->un_ncmds_in_driver >= 0); 9761 mutex_exit(SD_MUTEX(un)); 9762 } 9763 9764 /* 9765 * Write requests are restricted to multiples of the system block size. 9766 */ 9767 secmask = un->un_sys_blocksize - 1; 9768 9769 if (uio->uio_loffset & ((offset_t)(secmask))) { 9770 SD_ERROR(SD_LOG_READ_WRITE, un, 9771 "sdawrite: file offset not modulo %d\n", 9772 un->un_sys_blocksize); 9773 err = EINVAL; 9774 } else if (uio->uio_iov->iov_len & (secmask)) { 9775 SD_ERROR(SD_LOG_READ_WRITE, un, 9776 "sdawrite: transfer length not modulo %d\n", 9777 un->un_sys_blocksize); 9778 err = EINVAL; 9779 } else { 9780 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 9781 } 9782 return (err); 9783 } 9784 9785 9786 9787 9788 9789 /* 9790 * Driver IO processing follows the following sequence: 9791 * 9792 * sdioctl(9E) sdstrategy(9E) biodone(9F) 9793 * | | ^ 9794 * v v | 9795 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 9796 * | | | | 9797 * v | | | 9798 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 9799 * | | ^ ^ 9800 * v v | | 9801 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 9802 * | | | | 9803 * +---+ | +------------+ +-------+ 9804 * | | | | 9805 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9806 * | v | | 9807 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 9808 * | | ^ | 9809 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9810 * | v | | 9811 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 9812 * | | ^ | 9813 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9814 * | v | | 9815 * | sd_checksum_iostart() sd_checksum_iodone() | 9816 * | | ^ | 9817 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 9818 * | v | | 9819 * | sd_pm_iostart() sd_pm_iodone() | 9820 * | | ^ | 9821 * | | | | 9822 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 9823 * | ^ 9824 * v | 9825 * sd_core_iostart() | 9826 * | | 9827 * | +------>(*destroypkt)() 9828 * +-> sd_start_cmds() <-+ | | 9829 * | | | v 9830 * | | | scsi_destroy_pkt(9F) 9831 * | | | 9832 * +->(*initpkt)() +- sdintr() 9833 * | | | | 9834 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 9835 * | +-> scsi_setup_cdb(9F) | 9836 * | | 9837 * +--> scsi_transport(9F) | 9838 * | | 9839 * +----> SCSA ---->+ 9840 * 9841 * 9842 * This code is based upon the following presumptions: 9843 * 9844 * - iostart and iodone functions operate on buf(9S) structures. 
These 9845 * functions perform the necessary operations on the buf(9S) and pass 9846 * them along to the next function in the chain by using the macros 9847 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 9848 * (for iodone side functions). 9849 * 9850 * - The iostart side functions may sleep. The iodone side functions 9851 * are called under interrupt context and may NOT sleep. Therefore 9852 * iodone side functions also may not call iostart side functions. 9853 * (NOTE: iostart side functions should NOT sleep for memory, as 9854 * this could result in deadlock.) 9855 * 9856 * - An iostart side function may call its corresponding iodone side 9857 * function directly (if necessary). 9858 * 9859 * - In the event of an error, an iostart side function can return a buf(9S) 9860 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 9861 * b_error in the usual way of course). 9862 * 9863 * - The taskq mechanism may be used by the iodone side functions to dispatch 9864 * requests to the iostart side functions. The iostart side functions in 9865 * this case would be called under the context of a taskq thread, so it's 9866 * OK for them to block/sleep/spin in this case. 9867 * 9868 * - iostart side functions may allocate "shadow" buf(9S) structs and 9869 * pass them along to the next function in the chain. The corresponding 9870 * iodone side functions must coalesce the "shadow" bufs and return 9871 * the "original" buf to the next higher layer. 9872 * 9873 * - The b_private field of the buf(9S) struct holds a pointer to 9874 * an sd_xbuf struct, which contains information needed to 9875 * construct the scsi_pkt for the command. 9876 * 9877 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 9878 * layer must acquire & release the SD_MUTEX(un) as needed. 9879 */ 9880 9881 9882 /* 9883 * Create taskq for all targets in the system. This is created at 9884 * _init(9E) and destroyed at _fini(9E). 9885 * 9886 * Note: here we set the minalloc to a reasonably high number to ensure that 9887 * we will have an adequate supply of task entries available at interrupt time. 9888 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 9889 * sd_taskq_create(). Since we do not want to sleep for allocations at 9890 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 9891 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 9892 * requests at any one instant in time. 9893 */ 9894 #define SD_TASKQ_NUMTHREADS 8 9895 #define SD_TASKQ_MINALLOC 256 9896 #define SD_TASKQ_MAXALLOC 256 9897 9898 static taskq_t *sd_tq = NULL; 9899 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 9900 9901 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 9902 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 9903 9904 /* 9905 * The following task queue is created for the write phase of 9906 * read-modify-write on non-512 block-size devices. 9907 * The number of threads is limited to 1 for now, since this 9908 * currently applies only to DVD-RAM and MO drives, for which 9909 * performance is not the main criterion at this stage.
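 * As an illustrative sketch (hedged; sd_wmr_task_func is a
 * hypothetical worker, not a function in this file), work is handed
 * to such a queue from completion context with a non-sleeping
 * dispatch:
 *
 *	if (taskq_dispatch(sd_wmr_tq, sd_wmr_task_func, arg,
 *	    KM_NOSLEEP) == NULL) {
 *		(handle the out-of-entries failure case)
 *	}
 *
 * TASKQ_PREPOPULATE (see sd_taskq_create() below) keeps enough
 * entries preallocated that such a dispatch should not fail at
 * interrupt time.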
9910 * Note: It remains to be explored whether a single taskq can be used in the future. 9911 */ 9912 #define SD_WMR_TASKQ_NUMTHREADS 1 9913 static taskq_t *sd_wmr_tq = NULL; 9914 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 9915 9916 /* 9917 * Function: sd_taskq_create 9918 * 9919 * Description: Create taskq thread(s) and preallocate task entries 9920 * 9921 * Return Code: None. The allocated taskqs are stored in sd_tq and sd_wmr_tq. 9922 * 9923 * Context: Can sleep. Requires blockable context. 9924 * 9925 * Notes: - The taskq() facility currently is NOT part of the DDI. 9926 * (definitely NOT recommended for 3rd-party drivers!) :-) 9927 * - taskq_create() will block for memory; it will also panic 9928 * if it cannot create the requested number of threads. 9929 * - Currently taskq_create() creates threads that cannot be 9930 * swapped. 9931 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 9932 * supply of taskq entries at interrupt time (i.e., so that we 9933 * do not have to sleep for memory) 9934 */ 9935 9936 static void 9937 sd_taskq_create(void) 9938 { 9939 char taskq_name[TASKQ_NAMELEN]; 9940 9941 ASSERT(sd_tq == NULL); 9942 ASSERT(sd_wmr_tq == NULL); 9943 9944 (void) snprintf(taskq_name, sizeof (taskq_name), 9945 "%s_drv_taskq", sd_label); 9946 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 9947 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 9948 TASKQ_PREPOPULATE)); 9949 9950 (void) snprintf(taskq_name, sizeof (taskq_name), 9951 "%s_rmw_taskq", sd_label); 9952 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 9953 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 9954 TASKQ_PREPOPULATE)); 9955 } 9956 9957 9958 /* 9959 * Function: sd_taskq_delete 9960 * 9961 * Description: Complementary cleanup routine for sd_taskq_create(). 9962 * 9963 * Context: Kernel thread context. 9964 */ 9965 9966 static void 9967 sd_taskq_delete(void) 9968 { 9969 ASSERT(sd_tq != NULL); 9970 ASSERT(sd_wmr_tq != NULL); 9971 taskq_destroy(sd_tq); 9972 taskq_destroy(sd_wmr_tq); 9973 sd_tq = NULL; 9974 sd_wmr_tq = NULL; 9975 } 9976 9977 9978 /* 9979 * Function: sdstrategy 9980 * 9981 * Description: Driver's strategy (9E) entry point function. 9982 * 9983 * Arguments: bp - pointer to buf(9S) 9984 * 9985 * Return Code: Always returns zero 9986 * 9987 * Context: Kernel thread context. 9988 */ 9989 9990 static int 9991 sdstrategy(struct buf *bp) 9992 { 9993 struct sd_lun *un; 9994 9995 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 9996 if (un == NULL) { 9997 bioerror(bp, EIO); 9998 bp->b_resid = bp->b_bcount; 9999 biodone(bp); 10000 return (0); 10001 } 10002 /* As was done in the past, fail new commands if the state is dumping. */ 10003 if (un->un_state == SD_STATE_DUMPING) { 10004 bioerror(bp, ENXIO); 10005 bp->b_resid = bp->b_bcount; 10006 biodone(bp); 10007 return (0); 10008 } 10009 10010 ASSERT(!mutex_owned(SD_MUTEX(un))); 10011 10012 /* 10013 * Commands may sneak in while we have released the mutex in 10014 * DDI_SUSPEND, so we should block new commands. However, old 10015 * commands that are still in the driver at this point should 10016 * still be allowed to drain. 10017 */ 10018 mutex_enter(SD_MUTEX(un)); 10019 /* 10020 * Must wait here if either the device is suspended or 10021 * its power level is changing.
10022 */ 10023 while ((un->un_state == SD_STATE_SUSPENDED) || 10024 (un->un_state == SD_STATE_PM_CHANGING)) { 10025 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10026 } 10027 10028 un->un_ncmds_in_driver++; 10029 10030 /* 10031 * atapi: Since we are running the CD for now in PIO mode we need to 10032 * call bp_mapin here to avoid bp_mapin called interrupt context under 10033 * the HBA's init_pkt routine. 10034 */ 10035 if (un->un_f_cfg_is_atapi == TRUE) { 10036 mutex_exit(SD_MUTEX(un)); 10037 bp_mapin(bp); 10038 mutex_enter(SD_MUTEX(un)); 10039 } 10040 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10041 un->un_ncmds_in_driver); 10042 10043 mutex_exit(SD_MUTEX(un)); 10044 10045 /* 10046 * This will (eventually) allocate the sd_xbuf area and 10047 * call sd_xbuf_strategy(). We just want to return the 10048 * result of ddi_xbuf_qstrategy so that we have an opt- 10049 * imized tail call which saves us a stack frame. 10050 */ 10051 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10052 } 10053 10054 10055 /* 10056 * Function: sd_xbuf_strategy 10057 * 10058 * Description: Function for initiating IO operations via the 10059 * ddi_xbuf_qstrategy() mechanism. 10060 * 10061 * Context: Kernel thread context. 10062 */ 10063 10064 static void 10065 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10066 { 10067 struct sd_lun *un = arg; 10068 10069 ASSERT(bp != NULL); 10070 ASSERT(xp != NULL); 10071 ASSERT(un != NULL); 10072 ASSERT(!mutex_owned(SD_MUTEX(un))); 10073 10074 /* 10075 * Initialize the fields in the xbuf and save a pointer to the 10076 * xbuf in bp->b_private. 10077 */ 10078 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10079 10080 /* Send the buf down the iostart chain */ 10081 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10082 } 10083 10084 10085 /* 10086 * Function: sd_xbuf_init 10087 * 10088 * Description: Prepare the given sd_xbuf struct for use. 10089 * 10090 * Arguments: un - ptr to softstate 10091 * bp - ptr to associated buf(9S) 10092 * xp - ptr to associated sd_xbuf 10093 * chain_type - IO chain type to use: 10094 * SD_CHAIN_NULL 10095 * SD_CHAIN_BUFIO 10096 * SD_CHAIN_USCSI 10097 * SD_CHAIN_DIRECT 10098 * SD_CHAIN_DIRECT_PRIORITY 10099 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10100 * initialization; may be NULL if none. 10101 * 10102 * Context: Kernel thread context 10103 */ 10104 10105 static void 10106 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10107 uchar_t chain_type, void *pktinfop) 10108 { 10109 int index; 10110 10111 ASSERT(un != NULL); 10112 ASSERT(bp != NULL); 10113 ASSERT(xp != NULL); 10114 10115 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10116 bp, chain_type); 10117 10118 xp->xb_un = un; 10119 xp->xb_pktp = NULL; 10120 xp->xb_pktinfo = pktinfop; 10121 xp->xb_private = bp->b_private; 10122 xp->xb_blkno = (daddr_t)bp->b_blkno; 10123 10124 /* 10125 * Set up the iostart and iodone chain indexes in the xbuf, based 10126 * upon the specified chain type to use. 10127 */ 10128 switch (chain_type) { 10129 case SD_CHAIN_NULL: 10130 /* 10131 * Fall thru to just use the values for the buf type, even 10132 * tho for the NULL chain these values will never be used. 
10133 */ 10134 /* FALLTHRU */ 10135 case SD_CHAIN_BUFIO: 10136 index = un->un_buf_chain_type; 10137 break; 10138 case SD_CHAIN_USCSI: 10139 index = un->un_uscsi_chain_type; 10140 break; 10141 case SD_CHAIN_DIRECT: 10142 index = un->un_direct_chain_type; 10143 break; 10144 case SD_CHAIN_DIRECT_PRIORITY: 10145 index = un->un_priority_chain_type; 10146 break; 10147 default: 10148 /* We're really broken if we ever get here... */ 10149 panic("sd_xbuf_init: illegal chain type!"); 10150 /*NOTREACHED*/ 10151 } 10152 10153 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10154 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10155 10156 /* 10157 * It might be a bit easier to simply bzero the entire xbuf above, 10158 * but it turns out that since we init a fair number of members anyway, 10159 * we save a fair number cycles by doing explicit assignment of zero. 10160 */ 10161 xp->xb_pkt_flags = 0; 10162 xp->xb_dma_resid = 0; 10163 xp->xb_retry_count = 0; 10164 xp->xb_victim_retry_count = 0; 10165 xp->xb_ua_retry_count = 0; 10166 xp->xb_sense_bp = NULL; 10167 xp->xb_sense_status = 0; 10168 xp->xb_sense_state = 0; 10169 xp->xb_sense_resid = 0; 10170 10171 bp->b_private = xp; 10172 bp->b_flags &= ~(B_DONE | B_ERROR); 10173 bp->b_resid = 0; 10174 bp->av_forw = NULL; 10175 bp->av_back = NULL; 10176 bioerror(bp, 0); 10177 10178 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10179 } 10180 10181 10182 /* 10183 * Function: sd_uscsi_strategy 10184 * 10185 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10186 * 10187 * Arguments: bp - buf struct ptr 10188 * 10189 * Return Code: Always returns 0 10190 * 10191 * Context: Kernel thread context 10192 */ 10193 10194 static int 10195 sd_uscsi_strategy(struct buf *bp) 10196 { 10197 struct sd_lun *un; 10198 struct sd_uscsi_info *uip; 10199 struct sd_xbuf *xp; 10200 uchar_t chain_type; 10201 10202 ASSERT(bp != NULL); 10203 10204 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10205 if (un == NULL) { 10206 bioerror(bp, EIO); 10207 bp->b_resid = bp->b_bcount; 10208 biodone(bp); 10209 return (0); 10210 } 10211 10212 ASSERT(!mutex_owned(SD_MUTEX(un))); 10213 10214 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10215 10216 mutex_enter(SD_MUTEX(un)); 10217 /* 10218 * atapi: Since we are running the CD for now in PIO mode we need to 10219 * call bp_mapin here to avoid bp_mapin called interrupt context under 10220 * the HBA's init_pkt routine. 
10221 */ 10222 if (un->un_f_cfg_is_atapi == TRUE) { 10223 mutex_exit(SD_MUTEX(un)); 10224 bp_mapin(bp); 10225 mutex_enter(SD_MUTEX(un)); 10226 } 10227 un->un_ncmds_in_driver++; 10228 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10229 un->un_ncmds_in_driver); 10230 mutex_exit(SD_MUTEX(un)); 10231 10232 /* 10233 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10234 */ 10235 ASSERT(bp->b_private != NULL); 10236 uip = (struct sd_uscsi_info *)bp->b_private; 10237 10238 switch (uip->ui_flags) { 10239 case SD_PATH_DIRECT: 10240 chain_type = SD_CHAIN_DIRECT; 10241 break; 10242 case SD_PATH_DIRECT_PRIORITY: 10243 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10244 break; 10245 default: 10246 chain_type = SD_CHAIN_USCSI; 10247 break; 10248 } 10249 10250 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 10251 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10252 10253 /* Use the index obtained within xbuf_init */ 10254 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10255 10256 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10257 10258 return (0); 10259 } 10260 10261 /* 10262 * Function: sd_send_scsi_cmd 10263 * 10264 * Description: Runs a USCSI command for user (when called thru sdioctl), 10265 * or for the driver 10266 * 10267 * Arguments: dev - the dev_t for the device 10268 * incmd - ptr to a valid uscsi_cmd struct 10269 * flag - bit flag, indicating open settings, 32/64 bit type 10270 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10271 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10272 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10273 * to use the USCSI "direct" chain and bypass the normal 10274 * command waitq. 10275 * 10276 * Return Code: 0 - successful completion of the given command 10277 * EIO - scsi_uscsi_handle_command() failed 10278 * ENXIO - soft state not found for specified dev 10279 * EINVAL 10280 * EFAULT - copyin/copyout error 10281 * return code of scsi_uscsi_handle_command(): 10282 * EIO 10283 * ENXIO 10284 * EACCES 10285 * 10286 * Context: Waits for command to complete. Can sleep. 
10287 */ 10288 10289 static int 10290 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10291 enum uio_seg dataspace, int path_flag) 10292 { 10293 struct sd_uscsi_info *uip; 10294 struct uscsi_cmd *uscmd; 10295 struct sd_lun *un; 10296 int format = 0; 10297 int rval; 10298 10299 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10300 if (un == NULL) { 10301 return (ENXIO); 10302 } 10303 10304 ASSERT(!mutex_owned(SD_MUTEX(un))); 10305 10306 #ifdef SDDEBUG 10307 switch (dataspace) { 10308 case UIO_USERSPACE: 10309 SD_TRACE(SD_LOG_IO, un, 10310 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10311 break; 10312 case UIO_SYSSPACE: 10313 SD_TRACE(SD_LOG_IO, un, 10314 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10315 break; 10316 default: 10317 SD_TRACE(SD_LOG_IO, un, 10318 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10319 break; 10320 } 10321 #endif 10322 10323 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10324 SD_ADDRESS(un), &uscmd); 10325 if (rval != 0) { 10326 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 10327 "scsi_uscsi_alloc_and_copyin failed\n", un); 10328 return (rval); 10329 } 10330 10331 if ((uscmd->uscsi_cdb != NULL) && 10332 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10333 mutex_enter(SD_MUTEX(un)); 10334 un->un_f_format_in_progress = TRUE; 10335 mutex_exit(SD_MUTEX(un)); 10336 format = 1; 10337 } 10338 10339 /* 10340 * Allocate an sd_uscsi_info struct and fill it with the info 10341 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10342 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10343 * since we allocate the buf here in this function, we do not 10344 * need to preserve the prior contents of b_private. 10345 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10346 */ 10347 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10348 uip->ui_flags = path_flag; 10349 uip->ui_cmdp = uscmd; 10350 10351 /* 10352 * Commands sent with priority are intended for error recovery 10353 * situations, and do not have retries performed. 10354 */ 10355 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10356 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10357 } 10358 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10359 10360 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10361 sd_uscsi_strategy, NULL, uip); 10362 10363 #ifdef SDDEBUG 10364 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10365 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10366 uscmd->uscsi_status, uscmd->uscsi_resid); 10367 if (uscmd->uscsi_bufaddr != NULL) { 10368 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10369 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10370 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10371 if (dataspace == UIO_SYSSPACE) { 10372 SD_DUMP_MEMORY(un, SD_LOG_IO, 10373 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10374 uscmd->uscsi_buflen, SD_LOG_HEX); 10375 } 10376 } 10377 #endif 10378 10379 if (format == 1) { 10380 mutex_enter(SD_MUTEX(un)); 10381 un->un_f_format_in_progress = FALSE; 10382 mutex_exit(SD_MUTEX(un)); 10383 } 10384 10385 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10386 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10387 10388 return (rval); 10389 } 10390 10391 10392 /* 10393 * Function: sd_buf_iodone 10394 * 10395 * Description: Frees the sd_xbuf & returns the buf to its originator. 10396 * 10397 * Context: May be called from interrupt context. 
10398 */ 10399 /* ARGSUSED */ 10400 static void 10401 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10402 { 10403 struct sd_xbuf *xp; 10404 10405 ASSERT(un != NULL); 10406 ASSERT(bp != NULL); 10407 ASSERT(!mutex_owned(SD_MUTEX(un))); 10408 10409 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10410 10411 xp = SD_GET_XBUF(bp); 10412 ASSERT(xp != NULL); 10413 10414 mutex_enter(SD_MUTEX(un)); 10415 10416 /* 10417 * Grab time when the cmd completed. 10418 * This is used for determining if the system has been 10419 * idle long enough to make it idle to the PM framework. 10420 * This is for lowering the overhead, and therefore improving 10421 * performance per I/O operation. 10422 */ 10423 un->un_pm_idle_time = ddi_get_time(); 10424 10425 un->un_ncmds_in_driver--; 10426 ASSERT(un->un_ncmds_in_driver >= 0); 10427 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10428 un->un_ncmds_in_driver); 10429 10430 mutex_exit(SD_MUTEX(un)); 10431 10432 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10433 biodone(bp); /* bp is gone after this */ 10434 10435 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10436 } 10437 10438 10439 /* 10440 * Function: sd_uscsi_iodone 10441 * 10442 * Description: Frees the sd_xbuf & returns the buf to its originator. 10443 * 10444 * Context: May be called from interrupt context. 10445 */ 10446 /* ARGSUSED */ 10447 static void 10448 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10449 { 10450 struct sd_xbuf *xp; 10451 10452 ASSERT(un != NULL); 10453 ASSERT(bp != NULL); 10454 10455 xp = SD_GET_XBUF(bp); 10456 ASSERT(xp != NULL); 10457 ASSERT(!mutex_owned(SD_MUTEX(un))); 10458 10459 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10460 10461 bp->b_private = xp->xb_private; 10462 10463 mutex_enter(SD_MUTEX(un)); 10464 10465 /* 10466 * Grab time when the cmd completed. 10467 * This is used for determining if the system has been 10468 * idle long enough to make it idle to the PM framework. 10469 * This is for lowering the overhead, and therefore improving 10470 * performance per I/O operation. 10471 */ 10472 un->un_pm_idle_time = ddi_get_time(); 10473 10474 un->un_ncmds_in_driver--; 10475 ASSERT(un->un_ncmds_in_driver >= 0); 10476 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10477 un->un_ncmds_in_driver); 10478 10479 mutex_exit(SD_MUTEX(un)); 10480 10481 kmem_free(xp, sizeof (struct sd_xbuf)); 10482 biodone(bp); 10483 10484 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10485 } 10486 10487 10488 /* 10489 * Function: sd_mapblockaddr_iostart 10490 * 10491 * Description: Verify request lies within the partition limits for 10492 * the indicated minor device. Issue "overrun" buf if 10493 * request would exceed partition range. Converts 10494 * partition-relative block address to absolute. 10495 * 10496 * Context: Can sleep 10497 * 10498 * Issues: This follows what the old code did, in terms of accessing 10499 * some of the partition info in the unit struct without holding 10500 * the mutext. This is a general issue, if the partition info 10501 * can be altered while IO is in progress... as soon as we send 10502 * a buf, its partitioning can be invalid before it gets to the 10503 * device. Probably the right fix is to move partitioning out 10504 * of the driver entirely. 
10505 */ 10506 10507 static void 10508 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10509 { 10510 diskaddr_t nblocks; /* #blocks in the given partition */ 10511 daddr_t blocknum; /* Block number specified by the buf */ 10512 size_t requested_nblocks; 10513 size_t available_nblocks; 10514 int partition; 10515 diskaddr_t partition_offset; 10516 struct sd_xbuf *xp; 10517 10518 10519 ASSERT(un != NULL); 10520 ASSERT(bp != NULL); 10521 ASSERT(!mutex_owned(SD_MUTEX(un))); 10522 10523 SD_TRACE(SD_LOG_IO_PARTITION, un, 10524 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10525 10526 xp = SD_GET_XBUF(bp); 10527 ASSERT(xp != NULL); 10528 10529 /* 10530 * If the geometry is not indicated as valid, attempt to access 10531 * the unit & verify the geometry/label. This can be the case for 10532 * removable-media devices, of if the device was opened in 10533 * NDELAY/NONBLOCK mode. 10534 */ 10535 if (!SD_IS_VALID_LABEL(un) && 10536 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10537 /* 10538 * For removable devices it is possible to start an I/O 10539 * without a media by opening the device in nodelay mode. 10540 * Also for writable CDs there can be many scenarios where 10541 * there is no geometry yet but volume manager is trying to 10542 * issue a read() just because it can see TOC on the CD. So 10543 * do not print a message for removables. 10544 */ 10545 if (!un->un_f_has_removable_media) { 10546 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10547 "i/o to invalid geometry\n"); 10548 } 10549 bioerror(bp, EIO); 10550 bp->b_resid = bp->b_bcount; 10551 SD_BEGIN_IODONE(index, un, bp); 10552 return; 10553 } 10554 10555 partition = SDPART(bp->b_edev); 10556 10557 nblocks = 0; 10558 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10559 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10560 10561 /* 10562 * blocknum is the starting block number of the request. At this 10563 * point it is still relative to the start of the minor device. 10564 */ 10565 blocknum = xp->xb_blkno; 10566 10567 /* 10568 * Legacy: If the starting block number is one past the last block 10569 * in the partition, do not set B_ERROR in the buf. 10570 */ 10571 if (blocknum == nblocks) { 10572 goto error_exit; 10573 } 10574 10575 /* 10576 * Confirm that the first block of the request lies within the 10577 * partition limits. Also the requested number of bytes must be 10578 * a multiple of the system block size. 10579 */ 10580 if ((blocknum < 0) || (blocknum >= nblocks) || 10581 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10582 bp->b_flags |= B_ERROR; 10583 goto error_exit; 10584 } 10585 10586 /* 10587 * If the requsted # blocks exceeds the available # blocks, that 10588 * is an overrun of the partition. 10589 */ 10590 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10591 available_nblocks = (size_t)(nblocks - blocknum); 10592 ASSERT(nblocks >= blocknum); 10593 10594 if (requested_nblocks > available_nblocks) { 10595 /* 10596 * Allocate an "overrun" buf to allow the request to proceed 10597 * for the amount of space available in the partition. The 10598 * amount not transferred will be added into the b_resid 10599 * when the operation is complete. The overrun buf 10600 * replaces the original buf here, and the original buf 10601 * is saved inside the overrun buf, for later use. 
10602 */ 10603 size_t resid = SD_SYSBLOCKS2BYTES(un, 10604 (offset_t)(requested_nblocks - available_nblocks)); 10605 size_t count = bp->b_bcount - resid; 10606 /* 10607 * Note: count is an unsigned entity thus it'll NEVER 10608 * be less than 0 so ASSERT the original values are 10609 * correct. 10610 */ 10611 ASSERT(bp->b_bcount >= resid); 10612 10613 bp = sd_bioclone_alloc(bp, count, blocknum, 10614 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10615 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10616 ASSERT(xp != NULL); 10617 } 10618 10619 /* At this point there should be no residual for this buf. */ 10620 ASSERT(bp->b_resid == 0); 10621 10622 /* Convert the block number to an absolute address. */ 10623 xp->xb_blkno += partition_offset; 10624 10625 SD_NEXT_IOSTART(index, un, bp); 10626 10627 SD_TRACE(SD_LOG_IO_PARTITION, un, 10628 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10629 10630 return; 10631 10632 error_exit: 10633 bp->b_resid = bp->b_bcount; 10634 SD_BEGIN_IODONE(index, un, bp); 10635 SD_TRACE(SD_LOG_IO_PARTITION, un, 10636 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10637 } 10638 10639 10640 /* 10641 * Function: sd_mapblockaddr_iodone 10642 * 10643 * Description: Completion-side processing for partition management. 10644 * 10645 * Context: May be called under interrupt context 10646 */ 10647 10648 static void 10649 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10650 { 10651 /* int partition; */ /* Not used, see below. */ 10652 ASSERT(un != NULL); 10653 ASSERT(bp != NULL); 10654 ASSERT(!mutex_owned(SD_MUTEX(un))); 10655 10656 SD_TRACE(SD_LOG_IO_PARTITION, un, 10657 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10658 10659 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10660 /* 10661 * We have an "overrun" buf to deal with... 10662 */ 10663 struct sd_xbuf *xp; 10664 struct buf *obp; /* ptr to the original buf */ 10665 10666 xp = SD_GET_XBUF(bp); 10667 ASSERT(xp != NULL); 10668 10669 /* Retrieve the pointer to the original buf */ 10670 obp = (struct buf *)xp->xb_private; 10671 ASSERT(obp != NULL); 10672 10673 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10674 bioerror(obp, bp->b_error); 10675 10676 sd_bioclone_free(bp); 10677 10678 /* 10679 * Get back the original buf. 10680 * Note that since the restoration of xb_blkno below 10681 * was removed, the sd_xbuf is not needed. 10682 */ 10683 bp = obp; 10684 /* 10685 * xp = SD_GET_XBUF(bp); 10686 * ASSERT(xp != NULL); 10687 */ 10688 } 10689 10690 /* 10691 * Convert sd->xb_blkno back to a minor-device relative value. 10692 * Note: this has been commented out, as it is not needed in the 10693 * current implementation of the driver (ie, since this function 10694 * is at the top of the layering chains, so the info will be 10695 * discarded) and it is in the "hot" IO path. 10696 * 10697 * partition = getminor(bp->b_edev) & SDPART_MASK; 10698 * xp->xb_blkno -= un->un_offset[partition]; 10699 */ 10700 10701 SD_NEXT_IODONE(index, un, bp); 10702 10703 SD_TRACE(SD_LOG_IO_PARTITION, un, 10704 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 10705 } 10706 10707 10708 /* 10709 * Function: sd_mapblocksize_iostart 10710 * 10711 * Description: Convert between system block size (un->un_sys_blocksize) 10712 * and target block size (un->un_tgt_blocksize). 10713 * 10714 * Context: Can sleep to allocate resources. 
10715 * 10716 * Assumptions: A higher layer has already performed any partition validation, 10717 * and converted the xp->xb_blkno to an absolute value relative 10718 * to the start of the device. 10719 * 10720 * It is also assumed that the higher layer has implemented 10721 * an "overrun" mechanism for the case where the request would 10722 * read/write beyond the end of a partition. In this case we 10723 * assume (and ASSERT) that bp->b_resid == 0. 10724 * 10725 * Note: The implementation for this routine assumes the target 10726 * block size remains constant between allocation and transport. 10727 */ 10728 10729 static void 10730 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 10731 { 10732 struct sd_mapblocksize_info *bsp; 10733 struct sd_xbuf *xp; 10734 offset_t first_byte; 10735 daddr_t start_block, end_block; 10736 daddr_t request_bytes; 10737 ushort_t is_aligned = FALSE; 10738 10739 ASSERT(un != NULL); 10740 ASSERT(bp != NULL); 10741 ASSERT(!mutex_owned(SD_MUTEX(un))); 10742 ASSERT(bp->b_resid == 0); 10743 10744 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10745 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 10746 10747 /* 10748 * For a non-writable CD, a write request is an error 10749 */ 10750 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 10751 (un->un_f_mmc_writable_media == FALSE)) { 10752 bioerror(bp, EIO); 10753 bp->b_resid = bp->b_bcount; 10754 SD_BEGIN_IODONE(index, un, bp); 10755 return; 10756 } 10757 10758 /* 10759 * We do not need a shadow buf if the device is using 10760 * un->un_sys_blocksize as its block size or if bcount == 0. 10761 * In this case there is no layer-private data block allocated. 10762 */ 10763 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10764 (bp->b_bcount == 0)) { 10765 goto done; 10766 } 10767 10768 #if defined(__i386) || defined(__amd64) 10769 /* We do not support non-block-aligned transfers for ROD devices */ 10770 ASSERT(!ISROD(un)); 10771 #endif 10772 10773 xp = SD_GET_XBUF(bp); 10774 ASSERT(xp != NULL); 10775 10776 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10777 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 10778 un->un_tgt_blocksize, un->un_sys_blocksize); 10779 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10780 "request start block:0x%x\n", xp->xb_blkno); 10781 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10782 "request len:0x%x\n", bp->b_bcount); 10783 10784 /* 10785 * Allocate the layer-private data area for the mapblocksize layer. 10786 * Layers are allowed to use the xp_private member of the sd_xbuf 10787 * struct to store the pointer to their layer-private data block, but 10788 * each layer also has the responsibility of restoring the prior 10789 * contents of xb_private before returning the buf/xbuf to the 10790 * higher layer that sent it. 10791 * 10792 * Here we save the prior contents of xp->xb_private into the 10793 * bsp->mbs_oprivate field of our layer-private data area. This value 10794 * is restored by sd_mapblocksize_iodone() just prior to freeing up 10795 * the layer-private area and returning the buf/xbuf to the layer 10796 * that sent it. 10797 * 10798 * Note that here we use kmem_zalloc for the allocation as there are 10799 * parts of the mapblocksize code that expect certain fields to be 10800 * zero unless explicitly set to a required value. 
10801 */ 10802 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10803 bsp->mbs_oprivate = xp->xb_private; 10804 xp->xb_private = bsp; 10805 10806 /* 10807 * This treats the data on the disk (target) as an array of bytes. 10808 * first_byte is the byte offset, from the beginning of the device, 10809 * to the location of the request. This is converted from a 10810 * un->un_sys_blocksize block address to a byte offset, and then back 10811 * to a block address based upon a un->un_tgt_blocksize block size. 10812 * 10813 * xp->xb_blkno should be absolute upon entry into this function, 10814 * but, but it is based upon partitions that use the "system" 10815 * block size. It must be adjusted to reflect the block size of 10816 * the target. 10817 * 10818 * Note that end_block is actually the block that follows the last 10819 * block of the request, but that's what is needed for the computation. 10820 */ 10821 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 10822 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 10823 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 10824 un->un_tgt_blocksize; 10825 10826 /* request_bytes is rounded up to a multiple of the target block size */ 10827 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 10828 10829 /* 10830 * See if the starting address of the request and the request 10831 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 10832 * then we do not need to allocate a shadow buf to handle the request. 10833 */ 10834 if (((first_byte % un->un_tgt_blocksize) == 0) && 10835 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 10836 is_aligned = TRUE; 10837 } 10838 10839 if ((bp->b_flags & B_READ) == 0) { 10840 /* 10841 * Lock the range for a write operation. An aligned request is 10842 * considered a simple write; otherwise the request must be a 10843 * read-modify-write. 10844 */ 10845 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 10846 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 10847 } 10848 10849 /* 10850 * Alloc a shadow buf if the request is not aligned. Also, this is 10851 * where the READ command is generated for a read-modify-write. (The 10852 * write phase is deferred until after the read completes.) 10853 */ 10854 if (is_aligned == FALSE) { 10855 10856 struct sd_mapblocksize_info *shadow_bsp; 10857 struct sd_xbuf *shadow_xp; 10858 struct buf *shadow_bp; 10859 10860 /* 10861 * Allocate the shadow buf and it associated xbuf. Note that 10862 * after this call the xb_blkno value in both the original 10863 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 10864 * same: absolute relative to the start of the device, and 10865 * adjusted for the target block size. The b_blkno in the 10866 * shadow buf will also be set to this value. We should never 10867 * change b_blkno in the original bp however. 10868 * 10869 * Note also that the shadow buf will always need to be a 10870 * READ command, regardless of whether the incoming command 10871 * is a READ or a WRITE. 10872 */ 10873 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 10874 xp->xb_blkno, 10875 (int (*)(struct buf *)) sd_mapblocksize_iodone); 10876 10877 shadow_xp = SD_GET_XBUF(shadow_bp); 10878 10879 /* 10880 * Allocate the layer-private data for the shadow buf. 10881 * (No need to preserve xb_private in the shadow xbuf.) 
10882 */ 10883 shadow_xp->xb_private = shadow_bsp = 10884 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10885 10886 /* 10887 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 10888 * to figure out where the start of the user data is (based upon 10889 * the system block size) in the data returned by the READ 10890 * command (which will be based upon the target blocksize). Note 10891 * that this is only really used if the request is unaligned. 10892 */ 10893 bsp->mbs_copy_offset = (ssize_t)(first_byte - 10894 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 10895 ASSERT((bsp->mbs_copy_offset >= 0) && 10896 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 10897 10898 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 10899 10900 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 10901 10902 /* Transfer the wmap (if any) to the shadow buf */ 10903 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 10904 bsp->mbs_wmp = NULL; 10905 10906 /* 10907 * The shadow buf goes on from here in place of the 10908 * original buf. 10909 */ 10910 shadow_bsp->mbs_orig_bp = bp; 10911 bp = shadow_bp; 10912 } 10913 10914 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10915 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 10916 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10917 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 10918 request_bytes); 10919 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10920 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 10921 10922 done: 10923 SD_NEXT_IOSTART(index, un, bp); 10924 10925 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10926 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 10927 } 10928 10929 10930 /* 10931 * Function: sd_mapblocksize_iodone 10932 * 10933 * Description: Completion side processing for block-size mapping. 10934 * 10935 * Context: May be called under interrupt context 10936 */ 10937 10938 static void 10939 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 10940 { 10941 struct sd_mapblocksize_info *bsp; 10942 struct sd_xbuf *xp; 10943 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 10944 struct buf *orig_bp; /* ptr to the original buf */ 10945 offset_t shadow_end; 10946 offset_t request_end; 10947 offset_t shadow_start; 10948 ssize_t copy_offset; 10949 size_t copy_length; 10950 size_t shortfall; 10951 uint_t is_write; /* TRUE if this bp is a WRITE */ 10952 uint_t has_wmap; /* TRUE if this bp has a wmap */ 10953 10954 ASSERT(un != NULL); 10955 ASSERT(bp != NULL); 10956 10957 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10958 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 10959 10960 /* 10961 * There is no shadow buf or layer-private data if the target is 10962 * using un->un_sys_blocksize as its block size or if bcount == 0. 10963 */ 10964 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10965 (bp->b_bcount == 0)) { 10966 goto exit; 10967 } 10968 10969 xp = SD_GET_XBUF(bp); 10970 ASSERT(xp != NULL); 10971 10972 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 10973 bsp = xp->xb_private; 10974 10975 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 10976 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 10977 10978 if (is_write) { 10979 /* 10980 * For a WRITE request we must free up the block range that 10981 * we have locked up. This holds regardless of whether this is 10982 * an aligned write request or a read-modify-write request.
10983 */ 10984 sd_range_unlock(un, bsp->mbs_wmp); 10985 bsp->mbs_wmp = NULL; 10986 } 10987 10988 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 10989 /* 10990 * An aligned read or write command will have no shadow buf; 10991 * there is not much else to do with it. 10992 */ 10993 goto done; 10994 } 10995 10996 orig_bp = bsp->mbs_orig_bp; 10997 ASSERT(orig_bp != NULL); 10998 orig_xp = SD_GET_XBUF(orig_bp); 10999 ASSERT(orig_xp != NULL); 11000 ASSERT(!mutex_owned(SD_MUTEX(un))); 11001 11002 if (!is_write && has_wmap) { 11003 /* 11004 * A READ with a wmap means this is the READ phase of a 11005 * read-modify-write. If an error occurred on the READ then 11006 * we do not proceed with the WRITE phase or copy any data. 11007 * Just release the write maps and return with an error. 11008 */ 11009 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11010 orig_bp->b_resid = orig_bp->b_bcount; 11011 bioerror(orig_bp, bp->b_error); 11012 sd_range_unlock(un, bsp->mbs_wmp); 11013 goto freebuf_done; 11014 } 11015 } 11016 11017 /* 11018 * Here is where we set up to copy the data from the shadow buf 11019 * into the space associated with the original buf. 11020 * 11021 * To deal with the conversion between block sizes, these 11022 * computations treat the data as an array of bytes, with the 11023 * first byte (byte 0) corresponding to the first byte in the 11024 * first block on the disk. 11025 */ 11026 11027 /* 11028 * shadow_start and shadow_end delimit the location and extent of 11029 * the data returned with the shadow IO request. 11030 */ 11031 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11032 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11033 11034 /* 11035 * copy_offset gives the offset (in bytes) from the start of the first 11036 * block of the READ request to the beginning of the data. We retrieve 11037 * this value from mbs_copy_offset in the layer-private data area, 11038 * where it was saved by sd_mapblocksize_iostart(). copy_length gives 11039 * the amount of data to be copied (in bytes). 11040 */ 11041 copy_offset = bsp->mbs_copy_offset; 11042 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11043 copy_length = orig_bp->b_bcount; 11044 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11045 11046 /* 11047 * Set up the resid and error fields of orig_bp as appropriate. 11048 */ 11049 if (shadow_end >= request_end) { 11050 /* We got all the requested data; set resid to zero */ 11051 orig_bp->b_resid = 0; 11052 } else { 11053 /* 11054 * We failed to get enough data to fully satisfy the original 11055 * request. Just copy back whatever data we got and set 11056 * up the residual and error code as required. 11057 * 11058 * 'shortfall' is the amount by which the data received with the 11059 * shadow buf has "fallen short" of the requested amount. 11060 */ 11061 shortfall = (size_t)(request_end - shadow_end); 11062 11063 if (shortfall > orig_bp->b_bcount) { 11064 /* 11065 * We did not get enough data to even partially 11066 * fulfill the original request. The residual is 11067 * equal to the amount requested. 11068 */ 11069 orig_bp->b_resid = orig_bp->b_bcount; 11070 } else { 11071 /* 11072 * We did not get all the data that we requested 11073 * from the device, but we will try to return what 11074 * portion we did get.
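*
* (Continuing the hypothetical example from sd_mapblocksize_iostart():
* if the 4096-byte shadow READ came back with b_resid == 2048, then
* shadow_end == 2048 while request_end == 1536 + 1024 == 2560, so
* shortfall == 512, orig_bp->b_resid becomes 512, and copy_length is
* reduced to 512 below.)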
11075 */ 11076 orig_bp->b_resid = shortfall; 11077 } 11078 ASSERT(copy_length >= orig_bp->b_resid); 11079 copy_length -= orig_bp->b_resid; 11080 } 11081 11082 /* Propagate the error code from the shadow buf to the original buf */ 11083 bioerror(orig_bp, bp->b_error); 11084 11085 if (is_write) { 11086 goto freebuf_done; /* No data copying for a WRITE */ 11087 } 11088 11089 if (has_wmap) { 11090 /* 11091 * This is a READ command from the READ phase of a 11092 * read-modify-write request. We have to copy the data given 11093 * by the user OVER the data returned by the READ command, 11094 * then convert the command from a READ to a WRITE and send 11095 * it back to the target. 11096 */ 11097 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11098 copy_length); 11099 11100 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11101 11102 /* 11103 * Dispatch the WRITE command to the taskq thread, which 11104 * will in turn send the command to the target. When the 11105 * WRITE command completes, we (sd_mapblocksize_iodone()) 11106 * will get called again as part of the iodone chain 11107 * processing for it. Note that we will still be dealing 11108 * with the shadow buf at that point. 11109 */ 11110 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11111 KM_NOSLEEP) != 0) { 11112 /* 11113 * Dispatch was successful so we are done. Return 11114 * without going any higher up the iodone chain. Do 11115 * not free up any layer-private data until after the 11116 * WRITE completes. 11117 */ 11118 return; 11119 } 11120 11121 /* 11122 * Dispatch of the WRITE command failed; set up the error 11123 * condition and send this IO back up the iodone chain. 11124 */ 11125 bioerror(orig_bp, EIO); 11126 orig_bp->b_resid = orig_bp->b_bcount; 11127 11128 } else { 11129 /* 11130 * This is a regular READ request (ie, not a RMW). Copy the 11131 * data from the shadow buf into the original buf. The 11132 * copy_offset compensates for any "misalignment" between the 11133 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11134 * original buf (with its un->un_sys_blocksize blocks). 11135 */ 11136 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11137 copy_length); 11138 } 11139 11140 freebuf_done: 11141 11142 /* 11143 * At this point we still have both the shadow buf AND the original 11144 * buf to deal with, as well as the layer-private data area in each. 11145 * Local variables are as follows: 11146 * 11147 * bp -- points to shadow buf 11148 * xp -- points to xbuf of shadow buf 11149 * bsp -- points to layer-private data area of shadow buf 11150 * orig_bp -- points to original buf 11151 * 11152 * First free the shadow buf and its associated xbuf, then free the 11153 * layer-private data area from the shadow buf. There is no need to 11154 * restore xb_private in the shadow xbuf. 11155 */ 11156 sd_shadow_buf_free(bp); 11157 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11158 11159 /* 11160 * Now update the local variables to point to the original buf, xbuf, 11161 * and layer-private area. 11162 */ 11163 bp = orig_bp; 11164 xp = SD_GET_XBUF(bp); 11165 ASSERT(xp != NULL); 11166 ASSERT(xp == orig_xp); 11167 bsp = xp->xb_private; 11168 ASSERT(bsp != NULL); 11169 11170 done: 11171 /* 11172 * Restore xb_private to whatever it was set to by the next higher 11173 * layer in the chain, then free the layer-private data area. 
11174 */ 11175 xp->xb_private = bsp->mbs_oprivate; 11176 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11177 11178 exit: 11179 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11180 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11181 11182 SD_NEXT_IODONE(index, un, bp); 11183 } 11184 11185 11186 /* 11187 * Function: sd_checksum_iostart 11188 * 11189 * Description: A stub function for a layer that's currently not used. 11190 * For now just a placeholder. 11191 * 11192 * Context: Kernel thread context 11193 */ 11194 11195 static void 11196 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11197 { 11198 ASSERT(un != NULL); 11199 ASSERT(bp != NULL); 11200 ASSERT(!mutex_owned(SD_MUTEX(un))); 11201 SD_NEXT_IOSTART(index, un, bp); 11202 } 11203 11204 11205 /* 11206 * Function: sd_checksum_iodone 11207 * 11208 * Description: A stub function for a layer that's currently not used. 11209 * For now just a placeholder. 11210 * 11211 * Context: May be called under interrupt context 11212 */ 11213 11214 static void 11215 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11216 { 11217 ASSERT(un != NULL); 11218 ASSERT(bp != NULL); 11219 ASSERT(!mutex_owned(SD_MUTEX(un))); 11220 SD_NEXT_IODONE(index, un, bp); 11221 } 11222 11223 11224 /* 11225 * Function: sd_checksum_uscsi_iostart 11226 * 11227 * Description: A stub function for a layer that's currently not used. 11228 * For now just a placeholder. 11229 * 11230 * Context: Kernel thread context 11231 */ 11232 11233 static void 11234 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11235 { 11236 ASSERT(un != NULL); 11237 ASSERT(bp != NULL); 11238 ASSERT(!mutex_owned(SD_MUTEX(un))); 11239 SD_NEXT_IOSTART(index, un, bp); 11240 } 11241 11242 11243 /* 11244 * Function: sd_checksum_uscsi_iodone 11245 * 11246 * Description: A stub function for a layer that's currently not used. 11247 * For now just a placeholder. 11248 * 11249 * Context: May be called under interrupt context 11250 */ 11251 11252 static void 11253 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11254 { 11255 ASSERT(un != NULL); 11256 ASSERT(bp != NULL); 11257 ASSERT(!mutex_owned(SD_MUTEX(un))); 11258 SD_NEXT_IODONE(index, un, bp); 11259 } 11260 11261 11262 /* 11263 * Function: sd_pm_iostart 11264 * 11265 * Description: iostart-side routine for Power management. 11266 * 11267 * Context: Kernel thread context 11268 */ 11269 11270 static void 11271 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11272 { 11273 ASSERT(un != NULL); 11274 ASSERT(bp != NULL); 11275 ASSERT(!mutex_owned(SD_MUTEX(un))); 11276 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11277 11278 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11279 11280 if (sd_pm_entry(un) != DDI_SUCCESS) { 11281 /* 11282 * Set up to return the failed buf back up the 'iodone' 11283 * side of the calling chain. 11284 */ 11285 bioerror(bp, EIO); 11286 bp->b_resid = bp->b_bcount; 11287 11288 SD_BEGIN_IODONE(index, un, bp); 11289 11290 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11291 return; 11292 } 11293 11294 SD_NEXT_IOSTART(index, un, bp); 11295 11296 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11297 } 11298 11299 11300 /* 11301 * Function: sd_pm_iodone 11302 * 11303 * Description: iodone-side routine for power management.
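* Balances the sd_pm_entry() performed in sd_pm_iostart() by
* calling sd_pm_exit() below once power management is enabled.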
11304 * 11305 * Context: may be called from interrupt context 11306 */ 11307 11308 static void 11309 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11310 { 11311 ASSERT(un != NULL); 11312 ASSERT(bp != NULL); 11313 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11314 11315 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11316 11317 /* 11318 * After attach the following flag is only read, so don't 11319 * take the penalty of acquiring a mutex for it. 11320 */ 11321 if (un->un_f_pm_is_enabled == TRUE) { 11322 sd_pm_exit(un); 11323 } 11324 11325 SD_NEXT_IODONE(index, un, bp); 11326 11327 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11328 } 11329 11330 11331 /* 11332 * Function: sd_core_iostart 11333 * 11334 * Description: Primary driver function for enqueuing buf(9S) structs from 11335 * the system and initiating IO to the target device 11336 * 11337 * Context: Kernel thread context. Can sleep. 11338 * 11339 * Assumptions: - The given xp->xb_blkno is absolute 11340 * (ie, relative to the start of the device). 11341 * - The IO is to be done using the native blocksize of 11342 * the device, as specified in un->un_tgt_blocksize. 11343 */ 11344 /* ARGSUSED */ 11345 static void 11346 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11347 { 11348 struct sd_xbuf *xp; 11349 11350 ASSERT(un != NULL); 11351 ASSERT(bp != NULL); 11352 ASSERT(!mutex_owned(SD_MUTEX(un))); 11353 ASSERT(bp->b_resid == 0); 11354 11355 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11356 11357 xp = SD_GET_XBUF(bp); 11358 ASSERT(xp != NULL); 11359 11360 mutex_enter(SD_MUTEX(un)); 11361 11362 /* 11363 * If we are currently in the failfast state, fail any new IO 11364 * that has B_FAILFAST set, then return. 11365 */ 11366 if ((bp->b_flags & B_FAILFAST) && 11367 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11368 mutex_exit(SD_MUTEX(un)); 11369 bioerror(bp, EIO); 11370 bp->b_resid = bp->b_bcount; 11371 SD_BEGIN_IODONE(index, un, bp); 11372 return; 11373 } 11374 11375 if (SD_IS_DIRECT_PRIORITY(xp)) { 11376 /* 11377 * Priority command -- transport it immediately. 11378 * 11379 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11380 * because all direct priority commands should be associated 11381 * with error recovery actions which we don't want to retry. 11382 */ 11383 sd_start_cmds(un, bp); 11384 } else { 11385 /* 11386 * Normal command -- add it to the wait queue, then start 11387 * transporting commands from the wait queue. 11388 */ 11389 sd_add_buf_to_waitq(un, bp); 11390 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11391 sd_start_cmds(un, NULL); 11392 } 11393 11394 mutex_exit(SD_MUTEX(un)); 11395 11396 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11397 } 11398 11399 11400 /* 11401 * Function: sd_init_cdb_limits 11402 * 11403 * Description: This is to handle scsi_pkt initialization differences 11404 * between the driver platforms. 11405 * 11406 * Legacy behaviors: 11407 * 11408 * If the block number or the sector count exceeds the 11409 * capabilities of a Group 0 command, shift over to a 11410 * Group 1 command. We don't blindly use Group 1 11411 * commands because a) some drives (CDC Wren IVs) get a 11412 * bit confused, and b) there is probably a fair amount 11413 * of speed difference for a target to receive and decode 11414 * a 10 byte command instead of a 6 byte command. 11415 * 11416 * The xfer time difference of 6 vs 10 byte CDBs is 11417 * still significant so this code is still worthwhile. 
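* (For reference, the standard SCSI addressing limits involved here
* are: a Group 0 CDB carries a 21-bit LBA and an 8-bit count, a
* Group 1 CDB a 32-bit LBA and a 16-bit count, a Group 5 CDB a
* 32-bit LBA and a 32-bit count, and a Group 4 CDB a 64-bit LBA and
* a 32-bit count.)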
11418 * 10 byte CDBs are very inefficient with the fas HBA driver 11419 * and older disks. Each CDB byte took 1 usec with some 11420 * popular disks. 11421 * 11422 * Context: Must be called at attach time 11423 */ 11424 11425 static void 11426 sd_init_cdb_limits(struct sd_lun *un) 11427 { 11428 int hba_cdb_limit; 11429 11430 /* 11431 * Use CDB_GROUP1 commands for most devices except for 11432 * parallel SCSI fixed drives in which case we get better 11433 * performance using CDB_GROUP0 commands (where applicable). 11434 */ 11435 un->un_mincdb = SD_CDB_GROUP1; 11436 #if !defined(__fibre) 11437 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11438 !un->un_f_has_removable_media) { 11439 un->un_mincdb = SD_CDB_GROUP0; 11440 } 11441 #endif 11442 11443 /* 11444 * Try to read the max-cdb-length supported by the HBA. 11445 */ 11446 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11447 if (0 >= un->un_max_hba_cdb) { 11448 un->un_max_hba_cdb = CDB_GROUP4; 11449 hba_cdb_limit = SD_CDB_GROUP4; 11450 } else if (0 < un->un_max_hba_cdb && 11451 un->un_max_hba_cdb < CDB_GROUP1) { 11452 hba_cdb_limit = SD_CDB_GROUP0; 11453 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11454 un->un_max_hba_cdb < CDB_GROUP5) { 11455 hba_cdb_limit = SD_CDB_GROUP1; 11456 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11457 un->un_max_hba_cdb < CDB_GROUP4) { 11458 hba_cdb_limit = SD_CDB_GROUP5; 11459 } else { 11460 hba_cdb_limit = SD_CDB_GROUP4; 11461 } 11462 11463 /* 11464 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11465 * commands for fixed disks unless we are building for a 32 bit 11466 * kernel. 11467 */ 11468 #ifdef _LP64 11469 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11470 min(hba_cdb_limit, SD_CDB_GROUP4); 11471 #else 11472 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11473 min(hba_cdb_limit, SD_CDB_GROUP1); 11474 #endif 11475 11476 /* 11477 * x86 systems require the PKT_DMA_PARTIAL flag 11478 */ 11479 #if defined(__x86) 11480 un->un_pkt_flags = PKT_DMA_PARTIAL; 11481 #else 11482 un->un_pkt_flags = 0; 11483 #endif 11484 11485 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11486 ? sizeof (struct scsi_arq_status) : 1); 11487 un->un_cmd_timeout = (ushort_t)sd_io_time; 11488 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 11489 } 11490 11491 11492 /* 11493 * Function: sd_initpkt_for_buf 11494 * 11495 * Description: Allocate and initialize for transport a scsi_pkt struct, 11496 * based upon the info specified in the given buf struct. 11497 * 11498 * Assumes the xb_blkno in the request is absolute (ie, 11499 * relative to the start of the device, NOT the partition!). 11500 * Also assumes that the request is using the native block 11501 * size of the device (as returned by the READ CAPACITY 11502 * command). 11503 * 11504 * Return Code: SD_PKT_ALLOC_SUCCESS 11505 * SD_PKT_ALLOC_FAILURE 11506 * SD_PKT_ALLOC_FAILURE_NO_DMA 11507 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11508 * 11509 * Context: Kernel thread and may be called from software interrupt context 11510 * as part of a sdrunout callback.
This function may not block or 11511 * call routines that block 11512 */ 11513 11514 static int 11515 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11516 { 11517 struct sd_xbuf *xp; 11518 struct scsi_pkt *pktp = NULL; 11519 struct sd_lun *un; 11520 size_t blockcount; 11521 daddr_t startblock; 11522 int rval; 11523 int cmd_flags; 11524 11525 ASSERT(bp != NULL); 11526 ASSERT(pktpp != NULL); 11527 xp = SD_GET_XBUF(bp); 11528 ASSERT(xp != NULL); 11529 un = SD_GET_UN(bp); 11530 ASSERT(un != NULL); 11531 ASSERT(mutex_owned(SD_MUTEX(un))); 11532 ASSERT(bp->b_resid == 0); 11533 11534 SD_TRACE(SD_LOG_IO_CORE, un, 11535 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11536 11537 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11538 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11539 /* 11540 * Already have a scsi_pkt -- just need DMA resources. 11541 * We must recompute the CDB in case the mapping returns 11542 * a nonzero pkt_resid. 11543 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11544 * that is being retried, the unmap/remap of the DMA resources 11545 * will result in the entire transfer starting over again 11546 * from the very first block. 11547 */ 11548 ASSERT(xp->xb_pktp != NULL); 11549 pktp = xp->xb_pktp; 11550 } else { 11551 pktp = NULL; 11552 } 11553 #endif /* __i386 || __amd64 */ 11554 11555 startblock = xp->xb_blkno; /* Absolute block num. */ 11556 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11557 11558 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11559 11560 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11561 11562 #else 11563 11564 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11565 11566 #endif 11567 11568 /* 11569 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11570 * call scsi_init_pkt, and build the CDB. 11571 */ 11572 rval = sd_setup_rw_pkt(un, &pktp, bp, 11573 cmd_flags, sdrunout, (caddr_t)un, 11574 startblock, blockcount); 11575 11576 if (rval == 0) { 11577 /* 11578 * Success. 11579 * 11580 * If partial DMA is being used and required for this transfer, 11581 * set it up here. 11582 */ 11583 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11584 (pktp->pkt_resid != 0)) { 11585 11586 /* 11587 * Save the CDB length and pkt_resid for the 11588 * next xfer 11589 */ 11590 xp->xb_dma_resid = pktp->pkt_resid; 11591 11592 /* rezero resid */ 11593 pktp->pkt_resid = 0; 11594 11595 } else { 11596 xp->xb_dma_resid = 0; 11597 } 11598 11599 pktp->pkt_flags = un->un_tagflags; 11600 pktp->pkt_time = un->un_cmd_timeout; 11601 pktp->pkt_comp = sdintr; 11602 11603 pktp->pkt_private = bp; 11604 *pktpp = pktp; 11605 11606 SD_TRACE(SD_LOG_IO_CORE, un, 11607 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11608 11609 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11610 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11611 #endif 11612 11613 return (SD_PKT_ALLOC_SUCCESS); 11614 11615 } 11616 11617 /* 11618 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11619 * from sd_setup_rw_pkt. 11620 */ 11621 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11622 11623 if (rval == SD_PKT_ALLOC_FAILURE) { 11624 *pktpp = NULL; 11625 /* 11626 * Set the driver state to RWAIT to indicate the driver 11627 * is waiting on resource allocations. The driver will not 11628 * suspend, pm_suspend, or detach while the state is RWAIT. 11629 */ 11630 New_state(un, SD_STATE_RWAIT); 11631 11632 SD_ERROR(SD_LOG_IO_CORE, un, 11633 "sd_initpkt_for_buf: No pktp.
exit bp:0x%p\n", bp); 11634 11635 if ((bp->b_flags & B_ERROR) != 0) { 11636 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11637 } 11638 return (SD_PKT_ALLOC_FAILURE); 11639 } else { 11640 /* 11641 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11642 * 11643 * This should never happen. Maybe someone messed with the 11644 * kernel's minphys? 11645 */ 11646 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11647 "Request rejected: too large for CDB: " 11648 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11649 SD_ERROR(SD_LOG_IO_CORE, un, 11650 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11651 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11652 11653 } 11654 } 11655 11656 11657 /* 11658 * Function: sd_destroypkt_for_buf 11659 * 11660 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11661 * 11662 * Context: Kernel thread or interrupt context 11663 */ 11664 11665 static void 11666 sd_destroypkt_for_buf(struct buf *bp) 11667 { 11668 ASSERT(bp != NULL); 11669 ASSERT(SD_GET_UN(bp) != NULL); 11670 11671 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11672 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11673 11674 ASSERT(SD_GET_PKTP(bp) != NULL); 11675 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11676 11677 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11678 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11679 } 11680 11681 /* 11682 * Function: sd_setup_rw_pkt 11683 * 11684 * Description: Determines appropriate CDB group for the requested LBA 11685 * and transfer length, calls scsi_init_pkt, and builds 11686 * the CDB. Do not use for partial DMA transfers except 11687 * for the initial transfer since the CDB size must 11688 * remain constant. 11689 * 11690 * Context: Kernel thread and may be called from software interrupt 11691 * context as part of a sdrunout callback. This function may not 11692 * block or call routines that block 11693 */ 11694 11695 11696 int 11697 sd_setup_rw_pkt(struct sd_lun *un, 11698 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11699 int (*callback)(caddr_t), caddr_t callback_arg, 11700 diskaddr_t lba, uint32_t blockcount) 11701 { 11702 struct scsi_pkt *return_pktp; 11703 union scsi_cdb *cdbp; 11704 struct sd_cdbinfo *cp = NULL; 11705 int i; 11706 11707 /* 11708 * See which size CDB to use, based upon the request. 11709 */ 11710 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11711 11712 /* 11713 * Check lba and block count against sd_cdbtab limits. 11714 * In the partial DMA case, we have to use the same size 11715 * CDB for all the transfers. Check lba + blockcount 11716 * against the max LBA so we know that segment of the 11717 * transfer can use the CDB we select. 11718 */ 11719 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11720 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11721 11722 /* 11723 * The command will fit into the CDB type 11724 * specified by sd_cdbtab[i]. 11725 */ 11726 cp = sd_cdbtab + i; 11727 11728 /* 11729 * Call scsi_init_pkt so we can fill in the 11730 * CDB. 11731 */ 11732 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 11733 bp, cp->sc_grpcode, un->un_status_len, 0, 11734 flags, callback, callback_arg); 11735 11736 if (return_pktp != NULL) { 11737 11738 /* 11739 * Return new value of pkt 11740 */ 11741 *pktpp = return_pktp; 11742 11743 /* 11744 * To be safe, zero the CDB ensuring there is 11745 * no leftover data from a previous command.
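* (Note that cp->sc_grpcode doubles as the CDB length in bytes for
* the bzero() below: CDB_GROUP0, CDB_GROUP1, CDB_GROUP5 and
* CDB_GROUP4 are 6, 10, 12 and 16 bytes respectively.)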
11746 */ 11747 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 11748 11749 /* 11750 * Handle partial DMA mapping 11751 */ 11752 if (return_pktp->pkt_resid != 0) { 11753 11754 /* 11755 * Not going to xfer as many blocks as 11756 * originally expected 11757 */ 11758 blockcount -= 11759 SD_BYTES2TGTBLOCKS(un, 11760 return_pktp->pkt_resid); 11761 } 11762 11763 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 11764 11765 /* 11766 * Set command byte based on the CDB 11767 * type we matched. 11768 */ 11769 cdbp->scc_cmd = cp->sc_grpmask | 11770 ((bp->b_flags & B_READ) ? 11771 SCMD_READ : SCMD_WRITE); 11772 11773 SD_FILL_SCSI1_LUN(un, return_pktp); 11774 11775 /* 11776 * Fill in LBA and length 11777 */ 11778 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 11779 (cp->sc_grpcode == CDB_GROUP4) || 11780 (cp->sc_grpcode == CDB_GROUP0) || 11781 (cp->sc_grpcode == CDB_GROUP5)); 11782 11783 if (cp->sc_grpcode == CDB_GROUP1) { 11784 FORMG1ADDR(cdbp, lba); 11785 FORMG1COUNT(cdbp, blockcount); 11786 return (0); 11787 } else if (cp->sc_grpcode == CDB_GROUP4) { 11788 FORMG4LONGADDR(cdbp, lba); 11789 FORMG4COUNT(cdbp, blockcount); 11790 return (0); 11791 } else if (cp->sc_grpcode == CDB_GROUP0) { 11792 FORMG0ADDR(cdbp, lba); 11793 FORMG0COUNT(cdbp, blockcount); 11794 return (0); 11795 } else if (cp->sc_grpcode == CDB_GROUP5) { 11796 FORMG5ADDR(cdbp, lba); 11797 FORMG5COUNT(cdbp, blockcount); 11798 return (0); 11799 } 11800 11801 /* 11802 * It should be impossible to not match one 11803 * of the CDB types above, so we should never 11804 * reach this point. Set the CDB command byte 11805 * to test-unit-ready to avoid writing 11806 * to somewhere we don't intend. 11807 */ 11808 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 11809 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11810 } else { 11811 /* 11812 * Couldn't get scsi_pkt 11813 */ 11814 return (SD_PKT_ALLOC_FAILURE); 11815 } 11816 } 11817 } 11818 11819 /* 11820 * None of the available CDB types were suitable. This really 11821 * should never happen: on a 64 bit system we support 11822 * READ16/WRITE16 which will hold an entire 64 bit disk address 11823 * and on a 32 bit system we will refuse to bind to a device 11824 * larger than 2TB so addresses will never be larger than 32 bits. 11825 */ 11826 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11827 } 11828 11829 #if defined(__i386) || defined(__amd64) 11830 /* 11831 * Function: sd_setup_next_rw_pkt 11832 * 11833 * Description: Setup packet for partial DMA transfers, except for the 11834 * initial transfer. sd_setup_rw_pkt should be used for 11835 * the initial transfer. 11836 * 11837 * Context: Kernel thread and may be called from interrupt context. 11838 */ 11839 11840 int 11841 sd_setup_next_rw_pkt(struct sd_lun *un, 11842 struct scsi_pkt *pktp, struct buf *bp, 11843 diskaddr_t lba, uint32_t blockcount) 11844 { 11845 uchar_t com; 11846 union scsi_cdb *cdbp; 11847 uchar_t cdb_group_id; 11848 11849 ASSERT(pktp != NULL); 11850 ASSERT(pktp->pkt_cdbp != NULL); 11851 11852 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 11853 com = cdbp->scc_cmd; 11854 cdb_group_id = CDB_GROUPID(com); 11855 11856 ASSERT((cdb_group_id == CDB_GROUPID_0) || 11857 (cdb_group_id == CDB_GROUPID_1) || 11858 (cdb_group_id == CDB_GROUPID_4) || 11859 (cdb_group_id == CDB_GROUPID_5)); 11860 11861 /* 11862 * Move pkt to the next portion of the xfer. 11863 * func is NULL_FUNC so we do not have to release 11864 * the disk mutex here. 11865 */ 11866 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 11867 NULL_FUNC, NULL) == pktp) { 11868 /* Success. 
Handle partial DMA */ 11869 if (pktp->pkt_resid != 0) { 11870 blockcount -= 11871 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 11872 } 11873 11874 cdbp->scc_cmd = com; 11875 SD_FILL_SCSI1_LUN(un, pktp); 11876 if (cdb_group_id == CDB_GROUPID_1) { 11877 FORMG1ADDR(cdbp, lba); 11878 FORMG1COUNT(cdbp, blockcount); 11879 return (0); 11880 } else if (cdb_group_id == CDB_GROUPID_4) { 11881 FORMG4LONGADDR(cdbp, lba); 11882 FORMG4COUNT(cdbp, blockcount); 11883 return (0); 11884 } else if (cdb_group_id == CDB_GROUPID_0) { 11885 FORMG0ADDR(cdbp, lba); 11886 FORMG0COUNT(cdbp, blockcount); 11887 return (0); 11888 } else if (cdb_group_id == CDB_GROUPID_5) { 11889 FORMG5ADDR(cdbp, lba); 11890 FORMG5COUNT(cdbp, blockcount); 11891 return (0); 11892 } 11893 11894 /* Unreachable */ 11895 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11896 } 11897 11898 /* 11899 * Error setting up next portion of cmd transfer. 11900 * Something is definitely very wrong and this 11901 * should not happen. 11902 */ 11903 return (SD_PKT_ALLOC_FAILURE); 11904 } 11905 #endif /* defined(__i386) || defined(__amd64) */ 11906 11907 /* 11908 * Function: sd_initpkt_for_uscsi 11909 * 11910 * Description: Allocate and initialize for transport a scsi_pkt struct, 11911 * based upon the info specified in the given uscsi_cmd struct. 11912 * 11913 * Return Code: SD_PKT_ALLOC_SUCCESS 11914 * SD_PKT_ALLOC_FAILURE 11915 * SD_PKT_ALLOC_FAILURE_NO_DMA 11916 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11917 * 11918 * Context: Kernel thread and may be called from software interrupt context 11919 * as part of a sdrunout callback. This function may not block or 11920 * call routines that block 11921 */ 11922 11923 static int 11924 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 11925 { 11926 struct uscsi_cmd *uscmd; 11927 struct sd_xbuf *xp; 11928 struct scsi_pkt *pktp; 11929 struct sd_lun *un; 11930 uint32_t flags = 0; 11931 11932 ASSERT(bp != NULL); 11933 ASSERT(pktpp != NULL); 11934 xp = SD_GET_XBUF(bp); 11935 ASSERT(xp != NULL); 11936 un = SD_GET_UN(bp); 11937 ASSERT(un != NULL); 11938 ASSERT(mutex_owned(SD_MUTEX(un))); 11939 11940 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 11941 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 11942 ASSERT(uscmd != NULL); 11943 11944 SD_TRACE(SD_LOG_IO_CORE, un, 11945 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 11946 11947 /* 11948 * Allocate the scsi_pkt for the command. 11949 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 11950 * during scsi_init_pkt time and will continue to use the 11951 * same path as long as the same scsi_pkt is used without 11952 * intervening scsi_dma_free(). Since a uscsi command does 11953 * not call scsi_dmafree() before retrying a failed command, it 11954 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 11955 * set such that scsi_vhci can use another available path for 11956 * the retry. Besides, a uscsi command does not allow DMA breakup, 11957 * so there is no need to set PKT_DMA_PARTIAL flag. 11958 */ 11959 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 11960 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 11961 sizeof (struct scsi_arq_status), 0, 11962 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 11963 sdrunout, (caddr_t)un); 11964 11965 if (pktp == NULL) { 11966 *pktpp = NULL; 11967 /* 11968 * Set the driver state to RWAIT to indicate the driver 11969 * is waiting on resource allocations. The driver will not 11970 * suspend, pm_suspend, or detach while the state is RWAIT.
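* (The state is moved back to SD_STATE_NORMAL in sd_start_cmds()
* once the sdrunout() callback fires and a command can again be
* transported.)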
11971 */ 11972 New_state(un, SD_STATE_RWAIT); 11973 11974 SD_ERROR(SD_LOG_IO_CORE, un, 11975 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 11976 11977 if ((bp->b_flags & B_ERROR) != 0) { 11978 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11979 } 11980 return (SD_PKT_ALLOC_FAILURE); 11981 } 11982 11983 /* 11984 * We do not do DMA breakup for USCSI commands, so return failure 11985 * here if all the needed DMA resources were not allocated. 11986 */ 11987 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 11988 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 11989 scsi_destroy_pkt(pktp); 11990 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 11991 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 11992 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 11993 } 11994 11995 /* Init the cdb from the given uscsi struct */ 11996 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 11997 uscmd->uscsi_cdb[0], 0, 0, 0); 11998 11999 SD_FILL_SCSI1_LUN(un, pktp); 12000 12001 /* 12002 * Set up the optional USCSI flags. See the uscsi(7I) man page 12003 * for a listing of the supported flags. 12004 */ 12005 12006 if (uscmd->uscsi_flags & USCSI_SILENT) { 12007 flags |= FLAG_SILENT; 12008 } 12009 12010 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12011 flags |= FLAG_DIAGNOSE; 12012 } 12013 12014 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12015 flags |= FLAG_ISOLATE; 12016 } 12017 12018 if (un->un_f_is_fibre == FALSE) { 12019 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12020 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12021 } 12022 } 12023 12024 /* 12025 * Set the pkt flags here so we save time later. 12026 * Note: These flags are NOT in the uscsi man page!!! 12027 */ 12028 if (uscmd->uscsi_flags & USCSI_HEAD) { 12029 flags |= FLAG_HEAD; 12030 } 12031 12032 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12033 flags |= FLAG_NOINTR; 12034 } 12035 12036 /* 12037 * For tagged queueing, things get a bit complicated. 12038 * Check first for head of queue and last for ordered queue. 12039 * If neither head nor order, use the default driver tag flags. 12040 */ 12041 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12042 if (uscmd->uscsi_flags & USCSI_HTAG) { 12043 flags |= FLAG_HTAG; 12044 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12045 flags |= FLAG_OTAG; 12046 } else { 12047 flags |= un->un_tagflags & FLAG_TAGMASK; 12048 } 12049 } 12050 12051 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12052 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12053 } 12054 12055 pktp->pkt_flags = flags; 12056 12057 /* Copy the caller's CDB into the pkt... */ 12058 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12059 12060 if (uscmd->uscsi_timeout == 0) { 12061 pktp->pkt_time = un->un_uscsi_timeout; 12062 } else { 12063 pktp->pkt_time = uscmd->uscsi_timeout; 12064 } 12065 12066 /* need it later to identify USCSI request in sdintr */ 12067 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12068 12069 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12070 12071 pktp->pkt_private = bp; 12072 pktp->pkt_comp = sdintr; 12073 *pktpp = pktp; 12074 12075 SD_TRACE(SD_LOG_IO_CORE, un, 12076 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12077 12078 return (SD_PKT_ALLOC_SUCCESS); 12079 } 12080 12081 12082 /* 12083 * Function: sd_destroypkt_for_uscsi 12084 * 12085 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12086 * IOs. Also saves relevant info into the associated uscsi_cmd 12087 * struct.
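* (Specifically the SCSI status byte, the residual count and, when
* USCSI_RQENABLE is set, any auto-request-sense data; see the
* copies below.)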
12088 * 12089 * Context: May be called under interrupt context 12090 */ 12091 12092 static void 12093 sd_destroypkt_for_uscsi(struct buf *bp) 12094 { 12095 struct uscsi_cmd *uscmd; 12096 struct sd_xbuf *xp; 12097 struct scsi_pkt *pktp; 12098 struct sd_lun *un; 12099 12100 ASSERT(bp != NULL); 12101 xp = SD_GET_XBUF(bp); 12102 ASSERT(xp != NULL); 12103 un = SD_GET_UN(bp); 12104 ASSERT(un != NULL); 12105 ASSERT(!mutex_owned(SD_MUTEX(un))); 12106 pktp = SD_GET_PKTP(bp); 12107 ASSERT(pktp != NULL); 12108 12109 SD_TRACE(SD_LOG_IO_CORE, un, 12110 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12111 12112 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12113 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12114 ASSERT(uscmd != NULL); 12115 12116 /* Save the status and the residual into the uscsi_cmd struct */ 12117 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12118 uscmd->uscsi_resid = bp->b_resid; 12119 12120 /* 12121 * If enabled, copy any saved sense data into the area specified 12122 * by the uscsi command. 12123 */ 12124 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12125 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12126 /* 12127 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12128 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12129 */ 12130 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12131 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12132 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH); 12133 } 12134 12135 /* We are done with the scsi_pkt; free it now */ 12136 ASSERT(SD_GET_PKTP(bp) != NULL); 12137 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12138 12139 SD_TRACE(SD_LOG_IO_CORE, un, 12140 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12141 } 12142 12143 12144 /* 12145 * Function: sd_bioclone_alloc 12146 * 12147 * Description: Allocate a buf(9S) and init it as per the given buf 12148 * and the various arguments. The associated sd_xbuf 12149 * struct is (nearly) duplicated. The struct buf *bp 12150 * argument is saved in new_xp->xb_private. 12151 * 12152 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12153 * datalen - size of data area for the shadow bp 12154 * blkno - starting LBA 12155 * func - function pointer for b_iodone in the shadow buf. (May 12156 * be NULL if none.) 12157 * 12158 * Return Code: Pointer to the allocated buf(9S) struct 12159 * 12160 * Context: Can sleep. 12161 */ 12162 12163 static struct buf * 12164 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12165 daddr_t blkno, int (*func)(struct buf *)) 12166 { 12167 struct sd_lun *un; 12168 struct sd_xbuf *xp; 12169 struct sd_xbuf *new_xp; 12170 struct buf *new_bp; 12171 12172 ASSERT(bp != NULL); 12173 xp = SD_GET_XBUF(bp); 12174 ASSERT(xp != NULL); 12175 un = SD_GET_UN(bp); 12176 ASSERT(un != NULL); 12177 ASSERT(!mutex_owned(SD_MUTEX(un))); 12178 12179 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12180 NULL, KM_SLEEP); 12181 12182 new_bp->b_lblkno = blkno; 12183 12184 /* 12185 * Allocate an xbuf for the shadow bp and copy the contents of the 12186 * original xbuf into it. 12187 */ 12188 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12189 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12190 12191 /* 12192 * The given bp is automatically saved in the xb_private member 12193 * of the new xbuf. Callers are allowed to depend on this.
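*
* A typical (purely illustrative) use by a caller would be:
*
*	shadow_bp = sd_bioclone_alloc(bp, count, blkno, my_iodone);
*	...
*	orig_bp = (struct buf *)(SD_GET_XBUF(shadow_bp)->xb_private);
*
* where my_iodone is a hypothetical completion routine supplied by
* the caller.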
12194 */ 12195 new_xp->xb_private = bp; 12196 12197 new_bp->b_private = new_xp; 12198 12199 return (new_bp); 12200 } 12201 12202 /* 12203 * Function: sd_shadow_buf_alloc 12204 * 12205 * Description: Allocate a buf(9S) and init it as per the given buf 12206 * and the various arguments. The associated sd_xbuf 12207 * struct is (nearly) duplicated. The struct buf *bp 12208 * argument is saved in new_xp->xb_private. 12209 * 12210 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12211 * datalen - size of data area for the shadow bp 12212 * bflags - B_READ or B_WRITE (pseudo flag) 12213 * blkno - starting LBA 12214 * func - function pointer for b_iodone in the shadow buf. (May 12215 * be NULL if none.) 12216 * 12217 * Return Code: Pointer to the allocated buf(9S) struct 12218 * 12219 * Context: Can sleep. 12220 */ 12221 12222 static struct buf * 12223 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12224 daddr_t blkno, int (*func)(struct buf *)) 12225 { 12226 struct sd_lun *un; 12227 struct sd_xbuf *xp; 12228 struct sd_xbuf *new_xp; 12229 struct buf *new_bp; 12230 12231 ASSERT(bp != NULL); 12232 xp = SD_GET_XBUF(bp); 12233 ASSERT(xp != NULL); 12234 un = SD_GET_UN(bp); 12235 ASSERT(un != NULL); 12236 ASSERT(!mutex_owned(SD_MUTEX(un))); 12237 12238 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12239 bp_mapin(bp); 12240 } 12241 12242 bflags &= (B_READ | B_WRITE); 12243 #if defined(__i386) || defined(__amd64) 12244 new_bp = getrbuf(KM_SLEEP); 12245 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12246 new_bp->b_bcount = datalen; 12247 new_bp->b_flags = bflags | 12248 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12249 #else 12250 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12251 datalen, bflags, SLEEP_FUNC, NULL); 12252 #endif 12253 new_bp->av_forw = NULL; 12254 new_bp->av_back = NULL; 12255 new_bp->b_dev = bp->b_dev; 12256 new_bp->b_blkno = blkno; 12257 new_bp->b_iodone = func; 12258 new_bp->b_edev = bp->b_edev; 12259 new_bp->b_resid = 0; 12260 12261 /* We need to preserve the B_FAILFAST flag */ 12262 if (bp->b_flags & B_FAILFAST) { 12263 new_bp->b_flags |= B_FAILFAST; 12264 } 12265 12266 /* 12267 * Allocate an xbuf for the shadow bp and copy the contents of the 12268 * original xbuf into it. 12269 */ 12270 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12271 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12272 12273 /* Need later to copy data between the shadow buf & original buf! */ 12274 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12275 12276 /* 12277 * The given bp is automatically saved in the xb_private member 12278 * of the new xbuf. Callers are allowed to depend on this. 12279 */ 12280 new_xp->xb_private = bp; 12281 12282 new_bp->b_private = new_xp; 12283 12284 return (new_bp); 12285 } 12286 12287 /* 12288 * Function: sd_bioclone_free 12289 * 12290 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12291 * in the larger-than-partition operation. 12292 * 12293 * Context: May be called under interrupt context 12294 */ 12295 12296 static void 12297 sd_bioclone_free(struct buf *bp) 12298 { 12299 struct sd_xbuf *xp; 12300 12301 ASSERT(bp != NULL); 12302 xp = SD_GET_XBUF(bp); 12303 ASSERT(xp != NULL); 12304 12305 /* 12306 * Call bp_mapout() before freeing the buf, in case a lower 12307 * layer or HBA had done a bp_mapin(). We must do this here 12308 * as we are the "originator" of the shadow buf.
12309 */ 12310 bp_mapout(bp); 12311 12312 /* 12313 * Null out b_iodone before freeing the bp, to ensure that the driver 12314 * never gets confused by a stale value in this field. (Just a little 12315 * extra defensiveness here.) 12316 */ 12317 bp->b_iodone = NULL; 12318 12319 freerbuf(bp); 12320 12321 kmem_free(xp, sizeof (struct sd_xbuf)); 12322 } 12323 12324 /* 12325 * Function: sd_shadow_buf_free 12326 * 12327 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12328 * 12329 * Context: May be called under interrupt context 12330 */ 12331 12332 static void 12333 sd_shadow_buf_free(struct buf *bp) 12334 { 12335 struct sd_xbuf *xp; 12336 12337 ASSERT(bp != NULL); 12338 xp = SD_GET_XBUF(bp); 12339 ASSERT(xp != NULL); 12340 12341 #if defined(__sparc) 12342 /* 12343 * Call bp_mapout() before freeing the buf, in case a lower 12344 * layer or HBA had done a bp_mapin(). We must do this here 12345 * as we are the "originator" of the shadow buf. 12346 */ 12347 bp_mapout(bp); 12348 #endif 12349 12350 /* 12351 * Null out b_iodone before freeing the bp, to ensure that the driver 12352 * never gets confused by a stale value in this field. (Just a little 12353 * extra defensiveness here.) 12354 */ 12355 bp->b_iodone = NULL; 12356 12357 #if defined(__i386) || defined(__amd64) 12358 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12359 freerbuf(bp); 12360 #else 12361 scsi_free_consistent_buf(bp); 12362 #endif 12363 12364 kmem_free(xp, sizeof (struct sd_xbuf)); 12365 } 12366 12367 12368 /* 12369 * Function: sd_print_transport_rejected_message 12370 * 12371 * Description: This implements the ludicrously complex rules for printing 12372 * a "transport rejected" message. This is to address the 12373 * specific problem of having a flood of this error message 12374 * produced when a failover occurs. 12375 * 12376 * Context: Any. 12377 */ 12378 12379 static void 12380 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12381 int code) 12382 { 12383 ASSERT(un != NULL); 12384 ASSERT(mutex_owned(SD_MUTEX(un))); 12385 ASSERT(xp != NULL); 12386 12387 /* 12388 * Print the "transport rejected" message under the following 12389 * conditions: 12390 * 12391 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12392 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12393 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12394 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12395 * scsi_transport(9F) (which indicates that the target might have 12396 * gone off-line). This uses the un->un_tran_fatal_count 12397 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12398 * received, and reset to zero whenever a TRAN_ACCEPT is returned 12399 * from scsi_transport(). 12400 * 12401 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12402 * the preceding cases in order for the message to be printed.
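*
* Equivalently, the test below prints if and only if:
*
*	!FLAG_SILENT && (SD_LOGMASK_DIAG set ||
*	    code != TRAN_FATAL_ERROR || un->un_tran_fatal_count == 1)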
12403 */ 12404 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12405 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12406 (code != TRAN_FATAL_ERROR) || 12407 (un->un_tran_fatal_count == 1)) { 12408 switch (code) { 12409 case TRAN_BADPKT: 12410 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12411 "transport rejected bad packet\n"); 12412 break; 12413 case TRAN_FATAL_ERROR: 12414 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12415 "transport rejected fatal error\n"); 12416 break; 12417 default: 12418 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12419 "transport rejected (%d)\n", code); 12420 break; 12421 } 12422 } 12423 } 12424 } 12425 12426 12427 /* 12428 * Function: sd_add_buf_to_waitq 12429 * 12430 * Description: Add the given buf(9S) struct to the wait queue for the 12431 * instance. If sorting is enabled, then the buf is added 12432 * to the queue via an elevator sort algorithm (a la 12433 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12434 * If sorting is not enabled, then the buf is just added 12435 * to the end of the wait queue. 12436 * 12437 * Return Code: void 12438 * 12439 * Context: Does not sleep/block, therefore technically can be called 12440 * from any context. However if sorting is enabled then the 12441 * execution time is indeterminate, and may take a long time if 12442 * the wait queue grows large. 12443 */ 12444 12445 static void 12446 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12447 { 12448 struct buf *ap; 12449 12450 ASSERT(bp != NULL); 12451 ASSERT(un != NULL); 12452 ASSERT(mutex_owned(SD_MUTEX(un))); 12453 12454 /* If the queue is empty, add the buf as the only entry & return. */ 12455 if (un->un_waitq_headp == NULL) { 12456 ASSERT(un->un_waitq_tailp == NULL); 12457 un->un_waitq_headp = un->un_waitq_tailp = bp; 12458 bp->av_forw = NULL; 12459 return; 12460 } 12461 12462 ASSERT(un->un_waitq_tailp != NULL); 12463 12464 /* 12465 * If sorting is disabled, just add the buf to the tail end of 12466 * the wait queue and return. 12467 */ 12468 if (un->un_f_disksort_disabled) { 12469 un->un_waitq_tailp->av_forw = bp; 12470 un->un_waitq_tailp = bp; 12471 bp->av_forw = NULL; 12472 return; 12473 } 12474 12475 /* 12476 * Sort through the list of requests currently on the wait queue 12477 * and add the new buf request at the appropriate position. 12478 * 12479 * The un->un_waitq_headp is an activity chain pointer on which 12480 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12481 * first queue holds those requests which are positioned after 12482 * the current SD_GET_BLKNO() (in the first request); the second holds 12483 * requests which came in after their SD_GET_BLKNO() number was passed. 12484 * Thus we implement a one-way scan, retracting after reaching 12485 * the end of the drive to the first request on the second 12486 * queue, at which time it becomes the first queue. 12487 * A one-way scan is natural because of the way UNIX read-ahead 12488 * blocks are allocated. 12489 * 12490 * If we lie after the first request, then we must locate the 12491 * second request list and add ourselves to it. 12492 */ 12493 ap = un->un_waitq_headp; 12494 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12495 while (ap->av_forw != NULL) { 12496 /* 12497 * Look for an "inversion" in the (normally 12498 * ascending) block numbers. This indicates 12499 * the start of the second request list. 12500 */ 12501 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12502 /* 12503 * Search the second request list for the 12504 * first request at a larger block number.
12505 * We go before that; however if there is 12506 * no such request, we go at the end. 12507 */ 12508 do { 12509 if (SD_GET_BLKNO(bp) < 12510 SD_GET_BLKNO(ap->av_forw)) { 12511 goto insert; 12512 } 12513 ap = ap->av_forw; 12514 } while (ap->av_forw != NULL); 12515 goto insert; /* after last */ 12516 } 12517 ap = ap->av_forw; 12518 } 12519 12520 /* 12521 * No inversions... we will go after the last, and 12522 * be the first request in the second request list. 12523 */ 12524 goto insert; 12525 } 12526 12527 /* 12528 * Request is at/after the current request... 12529 * sort in the first request list. 12530 */ 12531 while (ap->av_forw != NULL) { 12532 /* 12533 * We want to go after the current request (1) if 12534 * there is an inversion after it (i.e. it is the end 12535 * of the first request list), or (2) if the next 12536 * request is a larger block no. than our request. 12537 */ 12538 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12539 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12540 goto insert; 12541 } 12542 ap = ap->av_forw; 12543 } 12544 12545 /* 12546 * Neither a second list nor a larger request, therefore 12547 * we go at the end of the first list (which is the same 12548 * as the end of the whole shebang). 12549 */ 12550 insert: 12551 bp->av_forw = ap->av_forw; 12552 ap->av_forw = bp; 12553 12554 /* 12555 * If we inserted onto the tail end of the waitq, make sure the 12556 * tail pointer is updated. 12557 */ 12558 if (ap == un->un_waitq_tailp) { 12559 un->un_waitq_tailp = bp; 12560 } 12561 } 12562 12563 12564 /* 12565 * Function: sd_start_cmds 12566 * 12567 * Description: Remove and transport cmds from the driver queues. 12568 * 12569 * Arguments: un - pointer to the unit (soft state) struct for the target. 12570 * 12571 * immed_bp - ptr to a buf to be transported immediately. Only 12572 * the immed_bp is transported; bufs on the waitq are not 12573 * processed and the un_retry_bp is not checked. If immed_bp is 12574 * NULL, then normal queue processing is performed. 12575 * 12576 * Context: May be called from kernel thread context, interrupt context, 12577 * or runout callback context. This function may not block or 12578 * call routines that block. 12579 */ 12580 12581 static void 12582 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12583 { 12584 struct sd_xbuf *xp; 12585 struct buf *bp; 12586 void (*statp)(kstat_io_t *); 12587 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12588 void (*saved_statp)(kstat_io_t *); 12589 #endif 12590 int rval; 12591 12592 ASSERT(un != NULL); 12593 ASSERT(mutex_owned(SD_MUTEX(un))); 12594 ASSERT(un->un_ncmds_in_transport >= 0); 12595 ASSERT(un->un_throttle >= 0); 12596 12597 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12598 12599 do { 12600 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12601 saved_statp = NULL; 12602 #endif 12603 12604 /* 12605 * If we are syncing or dumping, fail the command to 12606 * avoid recursively calling back into scsi_transport(). 12607 * The dump I/O itself uses a separate code path so this 12608 * only prevents non-dump I/O from being sent while dumping. 12609 * File system sync takes place before dumping begins. 12610 * During panic, filesystem I/O is allowed provided 12611 * un_in_callback is <= 1. This is to prevent recursion 12612 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12613 * sd_start_cmds and so on. See panic.c for more information 12614 * about the states the system can be in during panic.
12615 */ 12616 if ((un->un_state == SD_STATE_DUMPING) || 12617 (ddi_in_panic() && (un->un_in_callback > 1))) { 12618 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12619 "sd_start_cmds: panicking\n"); 12620 goto exit; 12621 } 12622 12623 if ((bp = immed_bp) != NULL) { 12624 /* 12625 * We have a bp that must be transported immediately. 12626 * It's OK to transport the immed_bp here without doing 12627 * the throttle limit check because the immed_bp is 12628 * always used in a retry/recovery case. This means 12629 * that we know we are not at the throttle limit by 12630 * virtue of the fact that to get here we must have 12631 * already gotten a command back via sdintr(). This also 12632 * relies on (1) the command on un_retry_bp preventing 12633 * further commands from the waitq from being issued; 12634 * and (2) the code in sd_retry_command checking the 12635 * throttle limit before issuing a delayed or immediate 12636 * retry. This holds even if the throttle limit is 12637 * currently ratcheted down from its maximum value. 12638 */ 12639 statp = kstat_runq_enter; 12640 if (bp == un->un_retry_bp) { 12641 ASSERT((un->un_retry_statp == NULL) || 12642 (un->un_retry_statp == kstat_waitq_enter) || 12643 (un->un_retry_statp == 12644 kstat_runq_back_to_waitq)); 12645 /* 12646 * If the waitq kstat was incremented when 12647 * sd_set_retry_bp() queued this bp for a retry, 12648 * then we must set up statp so that the waitq 12649 * count will get decremented correctly below. 12650 * Also we must clear un->un_retry_statp to 12651 * ensure that we do not act on a stale value 12652 * in this field. 12653 */ 12654 if ((un->un_retry_statp == kstat_waitq_enter) || 12655 (un->un_retry_statp == 12656 kstat_runq_back_to_waitq)) { 12657 statp = kstat_waitq_to_runq; 12658 } 12659 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12660 saved_statp = un->un_retry_statp; 12661 #endif 12662 un->un_retry_statp = NULL; 12663 12664 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12665 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12666 "un_throttle:%d un_ncmds_in_transport:%d\n", 12667 un, un->un_retry_bp, un->un_throttle, 12668 un->un_ncmds_in_transport); 12669 } else { 12670 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12671 "processing priority bp:0x%p\n", bp); 12672 } 12673 12674 } else if ((bp = un->un_waitq_headp) != NULL) { 12675 /* 12676 * A command on the waitq is ready to go, but do not 12677 * send it if: 12678 * 12679 * (1) the throttle limit has been reached, or 12680 * (2) a retry is pending, or 12681 * (3) a START_STOP_UNIT callback is pending, or 12682 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12683 * command is pending. 12684 * 12685 * For all of these conditions, IO processing will 12686 * restart after the condition is cleared. 12687 */ 12688 if (un->un_ncmds_in_transport >= un->un_throttle) { 12689 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12690 "sd_start_cmds: exiting, " 12691 "throttle limit reached!\n"); 12692 goto exit; 12693 } 12694 if (un->un_retry_bp != NULL) { 12695 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12696 "sd_start_cmds: exiting, retry pending!\n"); 12697 goto exit; 12698 } 12699 if (un->un_startstop_timeid != NULL) { 12700 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12701 "sd_start_cmds: exiting, " 12702 "START_STOP pending!\n"); 12703 goto exit; 12704 } 12705 if (un->un_direct_priority_timeid != NULL) { 12706 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12707 "sd_start_cmds: exiting, " 12708 "SD_PATH_DIRECT_PRIORITY cmd.
pending!\n"); 12709 goto exit; 12710 } 12711 12712 /* Dequeue the command */ 12713 un->un_waitq_headp = bp->av_forw; 12714 if (un->un_waitq_headp == NULL) { 12715 un->un_waitq_tailp = NULL; 12716 } 12717 bp->av_forw = NULL; 12718 statp = kstat_waitq_to_runq; 12719 SD_TRACE(SD_LOG_IO_CORE, un, 12720 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 12721 12722 } else { 12723 /* No work to do so bail out now */ 12724 SD_TRACE(SD_LOG_IO_CORE, un, 12725 "sd_start_cmds: no more work, exiting!\n"); 12726 goto exit; 12727 } 12728 12729 /* 12730 * Reset the state to normal. This is the mechanism by which 12731 * the state transitions from either SD_STATE_RWAIT or 12732 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 12733 * If state is SD_STATE_PM_CHANGING then this command is 12734 * part of the device power control and the state must 12735 * not be put back to normal. Doing so would would 12736 * allow new commands to proceed when they shouldn't, 12737 * the device may be going off. 12738 */ 12739 if ((un->un_state != SD_STATE_SUSPENDED) && 12740 (un->un_state != SD_STATE_PM_CHANGING)) { 12741 New_state(un, SD_STATE_NORMAL); 12742 } 12743 12744 xp = SD_GET_XBUF(bp); 12745 ASSERT(xp != NULL); 12746 12747 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12748 /* 12749 * Allocate the scsi_pkt if we need one, or attach DMA 12750 * resources if we have a scsi_pkt that needs them. The 12751 * latter should only occur for commands that are being 12752 * retried. 12753 */ 12754 if ((xp->xb_pktp == NULL) || 12755 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 12756 #else 12757 if (xp->xb_pktp == NULL) { 12758 #endif 12759 /* 12760 * There is no scsi_pkt allocated for this buf. Call 12761 * the initpkt function to allocate & init one. 12762 * 12763 * The scsi_init_pkt runout callback functionality is 12764 * implemented as follows: 12765 * 12766 * 1) The initpkt function always calls 12767 * scsi_init_pkt(9F) with sdrunout specified as the 12768 * callback routine. 12769 * 2) A successful packet allocation is initialized and 12770 * the I/O is transported. 12771 * 3) The I/O associated with an allocation resource 12772 * failure is left on its queue to be retried via 12773 * runout or the next I/O. 12774 * 4) The I/O associated with a DMA error is removed 12775 * from the queue and failed with EIO. Processing of 12776 * the transport queues is also halted to be 12777 * restarted via runout or the next I/O. 12778 * 5) The I/O associated with a CDB size or packet 12779 * size error is removed from the queue and failed 12780 * with EIO. Processing of the transport queues is 12781 * continued. 12782 * 12783 * Note: there is no interface for canceling a runout 12784 * callback. To prevent the driver from detaching or 12785 * suspending while a runout is pending the driver 12786 * state is set to SD_STATE_RWAIT 12787 * 12788 * Note: using the scsi_init_pkt callback facility can 12789 * result in an I/O request persisting at the head of 12790 * the list which cannot be satisfied even after 12791 * multiple retries. In the future the driver may 12792 * implement some kind of maximum runout count before 12793 * failing an I/O. 12794 * 12795 * Note: the use of funcp below may seem superfluous, 12796 * but it helps warlock figure out the correct 12797 * initpkt function calls (see [s]sd.wlcmd). 
12798 */ 12799 struct scsi_pkt *pktp; 12800 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 12801 12802 ASSERT(bp != un->un_rqs_bp); 12803 12804 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 12805 switch ((*funcp)(bp, &pktp)) { 12806 case SD_PKT_ALLOC_SUCCESS: 12807 xp->xb_pktp = pktp; 12808 SD_TRACE(SD_LOG_IO_CORE, un, 12809 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 12810 pktp); 12811 goto got_pkt; 12812 12813 case SD_PKT_ALLOC_FAILURE: 12814 /* 12815 * Temporary (hopefully) resource depletion. 12816 * Since retries and RQS commands always have a 12817 * scsi_pkt allocated, these cases should never 12818 * get here. So the only cases this needs to 12819 * handle is a bp from the waitq (which we put 12820 * back onto the waitq for sdrunout), or a bp 12821 * sent as an immed_bp (which we just fail). 12822 */ 12823 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12824 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 12825 12826 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12827 12828 if (bp == immed_bp) { 12829 /* 12830 * If SD_XB_DMA_FREED is clear, then 12831 * this is a failure to allocate a 12832 * scsi_pkt, and we must fail the 12833 * command. 12834 */ 12835 if ((xp->xb_pkt_flags & 12836 SD_XB_DMA_FREED) == 0) { 12837 break; 12838 } 12839 12840 /* 12841 * If this immediate command is NOT our 12842 * un_retry_bp, then we must fail it. 12843 */ 12844 if (bp != un->un_retry_bp) { 12845 break; 12846 } 12847 12848 /* 12849 * We get here if this cmd is our 12850 * un_retry_bp that was DMAFREED, but 12851 * scsi_init_pkt() failed to reallocate 12852 * DMA resources when we attempted to 12853 * retry it. This can happen when an 12854 * mpxio failover is in progress, but 12855 * we don't want to just fail the 12856 * command in this case. 12857 * 12858 * Use timeout(9F) to restart it after 12859 * a 100ms delay. We don't want to 12860 * let sdrunout() restart it, because 12861 * sdrunout() is just supposed to start 12862 * commands that are sitting on the 12863 * wait queue. The un_retry_bp stays 12864 * set until the command completes, but 12865 * sdrunout can be called many times 12866 * before that happens. Since sdrunout 12867 * cannot tell if the un_retry_bp is 12868 * already in the transport, it could 12869 * end up calling scsi_transport() for 12870 * the un_retry_bp multiple times. 12871 * 12872 * Also: don't schedule the callback 12873 * if some other callback is already 12874 * pending. 12875 */ 12876 if (un->un_retry_statp == NULL) { 12877 /* 12878 * restore the kstat pointer to 12879 * keep kstat counts coherent 12880 * when we do retry the command. 12881 */ 12882 un->un_retry_statp = 12883 saved_statp; 12884 } 12885 12886 if ((un->un_startstop_timeid == NULL) && 12887 (un->un_retry_timeid == NULL) && 12888 (un->un_direct_priority_timeid == 12889 NULL)) { 12890 12891 un->un_retry_timeid = 12892 timeout( 12893 sd_start_retry_command, 12894 un, SD_RESTART_TIMEOUT); 12895 } 12896 goto exit; 12897 } 12898 12899 #else 12900 if (bp == immed_bp) { 12901 break; /* Just fail the command */ 12902 } 12903 #endif 12904 12905 /* Add the buf back to the head of the waitq */ 12906 bp->av_forw = un->un_waitq_headp; 12907 un->un_waitq_headp = bp; 12908 if (un->un_waitq_tailp == NULL) { 12909 un->un_waitq_tailp = bp; 12910 } 12911 goto exit; 12912 12913 case SD_PKT_ALLOC_FAILURE_NO_DMA: 12914 /* 12915 * HBA DMA resource failure. Fail the command 12916 * and continue processing of the queues. 
12917 */ 12918 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12919 "sd_start_cmds: " 12920 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 12921 break; 12922 12923 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 12924 /* 12925 * Note:x86: Partial DMA mapping not supported 12926 * for USCSI commands, and all the needed DMA 12927 * resources were not allocated. 12928 */ 12929 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12930 "sd_start_cmds: " 12931 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 12932 break; 12933 12934 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 12935 /* 12936 * Note:x86: Request cannot fit into CDB based 12937 * on lba and len. 12938 */ 12939 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12940 "sd_start_cmds: " 12941 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 12942 break; 12943 12944 default: 12945 /* Should NEVER get here! */ 12946 panic("scsi_initpkt error"); 12947 /*NOTREACHED*/ 12948 } 12949 12950 /* 12951 * Fatal error in allocating a scsi_pkt for this buf. 12952 * Update kstats & return the buf with an error code. 12953 * We must use sd_return_failed_command_no_restart() to 12954 * avoid a recursive call back into sd_start_cmds(). 12955 * However this also means that we must keep processing 12956 * the waitq here in order to avoid stalling. 12957 */ 12958 if (statp == kstat_waitq_to_runq) { 12959 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 12960 } 12961 sd_return_failed_command_no_restart(un, bp, EIO); 12962 if (bp == immed_bp) { 12963 /* immed_bp is gone by now, so clear this */ 12964 immed_bp = NULL; 12965 } 12966 continue; 12967 } 12968 got_pkt: 12969 if (bp == immed_bp) { 12970 /* goto the head of the class.... */ 12971 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 12972 } 12973 12974 un->un_ncmds_in_transport++; 12975 SD_UPDATE_KSTATS(un, statp, bp); 12976 12977 /* 12978 * Call scsi_transport() to send the command to the target. 12979 * According to SCSA architecture, we must drop the mutex here 12980 * before calling scsi_transport() in order to avoid deadlock. 12981 * Note that the scsi_pkt's completion routine can be executed 12982 * (from interrupt context) even before the call to 12983 * scsi_transport() returns. 12984 */ 12985 SD_TRACE(SD_LOG_IO_CORE, un, 12986 "sd_start_cmds: calling scsi_transport()\n"); 12987 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 12988 12989 mutex_exit(SD_MUTEX(un)); 12990 rval = scsi_transport(xp->xb_pktp); 12991 mutex_enter(SD_MUTEX(un)); 12992 12993 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12994 "sd_start_cmds: scsi_transport() returned %d\n", rval); 12995 12996 switch (rval) { 12997 case TRAN_ACCEPT: 12998 /* Clear this with every pkt accepted by the HBA */ 12999 un->un_tran_fatal_count = 0; 13000 break; /* Success; try the next cmd (if any) */ 13001 13002 case TRAN_BUSY: 13003 un->un_ncmds_in_transport--; 13004 ASSERT(un->un_ncmds_in_transport >= 0); 13005 13006 /* 13007 * Don't retry request sense, the sense data 13008 * is lost when another request is sent. 13009 * Free up the rqs buf and retry 13010 * the original failed cmd. Update kstat. 13011 */ 13012 if (bp == un->un_rqs_bp) { 13013 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13014 bp = sd_mark_rqs_idle(un, xp); 13015 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13016 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13017 kstat_waitq_enter); 13018 goto exit; 13019 } 13020 13021 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13022 /* 13023 * Free the DMA resources for the scsi_pkt. 
This will 13024 * allow mpxio to select another path the next time 13025 * we call scsi_transport() with this scsi_pkt. 13026 * See sdintr() for the rationalization behind this. 13027 */ 13028 if ((un->un_f_is_fibre == TRUE) && 13029 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13030 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13031 scsi_dmafree(xp->xb_pktp); 13032 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13033 } 13034 #endif 13035 13036 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13037 /* 13038 * Commands that are SD_PATH_DIRECT_PRIORITY 13039 * are for error recovery situations. These do 13040 * not use the normal command waitq, so if they 13041 * get a TRAN_BUSY we cannot put them back onto 13042 * the waitq for later retry. One possible 13043 * problem is that there could already be some 13044 * other command on un_retry_bp that is waiting 13045 * for this one to complete, so we would be 13046 * deadlocked if we put this command back onto 13047 * the waitq for later retry (since un_retry_bp 13048 * must complete before the driver gets back to 13049 * commands on the waitq). 13050 * 13051 * To avoid deadlock we must schedule a callback 13052 * that will restart this command after a set 13053 * interval. This should keep retrying for as 13054 * long as the underlying transport keeps 13055 * returning TRAN_BUSY (just like for other 13056 * commands). Use the same timeout interval as 13057 * for the ordinary TRAN_BUSY retry. 13058 */ 13059 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13060 "sd_start_cmds: scsi_transport() returned " 13061 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13062 13063 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13064 un->un_direct_priority_timeid = 13065 timeout(sd_start_direct_priority_command, 13066 bp, SD_BSY_TIMEOUT / 500); 13067 13068 goto exit; 13069 } 13070 13071 /* 13072 * For TRAN_BUSY, we want to reduce the throttle value, 13073 * unless we are retrying a command. 13074 */ 13075 if (bp != un->un_retry_bp) { 13076 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13077 } 13078 13079 /* 13080 * Set up the bp to be tried again 10 ms later. 13081 * Note:x86: Is there a timeout value in the sd_lun 13082 * for this condition? 13083 */ 13084 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13085 kstat_runq_back_to_waitq); 13086 goto exit; 13087 13088 case TRAN_FATAL_ERROR: 13089 un->un_tran_fatal_count++; 13090 /* FALLTHRU */ 13091 13092 case TRAN_BADPKT: 13093 default: 13094 un->un_ncmds_in_transport--; 13095 ASSERT(un->un_ncmds_in_transport >= 0); 13096 13097 /* 13098 * If this is our REQUEST SENSE command with a 13099 * transport error, we must get back the pointers 13100 * to the original buf, and mark the REQUEST 13101 * SENSE command as "available". 13102 */ 13103 if (bp == un->un_rqs_bp) { 13104 bp = sd_mark_rqs_idle(un, xp); 13105 xp = SD_GET_XBUF(bp); 13106 } else { 13107 /* 13108 * Legacy behavior: do not update transport 13109 * error count for request sense commands. 13110 */ 13111 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13112 } 13113 13114 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13115 sd_print_transport_rejected_message(un, xp, rval); 13116 13117 /* 13118 * We must use sd_return_failed_command_no_restart() to 13119 * avoid a recursive call back into sd_start_cmds(). 13120 * However this also means that we must keep processing 13121 * the waitq here in order to avoid stalling. 
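
/*
 * Illustrative sketch (editor's note, not driver code): sd_start_cmds()
 * above follows the usual SCSA dispatch rule of dropping the softstate
 * mutex around scsi_transport(9F), because the packet's completion
 * routine may run from interrupt context, and contend for the same
 * mutex, before scsi_transport() even returns:
 *
 *      mutex_exit(SD_MUTEX(un));
 *      rval = scsi_transport(xp->xb_pktp);
 *      mutex_enter(SD_MUTEX(un));
 *
 * Only after re-acquiring the mutex is rval (TRAN_ACCEPT, TRAN_BUSY,
 * TRAN_FATAL_ERROR, ...) acted upon.
 */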


/*
 * Function: sd_return_command
 *
 * Description: Returns a command to its originator (with or without an
 *      error). Also starts commands waiting to be transported
 *      to the target.
 *
 * Context: May be called from interrupt, kernel, or timeout context
 */

static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
    struct sd_xbuf *xp;
#if defined(__i386) || defined(__amd64)
    struct scsi_pkt *pktp;
#endif

    ASSERT(bp != NULL);
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != un->un_rqs_bp);
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);

#if defined(__i386) || defined(__amd64)
    pktp = SD_GET_PKTP(bp);
#endif

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

#if defined(__i386) || defined(__amd64)
    /*
     * Note:x86: check for the "sdrestart failed" case.
     */
    if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
        (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
        (xp->xb_pktp->pkt_resid == 0)) {

        if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
            /*
             * Successfully set up next portion of cmd
             * transfer, try sending it
             */
            sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
                NULL, NULL, 0, (clock_t)0, NULL);
            sd_start_cmds(un, NULL);
            return; /* Note:x86: need a return here? */
        }
    }
#endif

    /*
     * If this is the failfast bp, clear it from un_failfast_bp. This
     * can happen if upon being re-tried the failfast bp either
     * succeeded or encountered another error (possibly even a different
     * error than the one that precipitated the failfast state, but in
     * that case it would have had to exhaust retries as well).
     * Regardless, this should not occur while the instance is in the
     * active failfast state.
     */
    if (bp == un->un_failfast_bp) {
        ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
        un->un_failfast_bp = NULL;
    }

    /*
     * Clear the failfast state upon successful completion of ANY cmd.
     */
    if (bp->b_error == 0) {
        un->un_failfast_state = SD_FAILFAST_INACTIVE;
    }

    /*
     * If the command was retried one or more times, show that we are
     * done with it, and allow processing of the waitq to resume.
     */
    if (bp == un->un_retry_bp) {
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_return_command: un:0x%p: "
            "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
        un->un_retry_bp = NULL;
        un->un_retry_statp = NULL;
    }

    SD_UPDATE_RDWR_STATS(un, bp);
    SD_UPDATE_PARTITION_STATS(un, bp);

    switch (un->un_state) {
    case SD_STATE_SUSPENDED:
        /*
         * Notify any threads waiting in sd_ddi_suspend() that
         * a command completion has occurred.
         */
        cv_broadcast(&un->un_disk_busy_cv);
        break;
    default:
        sd_start_cmds(un, NULL);
        break;
    }

    /* Return this command up the iodone chain to its originator. */
    mutex_exit(SD_MUTEX(un));

    (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
    xp->xb_pktp = NULL;

    SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

    ASSERT(!mutex_owned(SD_MUTEX(un)));
    mutex_enter(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 * Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
    ASSERT(bp != NULL);
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_return_failed_command: entry\n");

    /*
     * b_resid could already be nonzero due to a partial data
     * transfer, so do not change it here.
     */
    SD_BIOERROR(bp, errcode);

    sd_return_command(un, bp);
    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_return_failed_command: exit\n");
}
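
/*
 * Illustrative note (editor's sketch, not driver code): both return
 * routines above complete a buf by unwinding the layered iodone chain.
 * The xb_chain_iodone index selects both the destroypkt routine and the
 * first iodone routine for this buf, conceptually:
 *
 *      mutex_exit(SD_MUTEX(un));
 *      (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
 *      SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
 *      mutex_enter(SD_MUTEX(un));
 *
 * The mutex is dropped first because the iodone chain ultimately hands
 * the buf back to its originator.
 */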


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *      call back into sd_start_cmds will be issued.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
    int errcode)
{
    struct sd_xbuf *xp;

    ASSERT(bp != NULL);
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);
    ASSERT(errcode != 0);

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_return_failed_command_no_restart: entry\n");

    /*
     * b_resid could already be nonzero due to a partial data
     * transfer, so do not change it here.
     */
    SD_BIOERROR(bp, errcode);

    /*
     * If this is the failfast bp, clear it. This can happen if the
     * failfast bp encountered a fatal error when we attempted to
     * re-try it (such as a scsi_transport(9F) failure). However
     * we should NOT be in an active failfast state if the failfast
     * bp is not NULL.
     */
    if (bp == un->un_failfast_bp) {
        ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
        un->un_failfast_bp = NULL;
    }

    if (bp == un->un_retry_bp) {
        /*
         * This command was retried one or more times. Show that we are
         * done with it, and allow processing of the waitq to resume.
         */
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_return_failed_command_no_restart: "
            "un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
        un->un_retry_bp = NULL;
        un->un_retry_statp = NULL;
    }

    SD_UPDATE_RDWR_STATS(un, bp);
    SD_UPDATE_PARTITION_STATS(un, bp);

    mutex_exit(SD_MUTEX(un));

    if (xp->xb_pktp != NULL) {
        (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
        xp->xb_pktp = NULL;
    }

    SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

    mutex_enter(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: Queue up a command for retry, or (optionally) fail it
 *      if retry counts are exhausted.
 *
 * Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *      bp - Pointer to the buf for the command to be retried.
 *
 *      retry_check_flag - Flag to see which (if any) of the retry
 *          counts should be decremented/checked. If the indicated
 *          retry count is exhausted, then the command will not be
 *          retried; it will be failed instead. This should use a
 *          value equal to one of the following:
 *
 *              SD_RETRIES_NOCHECK
 *              SD_RETRIES_STANDARD
 *              SD_RETRIES_VICTIM
 *              SD_RETRIES_UA
 *              SD_RETRIES_BUSY
 *
 *          Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *          if the check should be made to see if FLAG_ISOLATE is set
 *          in the pkt. If FLAG_ISOLATE is set, then the command is
 *          not retried, it is simply failed.
 *
 *      user_funcp - Ptr to function to call before dispatching the
 *          command. May be NULL if no action needs to be performed.
 *          (Primarily intended for printing messages.)
 *
 *      user_arg - Optional argument to be passed along to
 *          the user_funcp call.
 *
 *      failure_code - errno return code to set in the bp if the
 *          command is going to be failed.
 *
 *      retry_delay - Retry delay interval in (clock_t) units. May
 *          be zero, which indicates that the command should be
 *          retried immediately (i.e., without an intervening delay).
 *
 *      statp - Ptr to kstat function to be updated if the command
 *          is queued for a delayed retry. May be NULL if no kstat
 *          update is desired.
 *
 * Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
    code), void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
    struct sd_xbuf *xp;
    struct scsi_pkt *pktp;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);
    pktp = SD_GET_PKTP(bp);
    ASSERT(pktp != NULL);

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
        "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

    /*
     * If we are syncing or dumping, fail the command to avoid
     * recursively calling back into scsi_transport().
     */
    if (ddi_in_panic()) {
        goto fail_command_no_log;
    }

    /*
     * We should never be retrying a command with FLAG_DIAGNOSE set, so
     * log an error and fail the command.
     */
    if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
        scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
            "ERROR, retrying FLAG_DIAGNOSE command.\n");
        sd_dump_memory(un, SD_LOG_IO, "CDB",
            (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
        sd_dump_memory(un, SD_LOG_IO, "Sense Data",
            (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
        goto fail_command;
    }

    /*
     * If we are suspended, then put the command onto the head of the
     * wait queue since we don't want to start more commands, and
     * clear un_retry_bp. When we are resumed, we will handle the
     * commands in the wait queue.
     */
    switch (un->un_state) {
    case SD_STATE_SUSPENDED:
    case SD_STATE_DUMPING:
        bp->av_forw = un->un_waitq_headp;
        un->un_waitq_headp = bp;
        if (un->un_waitq_tailp == NULL) {
            un->un_waitq_tailp = bp;
        }
        if (bp == un->un_retry_bp) {
            un->un_retry_bp = NULL;
            un->un_retry_statp = NULL;
        }
        SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
            "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
        return;
    default:
        break;
    }

    /*
     * If the caller wants us to check FLAG_ISOLATE, then see if that
     * is set; if it is then we do not want to retry the command.
     * Normally, FLAG_ISOLATE is only used with USCSI cmds.
     */
    if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
        if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
            goto fail_command;
        }
    }


    /*
     * If SD_RETRIES_FAILFAST is set, it indicates that either a
     * command timeout or a selection timeout has occurred. This means
     * that we were unable to establish any kind of communication with
     * the target, and subsequent retries and/or commands are likely
     * to encounter similar results and take a long time to complete.
     *
     * If this is a failfast error condition, we need to update the
     * failfast state, even if this bp does not have B_FAILFAST set.
     */
    if (retry_check_flag & SD_RETRIES_FAILFAST) {
        if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
            ASSERT(un->un_failfast_bp == NULL);
            /*
             * If we are already in the active failfast state, and
             * another failfast error condition has been detected,
             * then fail this command if it has B_FAILFAST set.
             * If B_FAILFAST is clear, then maintain the legacy
             * behavior of retrying heroically, even though this
             * will take a lot more time to fail the command.
             */
            if (bp->b_flags & B_FAILFAST) {
                goto fail_command;
            }
        } else {
            /*
             * We're not in the active failfast state, but we
             * have a failfast error condition, so we must begin
             * transition to the next state. We do this regardless
             * of whether or not this bp has B_FAILFAST set.
             */
            if (un->un_failfast_bp == NULL) {
                /*
                 * This is the first bp to meet a failfast
                 * condition so save it on un_failfast_bp &
                 * do normal retry processing. Do not enter
                 * active failfast state yet. This marks
                 * entry into the "failfast pending" state.
                 */
                un->un_failfast_bp = bp;

            } else if (un->un_failfast_bp == bp) {
                /*
                 * This is the second time *this* bp has
                 * encountered a failfast error condition,
                 * so enter active failfast state & flush
                 * queues as appropriate.
                 */
                un->un_failfast_state = SD_FAILFAST_ACTIVE;
                un->un_failfast_bp = NULL;
                sd_failfast_flushq(un);

                /*
                 * Fail this bp now if B_FAILFAST set;
                 * otherwise continue with retries. (It would
                 * be pretty ironic if this bp succeeded on a
                 * subsequent retry after we just flushed all
                 * the queues).
                 */
                if (bp->b_flags & B_FAILFAST) {
                    goto fail_command;
                }

#if !defined(lint) && !defined(__lint)
            } else {
                /*
                 * If neither of the preceding conditionals
                 * was true, it means that there is some
                 * *other* bp that has met an initial failfast
                 * condition and is currently either being
                 * retried or is waiting to be retried. In
                 * that case we should perform normal retry
                 * processing on *this* bp, since there is a
                 * chance that the current failfast condition
                 * is transient and recoverable. If that does
                 * not turn out to be the case, then retries
                 * will be cleared when the wait queue is
                 * flushed anyway.
                 */
#endif
            }
        }
    } else {
        /*
         * SD_RETRIES_FAILFAST is clear, which indicates that we
         * likely were able to at least establish some level of
         * communication with the target and subsequent commands
         * and/or retries are likely to get through to the target.
         * In this case we want to be aggressive about clearing
         * the failfast state. Note that this does not affect
         * the "failfast pending" condition.
         */
        un->un_failfast_state = SD_FAILFAST_INACTIVE;
    }


    /*
     * Check the specified retry count to see if we can still do
     * any retries with this pkt before we should fail it.
     */
    switch (retry_check_flag & SD_RETRIES_MASK) {
    case SD_RETRIES_VICTIM:
        /*
         * Check the victim retry count. If exhausted, then fall
         * thru & check against the standard retry count.
         */
        if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
            /* Increment count & proceed with the retry */
            xp->xb_victim_retry_count++;
            break;
        }
        /* Victim retries exhausted, fall back to std. retries... */
        /* FALLTHRU */

    case SD_RETRIES_STANDARD:
        if (xp->xb_retry_count >= un->un_retry_count) {
            /* Retries exhausted, fail the command */
            SD_TRACE(SD_LOG_IO_CORE, un,
                "sd_retry_command: retries exhausted!\n");
            /*
             * update b_resid for failed SCMD_READ & SCMD_WRITE
             * commands with nonzero pkt_resid.
             */
            if ((pktp->pkt_reason == CMD_CMPLT) &&
                (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
                (pktp->pkt_resid != 0)) {
                uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
                if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
                    SD_UPDATE_B_RESID(bp, pktp);
                }
            }
            goto fail_command;
        }
        xp->xb_retry_count++;
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
        break;

    case SD_RETRIES_UA:
        if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
            /* Retries exhausted, fail the command */
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                "Unit Attention retries exhausted. "
                "Check the target.\n");
            goto fail_command;
        }
        xp->xb_ua_retry_count++;
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_retry_command: retry count:%d\n",
            xp->xb_ua_retry_count);
        break;

    case SD_RETRIES_BUSY:
        if (xp->xb_retry_count >= un->un_busy_retry_count) {
            /* Retries exhausted, fail the command */
            SD_TRACE(SD_LOG_IO_CORE, un,
                "sd_retry_command: retries exhausted!\n");
            goto fail_command;
        }
        xp->xb_retry_count++;
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
        break;

    case SD_RETRIES_NOCHECK:
    default:
        /* No retry count to check. Just proceed with the retry */
        break;
    }

    xp->xb_pktp->pkt_flags |= FLAG_HEAD;

    /*
     * If we were given a zero timeout, we must attempt to retry the
     * command immediately (ie, without a delay).
     */
    if (retry_delay == 0) {
        /*
         * Check some limiting conditions to see if we can actually
         * do the immediate retry. If we cannot, then we must
         * fall back to queueing up a delayed retry.
         */
        if (un->un_ncmds_in_transport >= un->un_throttle) {
            /*
             * We are at the throttle limit for the target,
             * fall back to delayed retry.
             */
            retry_delay = SD_BSY_TIMEOUT;
            statp = kstat_waitq_enter;
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_retry_command: immed. retry hit "
                "throttle!\n");
        } else {
            /*
             * We're clear to proceed with the immediate retry.
             * First call the user-provided function (if any)
             */
            if (user_funcp != NULL) {
                (*user_funcp)(un, bp, user_arg,
                    SD_IMMEDIATE_RETRY_ISSUED);
#ifdef __lock_lint
                sd_print_incomplete_msg(un, bp, user_arg,
                    SD_IMMEDIATE_RETRY_ISSUED);
                sd_print_cmd_incomplete_msg(un, bp, user_arg,
                    SD_IMMEDIATE_RETRY_ISSUED);
                sd_print_sense_failed_msg(un, bp, user_arg,
                    SD_IMMEDIATE_RETRY_ISSUED);
#endif
            }

            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_retry_command: issuing immediate retry\n");

            /*
             * Call sd_start_cmds() to transport the command to
             * the target.
             */
            sd_start_cmds(un, bp);

            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_retry_command exit\n");
            return;
        }
    }

    /*
     * Set up to retry the command after a delay.
     * First call the user-provided function (if any)
     */
    if (user_funcp != NULL) {
        (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
    }

    sd_set_retry_bp(un, bp, retry_delay, statp);

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
    return;

fail_command:

    if (user_funcp != NULL) {
        (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
    }

fail_command_no_log:

    SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_retry_command: returning failed command\n");

    sd_return_failed_command(un, bp, failure_code);

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}
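
/*
 * Illustrative usage (editor's note, mirroring the actual call sites in
 * this file, e.g. the TRAN_BUSY handling in sd_start_cmds()): a standard
 * delayed retry that re-enters the waitq kstat is requested as
 *
 *      sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
 *          SD_BSY_TIMEOUT / 500, kstat_waitq_enter);
 *
 * whereas retry_delay == 0 requests an immediate retry, subject to the
 * throttle check above.
 */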


/*
 * Function: sd_set_retry_bp
 *
 * Description: Set up the given bp for retry.
 *
 * Arguments: un - ptr to associated softstate
 *      bp - ptr to buf(9S) for the command
 *      retry_delay - time interval before issuing retry (may be 0)
 *      statp - optional pointer to kstat function
 *
 * Context: May be called under interrupt context
 */

static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
        "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);

    /*
     * Indicate that the command is being retried. This will not allow any
     * other commands on the wait queue to be transported to the target
     * until this command has been completed (success or failure). The
     * "retry command" is not transported to the target until the given
     * time delay expires, unless the user specified a 0 retry_delay.
     *
     * Note: the timeout(9F) callback routine is what actually calls
     * sd_start_cmds() to transport the command, with the exception of a
     * zero retry_delay. The only current user of a zero retry delay is
     * the case where a START_STOP_UNIT is sent to spin-up a device.
     */
    if (un->un_retry_bp == NULL) {
        ASSERT(un->un_retry_statp == NULL);
        un->un_retry_bp = bp;

        /*
         * If the user has not specified a delay the command should
         * be queued and no timeout should be scheduled.
         */
        if (retry_delay == 0) {
            /*
             * Save the kstat pointer that will be used in the
             * call to SD_UPDATE_KSTATS() below, so that
             * sd_start_cmds() can correctly decrement the waitq
             * count when it is time to transport this command.
             */
            un->un_retry_statp = statp;
            goto done;
        }
    }

    if (un->un_retry_bp == bp) {
        /*
         * Save the kstat pointer that will be used in the call to
         * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
         * correctly decrement the waitq count when it is time to
         * transport this command.
         */
        un->un_retry_statp = statp;

        /*
         * Schedule a timeout if:
         *   1) The user has specified a delay.
         *   2) There is not a START_STOP_UNIT callback pending.
         *
         * If no delay has been specified, then it is up to the caller
         * to ensure that IO processing continues without stalling.
         * Effectively, this means that the caller will issue the
         * required call to sd_start_cmds(). The START_STOP_UNIT
         * callback does this after the START STOP UNIT command has
         * completed. In either of these cases we should not schedule
         * a timeout callback here. Also don't schedule the timeout if
         * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
         */
        if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
            (un->un_direct_priority_timeid == NULL)) {
            un->un_retry_timeid =
                timeout(sd_start_retry_command, un, retry_delay);
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_set_retry_bp: setting timeout: un: 0x%p"
                " bp:0x%p un_retry_timeid:0x%p\n",
                un, bp, un->un_retry_timeid);
        }
    } else {
        /*
         * We only get in here if there is already another command
         * waiting to be retried. In this case, we just put the
         * given command onto the wait queue, so it can be transported
         * after the current retry command has completed.
         *
         * Also we have to make sure that if the command at the head
         * of the wait queue is the un_failfast_bp, that we do not
         * put ahead of it any other commands that are to be retried.
         */
        if ((un->un_failfast_bp != NULL) &&
            (un->un_failfast_bp == un->un_waitq_headp)) {
            /*
             * Enqueue this command AFTER the first command on
             * the wait queue (which is also un_failfast_bp).
             */
            bp->av_forw = un->un_waitq_headp->av_forw;
            un->un_waitq_headp->av_forw = bp;
            if (un->un_waitq_headp == un->un_waitq_tailp) {
                un->un_waitq_tailp = bp;
            }
        } else {
            /* Enqueue this command at the head of the waitq. */
            bp->av_forw = un->un_waitq_headp;
            un->un_waitq_headp = bp;
            if (un->un_waitq_tailp == NULL) {
                un->un_waitq_tailp = bp;
            }
        }

        if (statp == NULL) {
            statp = kstat_waitq_enter;
        }
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
    }

done:
    if (statp != NULL) {
        SD_UPDATE_KSTATS(un, statp, bp);
    }

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_set_retry_bp: exit un:0x%p\n", un);
}
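
/*
 * Illustrative sketch (editor's note, not driver code): the retry
 * machinery above uses the standard timeout(9F) idiom. A callback is
 * armed only when no competing callback is outstanding, and the saved
 * id is cleared by the callback itself, under SD_MUTEX, before any
 * work is done:
 *
 *      un->un_retry_timeid = timeout(sd_start_retry_command, un, delay);
 *      ...
 *      (later, in sd_start_retry_command())
 *      un->un_retry_timeid = NULL;
 *
 * Checking that un_startstop_timeid and un_direct_priority_timeid are
 * NULL before arming is what prevents duplicate restarts of the same
 * retry bp.
 */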


/*
 * Function: sd_start_retry_command
 *
 * Description: Start the command that has been waiting on the target's
 *      retry queue. Called from timeout(9F) context after the
 *      retry delay interval has expired.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: timeout(9F) thread context. May not sleep.
 */

static void
sd_start_retry_command(void *arg)
{
    struct sd_lun *un = arg;

    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_start_retry_command: entry\n");

    mutex_enter(SD_MUTEX(un));

    un->un_retry_timeid = NULL;

    if (un->un_retry_bp != NULL) {
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
            un, un->un_retry_bp);
        sd_start_cmds(un, un->un_retry_bp);
    }

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_start_retry_command: exit\n");
}


/*
 * Function: sd_start_direct_priority_command
 *
 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
 *      received TRAN_BUSY when we called scsi_transport() to send it
 *      to the underlying HBA. This function is called from timeout(9F)
 *      context after the delay interval has expired.
 *
 * Arguments: arg - pointer to associated buf(9S) to be restarted.
 *
 * Context: timeout(9F) thread context. May not sleep.
 */

static void
sd_start_direct_priority_command(void *arg)
{
    struct buf *priority_bp = arg;
    struct sd_lun *un;

    ASSERT(priority_bp != NULL);
    un = SD_GET_UN(priority_bp);
    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_start_direct_priority_command: entry\n");

    mutex_enter(SD_MUTEX(un));
    un->un_direct_priority_timeid = NULL;
    sd_start_cmds(un, priority_bp);
    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_start_direct_priority_command: exit\n");
}


/*
 * Function: sd_send_request_sense_command
 *
 * Description: Sends a REQUEST SENSE command to the target
 *
 * Context: May be called from interrupt context.
 */

static void
sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp)
{
    ASSERT(bp != NULL);
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
        "entry: buf:0x%p\n", bp);

    /*
     * If we are syncing or dumping, then fail the command to avoid a
     * recursive callback into scsi_transport(). Also fail the command
     * if we are suspended (legacy behavior).
     */
    if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
        (un->un_state == SD_STATE_DUMPING)) {
        sd_return_failed_command(un, bp, EIO);
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_send_request_sense_command: syncing/dumping, exit\n");
        return;
    }

    /*
     * Retry the failed command and don't issue the request sense if:
     *   1) the sense buf is busy
     *   2) we have one or more outstanding commands on the target
     *      (the sense data will be cleared or invalidated anyway)
     *
     * Note: There could be an issue with not checking a retry limit here;
     * the problem is determining which retry limit to check.
     */
    if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
        /* Don't retry if the command is flagged as non-retryable */
        if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
            sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
                NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter);
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_send_request_sense_command: "
                "at full throttle, retrying exit\n");
        } else {
            sd_return_failed_command(un, bp, EIO);
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_send_request_sense_command: "
                "at full throttle, non-retryable exit\n");
        }
        return;
    }

    sd_mark_rqs_busy(un, bp);
    sd_start_cmds(un, un->un_rqs_bp);

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_send_request_sense_command: exit\n");
}


/*
 * Function: sd_mark_rqs_busy
 *
 * Description: Indicate that the request sense bp for this instance is
 *      in use.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
{
    struct sd_xbuf *sense_xp;

    ASSERT(un != NULL);
    ASSERT(bp != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(un->un_sense_isbusy == 0);

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
        "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);

    sense_xp = SD_GET_XBUF(un->un_rqs_bp);
    ASSERT(sense_xp != NULL);

    SD_INFO(SD_LOG_IO, un,
        "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);

    ASSERT(sense_xp->xb_pktp != NULL);
    ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
        == (FLAG_SENSING | FLAG_HEAD));

    un->un_sense_isbusy = 1;
    un->un_rqs_bp->b_resid = 0;
    sense_xp->xb_pktp->pkt_resid = 0;
    sense_xp->xb_pktp->pkt_reason = 0;

    /* So we can get back the bp at interrupt time! */
    sense_xp->xb_sense_bp = bp;

    bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);

    /*
     * Mark this buf as awaiting sense data. (This is already set in
     * the pkt_flags for the RQS packet.)
     */
    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;

    sense_xp->xb_retry_count = 0;
    sense_xp->xb_victim_retry_count = 0;
    sense_xp->xb_ua_retry_count = 0;
    sense_xp->xb_dma_resid = 0;

    /* Clean up the fields for auto-request sense */
    sense_xp->xb_sense_status = 0;
    sense_xp->xb_sense_state = 0;
    sense_xp->xb_sense_resid = 0;
    bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
}
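
/*
 * Illustrative note (editor's sketch, not driver code): sd_mark_rqs_busy()
 * above and sd_mark_rqs_idle() below form a simple ownership handshake
 * around the single per-instance REQUEST SENSE packet:
 *
 *      sd_mark_rqs_busy(un, bp);       stash bp in sense_xp->xb_sense_bp,
 *                                      set FLAG_SENSING on bp's pkt
 *      ...     (RQS command transported; sense data arrives)
 *      bp = sd_mark_rqs_idle(un, sense_xp);    reverse both steps and
 *                                              recover the original bp
 *
 * The un_sense_isbusy flag guarantees that only one command at a time
 * can borrow the RQS packet.
 */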


/*
 * Function: sd_mark_rqs_idle
 *
 * Description: SD_MUTEX must be held continuously through this routine
 *      to prevent reuse of the rqs struct before the caller can
 *      complete its processing.
 *
 * Return Code: Pointer to the RQS buf
 *
 * Context: May be called under interrupt context
 */

static struct buf *
sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
{
    struct buf *bp;
    ASSERT(un != NULL);
    ASSERT(sense_xp != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(un->un_sense_isbusy != 0);

    un->un_sense_isbusy = 0;
    bp = sense_xp->xb_sense_bp;
    sense_xp->xb_sense_bp = NULL;

    /* This pkt is no longer interested in getting sense data */
    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;

    return (bp);
}



/*
 * Function: sd_alloc_rqs
 *
 * Description: Set up the unit to receive auto request sense data
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Called under attach(9E) context
 */

static int
sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
{
    struct sd_xbuf *xp;

    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));
    ASSERT(un->un_rqs_bp == NULL);
    ASSERT(un->un_rqs_pktp == NULL);

    /*
     * First allocate the required buf and scsi_pkt structs, then set up
     * the CDB in the scsi_pkt for a REQUEST SENSE command.
     */
    un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
        SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
    if (un->un_rqs_bp == NULL) {
        return (DDI_FAILURE);
    }

    un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
        CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);

    if (un->un_rqs_pktp == NULL) {
        sd_free_rqs(un);
        return (DDI_FAILURE);
    }

    /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
    (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
        SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);

    SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);

    /* Set up the other needed members in the ARQ scsi_pkt. */
    un->un_rqs_pktp->pkt_comp = sdintr;
    un->un_rqs_pktp->pkt_time = sd_io_time;
    un->un_rqs_pktp->pkt_flags |=
        (FLAG_SENSING | FLAG_HEAD); /* (1222170) */

    /*
     * Allocate & init the sd_xbuf struct for the RQS command. Do not
     * provide any intpkt, destroypkt routines as we take care of
     * scsi_pkt allocation/freeing here and in sd_free_rqs().
     */
    xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
    sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
    xp->xb_pktp = un->un_rqs_pktp;
    SD_INFO(SD_LOG_ATTACH_DETACH, un,
        "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
        un, xp, un->un_rqs_pktp, un->un_rqs_bp);

    /*
     * Save the pointer to the request sense private bp so it can
     * be retrieved in sdintr.
     */
    un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
    ASSERT(un->un_rqs_bp->b_private == xp);

    /*
     * See if the HBA supports auto-request sense for the specified
     * target/lun. If it does, then try to enable it (if not already
     * enabled).
     *
     * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
     * failure, while for other HBAs (pln) scsi_ifsetcap will always
     * return success. However, in both of these cases ARQ is always
     * enabled and scsi_ifgetcap will always return true. The best
     * approach is to issue the scsi_ifgetcap() first, then try the
     * scsi_ifsetcap().
     *
     * A third case is an HBA (adp) that always returns enabled on
     * scsi_ifgetcap even when ARQ is not enabled; there the best
     * approach is to issue a scsi_ifsetcap and then a scsi_ifgetcap.
     * Note: this case circumvents the Adaptec bug. (x86 only)
     */

    if (un->un_f_is_fibre == TRUE) {
        un->un_f_arq_enabled = TRUE;
    } else {
#if defined(__i386) || defined(__amd64)
        /*
         * Circumvent the Adaptec bug, remove this code when
         * the bug is fixed
         */
        (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
#endif
        switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
        case 0:
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_alloc_rqs: HBA supports ARQ\n");
            /*
             * ARQ is supported by this HBA but currently is not
             * enabled. Attempt to enable it and if successful then
             * mark this instance as ARQ enabled.
             */
            if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
                == 1) {
                /* Successfully enabled ARQ in the HBA */
                SD_INFO(SD_LOG_ATTACH_DETACH, un,
                    "sd_alloc_rqs: ARQ enabled\n");
                un->un_f_arq_enabled = TRUE;
            } else {
                /* Could not enable ARQ in the HBA */
                SD_INFO(SD_LOG_ATTACH_DETACH, un,
                    "sd_alloc_rqs: failed ARQ enable\n");
                un->un_f_arq_enabled = FALSE;
            }
            break;
        case 1:
            /*
             * ARQ is supported by this HBA and is already enabled.
             * Just mark ARQ as enabled for this instance.
             */
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_alloc_rqs: ARQ already enabled\n");
            un->un_f_arq_enabled = TRUE;
            break;
        default:
            /*
             * ARQ is not supported by this HBA; disable it for this
             * instance.
             */
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_alloc_rqs: HBA does not support ARQ\n");
            un->un_f_arq_enabled = FALSE;
            break;
        }
    }

    return (DDI_SUCCESS);
}


/*
 * Function: sd_free_rqs
 *
 * Description: Cleanup for the per-instance RQS command.
 *
 * Context: Kernel thread context
 */

static void
sd_free_rqs(struct sd_lun *un)
{
    ASSERT(un != NULL);

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");

    /*
     * If consistent memory is bound to a scsi_pkt, the pkt
     * has to be destroyed *before* freeing the consistent memory.
     * Don't change the sequence of these operations.
     * Otherwise scsi_destroy_pkt() might access memory that was
     * already freed by scsi_free_consistent_buf().
     */
    if (un->un_rqs_pktp != NULL) {
        scsi_destroy_pkt(un->un_rqs_pktp);
        un->un_rqs_pktp = NULL;
    }

    if (un->un_rqs_bp != NULL) {
        kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf));
        scsi_free_consistent_buf(un->un_rqs_bp);
        un->un_rqs_bp = NULL;
    }
    SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
}
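
/*
 * Illustrative sketch (editor's note, assuming the standard
 * scsi_ifgetcap(9F)/scsi_ifsetcap(9F) semantics used by sd_alloc_rqs()
 * above): probing and enabling a capability on an untagged basis
 * follows the pattern
 *
 *      switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
 *      case 0:     supported but disabled; attempt
 *                  scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
 *      case 1:     already enabled
 *      default:    not supported by this HBA
 *      }
 *
 * with the x86 pre-call to scsi_ifsetcap() working around HBAs whose
 * getcap answer is unreliable.
 */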



/*
 * Function: sd_reduce_throttle
 *
 * Description: Reduces the maximum # of outstanding commands on a
 *      target to the current number of outstanding commands.
 *      Queues a timeout(9F) callback to restore the limit
 *      after a specified interval has elapsed.
 *      Typically used when we get a TRAN_BUSY return code
 *      back from scsi_transport().
 *
 * Arguments: un - ptr to the sd_lun softstate struct
 *      throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
 *
 * Context: May be called from interrupt context
 */

static void
sd_reduce_throttle(struct sd_lun *un, int throttle_type)
{
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(un->un_ncmds_in_transport >= 0);

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
        "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
        un, un->un_throttle, un->un_ncmds_in_transport);

    if (un->un_throttle > 1) {
        if (un->un_f_use_adaptive_throttle == TRUE) {
            switch (throttle_type) {
            case SD_THROTTLE_TRAN_BUSY:
                if (un->un_busy_throttle == 0) {
                    un->un_busy_throttle = un->un_throttle;
                }
                break;
            case SD_THROTTLE_QFULL:
                un->un_busy_throttle = 0;
                break;
            default:
                ASSERT(FALSE);
            }

            if (un->un_ncmds_in_transport > 0) {
                un->un_throttle = un->un_ncmds_in_transport;
            }

        } else {
            if (un->un_ncmds_in_transport == 0) {
                un->un_throttle = 1;
            } else {
                un->un_throttle = un->un_ncmds_in_transport;
            }
        }
    }

    /* Reschedule the timeout if none is currently active */
    if (un->un_reset_throttle_timeid == NULL) {
        un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
            un, SD_THROTTLE_RESET_INTERVAL);
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_reduce_throttle: timeout scheduled!\n");
    }

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
        "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
}
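
/*
 * Illustrative note (editor's sketch, not driver code): with adaptive
 * throttling enabled, the QFULL recovery in sd_restore_throttle() below
 * ramps the limit back up by roughly 10% per interval:
 *
 *      throttle = un->un_throttle + max(un->un_throttle / 10, 1);
 *
 * so a throttle of 2 steps 2 -> 3 -> 4 -> ... until un_saved_throttle
 * is reached, while a TRAN_BUSY reduction is restored in a single step
 * from the value saved in un_busy_throttle.
 */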



/*
 * Function: sd_restore_throttle
 *
 * Description: Callback function for timeout(9F). Resets the current
 *      value of un->un_throttle to its default.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: May be called from interrupt context
 */

static void
sd_restore_throttle(void *arg)
{
    struct sd_lun *un = arg;

    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    mutex_enter(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
        "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);

    un->un_reset_throttle_timeid = NULL;

    if (un->un_f_use_adaptive_throttle == TRUE) {
        /*
         * If un_busy_throttle is nonzero, then it contains the
         * value that un_throttle was when we got a TRAN_BUSY back
         * from scsi_transport(). We want to revert back to this
         * value.
         *
         * In the QFULL case, the throttle limit will incrementally
         * increase until it reaches max throttle.
         */
        if (un->un_busy_throttle > 0) {
            un->un_throttle = un->un_busy_throttle;
            un->un_busy_throttle = 0;
        } else {
            /*
             * Increase the throttle by 10% to open the gate
             * slowly; schedule another restore if the saved
             * throttle has not yet been reached.
             */
            short throttle;
            if (sd_qfull_throttle_enable) {
                throttle = un->un_throttle +
                    max((un->un_throttle / 10), 1);
                un->un_throttle =
                    (throttle < un->un_saved_throttle) ?
                    throttle : un->un_saved_throttle;
                if (un->un_throttle < un->un_saved_throttle) {
                    un->un_reset_throttle_timeid =
                        timeout(sd_restore_throttle,
                        un,
                        SD_QFULL_THROTTLE_RESET_INTERVAL);
                }
            }
        }

        /*
         * If un_throttle has fallen below the low-water mark, we
         * restore the maximum value here (and allow it to ratchet
         * down again if necessary).
         */
        if (un->un_throttle < un->un_min_throttle) {
            un->un_throttle = un->un_saved_throttle;
        }
    } else {
        SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
            "restoring limit from 0x%x to 0x%x\n",
            un->un_throttle, un->un_saved_throttle);
        un->un_throttle = un->un_saved_throttle;
    }

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
        "sd_restore_throttle: calling sd_start_cmds!\n");

    sd_start_cmds(un, NULL);

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
        "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
        un, un->un_throttle);

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
}

/*
 * Function: sdrunout
 *
 * Description: Callback routine for scsi_init_pkt when a resource allocation
 *      fails.
 *
 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
 *      soft state instance.
 *
 * Return Code: The scsi_init_pkt routine allows for the callback function to
 *      return a 0 indicating the callback should be rescheduled or a 1
 *      indicating not to reschedule. This routine always returns 1
 *      because the driver always provides a callback function to
 *      scsi_init_pkt. This results in a callback always being scheduled
 *      (via the scsi_init_pkt callback implementation) if a resource
 *      failure occurs.
 *
 * Context: This callback function may not block or call routines that block
 *
 * Note: Using the scsi_init_pkt callback facility can result in an I/O
 *      request persisting at the head of the list which cannot be
 *      satisfied even after multiple retries. In the future the driver
 *      may implement some kind of maximum runout count before failing
 *      an I/O.
 */

static int
sdrunout(caddr_t arg)
{
    struct sd_lun *un = (struct sd_lun *)arg;

    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");

    mutex_enter(SD_MUTEX(un));
    sd_start_cmds(un, NULL);
    mutex_exit(SD_MUTEX(un));
    /*
     * This callback routine always returns 1 (i.e. do not reschedule)
     * because we always specify sdrunout as the callback handler for
     * scsi_init_pkt inside the call to sd_start_cmds.
     */
    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
    return (1);
}
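
/*
 * Illustrative sketch (editor's note, assuming the standard
 * scsi_init_pkt(9F) callback semantics): sdrunout() above is wired in
 * at packet-allocation time by the initpkt routines, roughly as
 *
 *      pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, bp, cmdlen,
 *          statuslen, privatelen, flags, sdrunout, (caddr_t)un);
 *
 * so that when allocation fails (pktp == NULL), the framework invokes
 * sdrunout() once resources may again be available, and sdrunout()
 * simply re-drives the queues via sd_start_cmds(un, NULL).
 */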
14518 * 14519 * Context: Interrupt context 14520 */ 14521 14522 static void 14523 sdintr(struct scsi_pkt *pktp) 14524 { 14525 struct buf *bp; 14526 struct sd_xbuf *xp; 14527 struct sd_lun *un; 14528 14529 ASSERT(pktp != NULL); 14530 bp = (struct buf *)pktp->pkt_private; 14531 ASSERT(bp != NULL); 14532 xp = SD_GET_XBUF(bp); 14533 ASSERT(xp != NULL); 14534 ASSERT(xp->xb_pktp != NULL); 14535 un = SD_GET_UN(bp); 14536 ASSERT(un != NULL); 14537 ASSERT(!mutex_owned(SD_MUTEX(un))); 14538 14539 #ifdef SD_FAULT_INJECTION 14540 14541 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14542 /* SD FaultInjection */ 14543 sd_faultinjection(pktp); 14544 14545 #endif /* SD_FAULT_INJECTION */ 14546 14547 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14548 " xp:0x%p, un:0x%p\n", bp, xp, un); 14549 14550 mutex_enter(SD_MUTEX(un)); 14551 14552 /* Reduce the count of the #commands currently in transport */ 14553 un->un_ncmds_in_transport--; 14554 ASSERT(un->un_ncmds_in_transport >= 0); 14555 14556 /* Increment counter to indicate that the callback routine is active */ 14557 un->un_in_callback++; 14558 14559 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14560 14561 #ifdef SDDEBUG 14562 if (bp == un->un_retry_bp) { 14563 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14564 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14565 un, un->un_retry_bp, un->un_ncmds_in_transport); 14566 } 14567 #endif 14568 14569 /* 14570 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14571 * state if needed. 14572 */ 14573 if (pktp->pkt_reason == CMD_DEV_GONE) { 14574 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14575 "Device is gone\n"); 14576 if (un->un_mediastate != DKIO_DEV_GONE) { 14577 un->un_mediastate = DKIO_DEV_GONE; 14578 cv_broadcast(&un->un_state_cv); 14579 } 14580 sd_return_failed_command(un, bp, EIO); 14581 goto exit; 14582 } 14583 14584 /* 14585 * First see if the pkt has auto-request sense data with it.... 14586 * Look at the packet state first so we don't take a performance 14587 * hit looking at the arq enabled flag unless absolutely necessary. 14588 */ 14589 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14590 (un->un_f_arq_enabled == TRUE)) { 14591 /* 14592 * The HBA did an auto request sense for this command so check 14593 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14594 * driver command that should not be retried. 14595 */ 14596 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14597 /* 14598 * Save the relevant sense info into the xp for the 14599 * original cmd. 14600 */ 14601 struct scsi_arq_status *asp; 14602 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14603 xp->xb_sense_status = 14604 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14605 xp->xb_sense_state = asp->sts_rqpkt_state; 14606 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14607 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14608 min(sizeof (struct scsi_extended_sense), 14609 SENSE_LENGTH)); 14610 14611 /* fail the command */ 14612 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14613 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14614 sd_return_failed_command(un, bp, EIO); 14615 goto exit; 14616 } 14617 14618 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14619 /* 14620 * We want to either retry or fail this command, so free 14621 * the DMA resources here. If we retry the command then 14622 * the DMA resources will be reallocated in sd_start_cmds(). 
14623 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14624 * causes the *entire* transfer to start over again from the 14625 * beginning of the request, even for PARTIAL chunks that 14626 * have already transferred successfully. 14627 */ 14628 if ((un->un_f_is_fibre == TRUE) && 14629 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14630 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14631 scsi_dmafree(pktp); 14632 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14633 } 14634 #endif 14635 14636 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14637 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14638 14639 sd_handle_auto_request_sense(un, bp, xp, pktp); 14640 goto exit; 14641 } 14642 14643 /* Next see if this is the REQUEST SENSE pkt for the instance */ 14644 if (pktp->pkt_flags & FLAG_SENSING) { 14645 /* This pktp is from the unit's REQUEST_SENSE command */ 14646 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14647 "sdintr: sd_handle_request_sense\n"); 14648 sd_handle_request_sense(un, bp, xp, pktp); 14649 goto exit; 14650 } 14651 14652 /* 14653 * Check to see if the command successfully completed as requested; 14654 * this is the most common case (and also the hot performance path). 14655 * 14656 * Requirements for successful completion are: 14657 * pkt_reason is CMD_CMPLT and packet status is status good. 14658 * In addition: 14659 * - A residual of zero indicates successful completion no matter what 14660 * the command is. 14661 * - If the residual is not zero and the command is not a read or 14662 * write, then it's still defined as successful completion. In other 14663 * words, if the command is a read or write the residual must be 14664 * zero for successful completion. 14665 * - If the residual is not zero and the command is a read or 14666 * write, and it's a USCSICMD, then it's still defined as 14667 * successful completion. 14668 */ 14669 if ((pktp->pkt_reason == CMD_CMPLT) && 14670 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 14671 14672 /* 14673 * Since this command is returned with a good status, we 14674 * can reset the count for Sonoma failover. 14675 */ 14676 un->un_sonoma_failure_count = 0; 14677 14678 /* 14679 * Return all USCSI commands on good status 14680 */ 14681 if (pktp->pkt_resid == 0) { 14682 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14683 "sdintr: returning command for resid == 0\n"); 14684 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 14685 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 14686 SD_UPDATE_B_RESID(bp, pktp); 14687 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14688 "sdintr: returning command for resid != 0\n"); 14689 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14690 SD_UPDATE_B_RESID(bp, pktp); 14691 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14692 "sdintr: returning uscsi command\n"); 14693 } else { 14694 goto not_successful; 14695 } 14696 sd_return_command(un, bp); 14697 14698 /* 14699 * Decrement counter to indicate that the callback routine 14700 * is done. 14701 */ 14702 un->un_in_callback--; 14703 ASSERT(un->un_in_callback >= 0); 14704 mutex_exit(SD_MUTEX(un)); 14705 14706 return; 14707 } 14708 14709 not_successful: 14710 14711 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14712 /* 14713 * The following is based upon knowledge of the underlying transport 14714 * and its use of DMA resources. This code should be removed when 14715 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 14716 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 14717 * and sd_start_cmds(). 
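 *
 * As a worked example of the restart cost noted below (the window
 * size here is hypothetical): a 1 MB request carved into four
 * 256 KB PKT_DMA_PARTIAL windows that fails in the third window is
 * restarted from the beginning of the request, so the first 512 KB
 * that already transferred successfully is transferred again.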
14718 * 14719 * Free any DMA resources associated with this command if there 14720 * is a chance it could be retried or enqueued for later retry. 14721 * If we keep the DMA binding then mpxio cannot reissue the 14722 * command on another path whenever a path failure occurs. 14723 * 14724 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 14725 * causes the *entire* transfer to start over again from the 14726 * beginning of the request, even for PARTIAL chunks that 14727 * have already transferred successfully. 14728 * 14729 * This is only done for non-uscsi commands (and also skipped for the 14730 * driver's internal RQS command). Also just do this for Fibre Channel 14731 * devices as these are the only ones that support mpxio. 14732 */ 14733 if ((un->un_f_is_fibre == TRUE) && 14734 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14735 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14736 scsi_dmafree(pktp); 14737 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14738 } 14739 #endif 14740 14741 /* 14742 * The command did not successfully complete as requested so check 14743 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14744 * driver command that should not be retried so just return. If 14745 * FLAG_DIAGNOSE is not set the error will be processed below. 14746 */ 14747 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14748 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14749 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 14750 /* 14751 * Issue a request sense if a check condition caused the error 14752 * (we handle the auto request sense case above), otherwise 14753 * just fail the command. 14754 */ 14755 if ((pktp->pkt_reason == CMD_CMPLT) && 14756 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 14757 sd_send_request_sense_command(un, bp, pktp); 14758 } else { 14759 sd_return_failed_command(un, bp, EIO); 14760 } 14761 goto exit; 14762 } 14763 14764 /* 14765 * The command did not successfully complete as requested so process 14766 * the error, retry, and/or attempt recovery. 
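 *
 * Each pkt_reason value recognized below is handed to a matching
 * sd_pkt_status_* or sd_pkt_reason_* helper; anything unrecognized
 * falls through to sd_pkt_reason_default().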
14767 */ 14768 switch (pktp->pkt_reason) { 14769 case CMD_CMPLT: 14770 switch (SD_GET_PKT_STATUS(pktp)) { 14771 case STATUS_GOOD: 14772 /* 14773 * The command completed successfully with a non-zero 14774 * residual 14775 */ 14776 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14777 "sdintr: STATUS_GOOD \n"); 14778 sd_pkt_status_good(un, bp, xp, pktp); 14779 break; 14780 14781 case STATUS_CHECK: 14782 case STATUS_TERMINATED: 14783 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14784 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 14785 sd_pkt_status_check_condition(un, bp, xp, pktp); 14786 break; 14787 14788 case STATUS_BUSY: 14789 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14790 "sdintr: STATUS_BUSY\n"); 14791 sd_pkt_status_busy(un, bp, xp, pktp); 14792 break; 14793 14794 case STATUS_RESERVATION_CONFLICT: 14795 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14796 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 14797 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 14798 break; 14799 14800 case STATUS_QFULL: 14801 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14802 "sdintr: STATUS_QFULL\n"); 14803 sd_pkt_status_qfull(un, bp, xp, pktp); 14804 break; 14805 14806 case STATUS_MET: 14807 case STATUS_INTERMEDIATE: 14808 case STATUS_SCSI2: 14809 case STATUS_INTERMEDIATE_MET: 14810 case STATUS_ACA_ACTIVE: 14811 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14812 "Unexpected SCSI status received: 0x%x\n", 14813 SD_GET_PKT_STATUS(pktp)); 14814 sd_return_failed_command(un, bp, EIO); 14815 break; 14816 14817 default: 14818 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14819 "Invalid SCSI status received: 0x%x\n", 14820 SD_GET_PKT_STATUS(pktp)); 14821 sd_return_failed_command(un, bp, EIO); 14822 break; 14823 14824 } 14825 break; 14826 14827 case CMD_INCOMPLETE: 14828 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14829 "sdintr: CMD_INCOMPLETE\n"); 14830 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 14831 break; 14832 case CMD_TRAN_ERR: 14833 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14834 "sdintr: CMD_TRAN_ERR\n"); 14835 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 14836 break; 14837 case CMD_RESET: 14838 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14839 "sdintr: CMD_RESET \n"); 14840 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 14841 break; 14842 case CMD_ABORTED: 14843 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14844 "sdintr: CMD_ABORTED \n"); 14845 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 14846 break; 14847 case CMD_TIMEOUT: 14848 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14849 "sdintr: CMD_TIMEOUT\n"); 14850 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 14851 break; 14852 case CMD_UNX_BUS_FREE: 14853 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14854 "sdintr: CMD_UNX_BUS_FREE \n"); 14855 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 14856 break; 14857 case CMD_TAG_REJECT: 14858 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14859 "sdintr: CMD_TAG_REJECT\n"); 14860 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 14861 break; 14862 default: 14863 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14864 "sdintr: default\n"); 14865 sd_pkt_reason_default(un, bp, xp, pktp); 14866 break; 14867 } 14868 14869 exit: 14870 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 14871 14872 /* Decrement counter to indicate that the callback routine is done. */ 14873 un->un_in_callback--; 14874 ASSERT(un->un_in_callback >= 0); 14875 14876 /* 14877 * At this point, the pkt has been dispatched, ie, it is either 14878 * being re-tried or has been returned to its caller and should 14879 * not be referenced. 
14880 */
14881
14882 mutex_exit(SD_MUTEX(un));
14883 }
14884
14885
14886 /*
14887 * Function: sd_print_incomplete_msg
14888 *
14889 * Description: Prints the error message for a CMD_INCOMPLETE error.
14890 *
14891 * Arguments: un - ptr to associated softstate for the device.
14892 * bp - ptr to the buf(9S) for the command.
14893 * arg - message string ptr
14894 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
14895 * or SD_NO_RETRY_ISSUED.
14896 *
14897 * Context: May be called under interrupt context
14898 */
14899
14900 static void
14901 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
14902 {
14903 struct scsi_pkt *pktp;
14904 char *msgp;
14905 char *cmdp = arg;
14906
14907 ASSERT(un != NULL);
14908 ASSERT(mutex_owned(SD_MUTEX(un)));
14909 ASSERT(bp != NULL);
14910 ASSERT(arg != NULL);
14911 pktp = SD_GET_PKTP(bp);
14912 ASSERT(pktp != NULL);
14913
14914 switch (code) {
14915 case SD_DELAYED_RETRY_ISSUED:
14916 case SD_IMMEDIATE_RETRY_ISSUED:
14917 msgp = "retrying";
14918 break;
14919 case SD_NO_RETRY_ISSUED:
14920 default:
14921 msgp = "giving up";
14922 break;
14923 }
14924
14925 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
14926 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14927 "incomplete %s- %s\n", cmdp, msgp);
14928 }
14929 }
14930
14931
14932
14933 /*
14934 * Function: sd_pkt_status_good
14935 *
14936 * Description: Processing for a STATUS_GOOD code in pkt_status.
14937 *
14938 * Context: May be called under interrupt context
14939 */
14940
14941 static void
14942 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
14943 struct sd_xbuf *xp, struct scsi_pkt *pktp)
14944 {
14945 char *cmdp;
14946
14947 ASSERT(un != NULL);
14948 ASSERT(mutex_owned(SD_MUTEX(un)));
14949 ASSERT(bp != NULL);
14950 ASSERT(xp != NULL);
14951 ASSERT(pktp != NULL);
14952 ASSERT(pktp->pkt_reason == CMD_CMPLT);
14953 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
14954 ASSERT(pktp->pkt_resid != 0);
14955
14956 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
14957
14958 SD_UPDATE_ERRSTATS(un, sd_harderrs);
14959 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
14960 case SCMD_READ:
14961 cmdp = "read";
14962 break;
14963 case SCMD_WRITE:
14964 cmdp = "write";
14965 break;
14966 default:
14967 SD_UPDATE_B_RESID(bp, pktp);
14968 sd_return_command(un, bp);
14969 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
14970 return;
14971 }
14972
14973 /*
14974 * See if we can retry the read/write, preferably immediately.
14975 * If retries are exhausted, then sd_retry_command() will update
14976 * the b_resid count.
14977 */
14978 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
14979 cmdp, EIO, (clock_t)0, NULL);
14980
14981 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
14982 }
14983
14984
14985
14986
14987
14988 /*
14989 * Function: sd_handle_request_sense
14990 *
14991 * Description: Processing for non-auto Request Sense command.
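 *
 * A sketch of the bookkeeping involved (names as used below): the
 * driver keeps a single dedicated REQUEST SENSE bp/pkt pair in
 * un->un_rqs_bp/un->un_rqs_pktp, and the xbuf of that RQS command
 * carries a back-pointer to the command being sensed, so the
 * original command is recovered with:
 *
 *	cmd_bp = sense_xp->xb_sense_bp;
 *	cmd_xp = SD_GET_XBUF(cmd_bp);
 *	cmd_pktp = SD_GET_PKTP(cmd_bp);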
14992 * 14993 * Arguments: un - ptr to associated softstate 14994 * sense_bp - ptr to buf(9S) for the RQS command 14995 * sense_xp - ptr to the sd_xbuf for the RQS command 14996 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 14997 * 14998 * Context: May be called under interrupt context 14999 */ 15000 15001 static void 15002 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15003 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15004 { 15005 struct buf *cmd_bp; /* buf for the original command */ 15006 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15007 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15008 15009 ASSERT(un != NULL); 15010 ASSERT(mutex_owned(SD_MUTEX(un))); 15011 ASSERT(sense_bp != NULL); 15012 ASSERT(sense_xp != NULL); 15013 ASSERT(sense_pktp != NULL); 15014 15015 /* 15016 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15017 * RQS command and not the original command. 15018 */ 15019 ASSERT(sense_pktp == un->un_rqs_pktp); 15020 ASSERT(sense_bp == un->un_rqs_bp); 15021 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15022 (FLAG_SENSING | FLAG_HEAD)); 15023 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15024 FLAG_SENSING) == FLAG_SENSING); 15025 15026 /* These are the bp, xp, and pktp for the original command */ 15027 cmd_bp = sense_xp->xb_sense_bp; 15028 cmd_xp = SD_GET_XBUF(cmd_bp); 15029 cmd_pktp = SD_GET_PKTP(cmd_bp); 15030 15031 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15032 /* 15033 * The REQUEST SENSE command failed. Release the REQUEST 15034 * SENSE command for re-use, get back the bp for the original 15035 * command, and attempt to re-try the original command if 15036 * FLAG_DIAGNOSE is not set in the original packet. 15037 */ 15038 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15039 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15040 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15041 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15042 NULL, NULL, EIO, (clock_t)0, NULL); 15043 return; 15044 } 15045 } 15046 15047 /* 15048 * Save the relevant sense info into the xp for the original cmd. 15049 * 15050 * Note: if the request sense failed the state info will be zero 15051 * as set in sd_mark_rqs_busy() 15052 */ 15053 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15054 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15055 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15056 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 15057 15058 /* 15059 * Free up the RQS command.... 15060 * NOTE: 15061 * Must do this BEFORE calling sd_validate_sense_data! 15062 * sd_validate_sense_data may return the original command in 15063 * which case the pkt will be freed and the flags can no 15064 * longer be touched. 15065 * SD_MUTEX is held through this process until the command 15066 * is dispatched based upon the sense data, so there are 15067 * no race conditions. 15068 */ 15069 (void) sd_mark_rqs_idle(un, sense_xp); 15070 15071 /* 15072 * For a retryable command see if we have valid sense data, if so then 15073 * turn it over to sd_decode_sense() to figure out the right course of 15074 * action. Just fail a non-retryable command. 
15075 */ 15076 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15077 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 15078 SD_SENSE_DATA_IS_VALID) { 15079 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15080 } 15081 } else { 15082 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15083 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15084 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15085 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15086 sd_return_failed_command(un, cmd_bp, EIO); 15087 } 15088 } 15089 15090 15091 15092 15093 /* 15094 * Function: sd_handle_auto_request_sense 15095 * 15096 * Description: Processing for auto-request sense information. 15097 * 15098 * Arguments: un - ptr to associated softstate 15099 * bp - ptr to buf(9S) for the command 15100 * xp - ptr to the sd_xbuf for the command 15101 * pktp - ptr to the scsi_pkt(9S) for the command 15102 * 15103 * Context: May be called under interrupt context 15104 */ 15105 15106 static void 15107 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15108 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15109 { 15110 struct scsi_arq_status *asp; 15111 15112 ASSERT(un != NULL); 15113 ASSERT(mutex_owned(SD_MUTEX(un))); 15114 ASSERT(bp != NULL); 15115 ASSERT(xp != NULL); 15116 ASSERT(pktp != NULL); 15117 ASSERT(pktp != un->un_rqs_pktp); 15118 ASSERT(bp != un->un_rqs_bp); 15119 15120 /* 15121 * For auto-request sense, we get a scsi_arq_status back from 15122 * the HBA, with the sense data in the sts_sensedata member. 15123 * The pkt_scbp of the packet points to this scsi_arq_status. 15124 */ 15125 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15126 15127 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15128 /* 15129 * The auto REQUEST SENSE failed; see if we can re-try 15130 * the original command. 15131 */ 15132 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15133 "auto request sense failed (reason=%s)\n", 15134 scsi_rname(asp->sts_rqpkt_reason)); 15135 15136 sd_reset_target(un, pktp); 15137 15138 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15139 NULL, NULL, EIO, (clock_t)0, NULL); 15140 return; 15141 } 15142 15143 /* Save the relevant sense info into the xp for the original cmd. */ 15144 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15145 xp->xb_sense_state = asp->sts_rqpkt_state; 15146 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15147 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15148 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 15149 15150 /* 15151 * See if we have valid sense data, if so then turn it over to 15152 * sd_decode_sense() to figure out the right course of action. 15153 */ 15154 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 15155 sd_decode_sense(un, bp, xp, pktp); 15156 } 15157 } 15158 15159 15160 /* 15161 * Function: sd_print_sense_failed_msg 15162 * 15163 * Description: Print log message when RQS has failed. 
15164 * 15165 * Arguments: un - ptr to associated softstate 15166 * bp - ptr to buf(9S) for the command 15167 * arg - generic message string ptr 15168 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15169 * or SD_NO_RETRY_ISSUED 15170 * 15171 * Context: May be called from interrupt context 15172 */ 15173 15174 static void 15175 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15176 int code) 15177 { 15178 char *msgp = arg; 15179 15180 ASSERT(un != NULL); 15181 ASSERT(mutex_owned(SD_MUTEX(un))); 15182 ASSERT(bp != NULL); 15183 15184 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15185 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15186 } 15187 } 15188 15189 15190 /* 15191 * Function: sd_validate_sense_data 15192 * 15193 * Description: Check the given sense data for validity. 15194 * If the sense data is not valid, the command will 15195 * be either failed or retried! 15196 * 15197 * Return Code: SD_SENSE_DATA_IS_INVALID 15198 * SD_SENSE_DATA_IS_VALID 15199 * 15200 * Context: May be called from interrupt context 15201 */ 15202 15203 static int 15204 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 15205 { 15206 struct scsi_extended_sense *esp; 15207 struct scsi_pkt *pktp; 15208 size_t actual_len; 15209 char *msgp = NULL; 15210 15211 ASSERT(un != NULL); 15212 ASSERT(mutex_owned(SD_MUTEX(un))); 15213 ASSERT(bp != NULL); 15214 ASSERT(bp != un->un_rqs_bp); 15215 ASSERT(xp != NULL); 15216 15217 pktp = SD_GET_PKTP(bp); 15218 ASSERT(pktp != NULL); 15219 15220 /* 15221 * Check the status of the RQS command (auto or manual). 15222 */ 15223 switch (xp->xb_sense_status & STATUS_MASK) { 15224 case STATUS_GOOD: 15225 break; 15226 15227 case STATUS_RESERVATION_CONFLICT: 15228 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15229 return (SD_SENSE_DATA_IS_INVALID); 15230 15231 case STATUS_BUSY: 15232 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15233 "Busy Status on REQUEST SENSE\n"); 15234 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15235 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15236 return (SD_SENSE_DATA_IS_INVALID); 15237 15238 case STATUS_QFULL: 15239 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15240 "QFULL Status on REQUEST SENSE\n"); 15241 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15242 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15243 return (SD_SENSE_DATA_IS_INVALID); 15244 15245 case STATUS_CHECK: 15246 case STATUS_TERMINATED: 15247 msgp = "Check Condition on REQUEST SENSE\n"; 15248 goto sense_failed; 15249 15250 default: 15251 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15252 goto sense_failed; 15253 } 15254 15255 /* 15256 * See if we got the minimum required amount of sense data. 15257 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15258 * or less. 
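 *
 * A worked example (the values are illustrative, not defaults):
 * with SENSE_LENGTH of 20 and xb_sense_resid of 6, actual_len
 * below works out to 14, which passes the SUN_MIN_SENSE_LENGTH
 * check; a resid equal to SENSE_LENGTH yields actual_len == 0
 * and the sense data is rejected.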
15259 */
15260 actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid);
15261 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
15262 (actual_len == 0)) {
15263 msgp = "Request Sense couldn't get sense data\n";
15264 goto sense_failed;
15265 }
15266
15267 if (actual_len < SUN_MIN_SENSE_LENGTH) {
15268 msgp = "Not enough sense information\n";
15269 goto sense_failed;
15270 }
15271
15272 /*
15273 * We require the extended sense data
15274 */
15275 esp = (struct scsi_extended_sense *)xp->xb_sense_data;
15276 if (esp->es_class != CLASS_EXTENDED_SENSE) {
15277 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
15278 static char tmp[8];
15279 static char buf[148];
15280 char *p = (char *)(xp->xb_sense_data);
15281 int i;
15282
15283 mutex_enter(&sd_sense_mutex);
15284 (void) strcpy(buf, "undecodable sense information:");
15285 for (i = 0; i < actual_len; i++) {
15286 (void) sprintf(tmp, " 0x%x", *(p++)&0xff);
15287 (void) strcpy(&buf[strlen(buf)], tmp);
15288 }
15289 i = strlen(buf);
15290 (void) strcpy(&buf[i], "-(assumed fatal)\n");
15291 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf);
15292 mutex_exit(&sd_sense_mutex);
15293 }
15294 /* Note: Legacy behavior, fail the command with no retry */
15295 sd_return_failed_command(un, bp, EIO);
15296 return (SD_SENSE_DATA_IS_INVALID);
15297 }
15298
15299 /*
15300 * Check that es_code is valid (es_class concatenated with es_code
15301 * makes up the "response code" field; es_class will always be 7,
15302 * so make sure es_code is 0, 1, 2, 3 or 0xf). es_code indicates
15303 * the sense data format.
15304 */
15305 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
15306 (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
15307 (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
15308 (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
15309 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
15310 goto sense_failed;
15311 }
15312
15313 return (SD_SENSE_DATA_IS_VALID);
15314
15315 sense_failed:
15316 /*
15317 * If the request sense failed (for whatever reason), attempt
15318 * to retry the original command.
15319 */
15320 #if defined(__i386) || defined(__amd64)
15321 /*
15322 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
15323 * sddef.h for the SPARC platform, while x86 uses one binary
15324 * for both SCSI and FC.
15325 * The SD_RETRY_DELAY value needs to be adjusted here
15326 * whenever SD_RETRY_DELAY changes in sddef.h.
15327 */
15328 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15329 sd_print_sense_failed_msg, msgp, EIO,
15330 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
15331 #else
15332 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15333 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
15334 #endif
15335
15336 return (SD_SENSE_DATA_IS_INVALID);
15337 }
15338
15339
15340
15341 /*
15342 * Function: sd_decode_sense
15343 *
15344 * Description: Take recovery action(s) when SCSI Sense Data is received.
15345 *
15346 * Context: Interrupt context.
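 *
 * Note: scsi_sense_key(9F) is used below so that the sense key is
 * extracted correctly whether the target returned fixed format or
 * descriptor format sense data.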
15347 */
15348
15349 static void
15350 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
15351 struct scsi_pkt *pktp)
15352 {
15353 uint8_t sense_key;
15354
15355 ASSERT(un != NULL);
15356 ASSERT(mutex_owned(SD_MUTEX(un)));
15357 ASSERT(bp != NULL);
15358 ASSERT(bp != un->un_rqs_bp);
15359 ASSERT(xp != NULL);
15360 ASSERT(pktp != NULL);
15361
15362 sense_key = scsi_sense_key(xp->xb_sense_data);
15363
15364 switch (sense_key) {
15365 case KEY_NO_SENSE:
15366 sd_sense_key_no_sense(un, bp, xp, pktp);
15367 break;
15368 case KEY_RECOVERABLE_ERROR:
15369 sd_sense_key_recoverable_error(un, xp->xb_sense_data,
15370 bp, xp, pktp);
15371 break;
15372 case KEY_NOT_READY:
15373 sd_sense_key_not_ready(un, xp->xb_sense_data,
15374 bp, xp, pktp);
15375 break;
15376 case KEY_MEDIUM_ERROR:
15377 case KEY_HARDWARE_ERROR:
15378 sd_sense_key_medium_or_hardware_error(un,
15379 xp->xb_sense_data, bp, xp, pktp);
15380 break;
15381 case KEY_ILLEGAL_REQUEST:
15382 sd_sense_key_illegal_request(un, bp, xp, pktp);
15383 break;
15384 case KEY_UNIT_ATTENTION:
15385 sd_sense_key_unit_attention(un, xp->xb_sense_data,
15386 bp, xp, pktp);
15387 break;
15388 case KEY_WRITE_PROTECT:
15389 case KEY_VOLUME_OVERFLOW:
15390 case KEY_MISCOMPARE:
15391 sd_sense_key_fail_command(un, bp, xp, pktp);
15392 break;
15393 case KEY_BLANK_CHECK:
15394 sd_sense_key_blank_check(un, bp, xp, pktp);
15395 break;
15396 case KEY_ABORTED_COMMAND:
15397 sd_sense_key_aborted_command(un, bp, xp, pktp);
15398 break;
15399 case KEY_VENDOR_UNIQUE:
15400 case KEY_COPY_ABORTED:
15401 case KEY_EQUAL:
15402 case KEY_RESERVED:
15403 default:
15404 sd_sense_key_default(un, xp->xb_sense_data,
15405 bp, xp, pktp);
15406 break;
15407 }
15408 }
15409
15410
15411 /*
15412 * Function: sd_dump_memory
15413 *
15414 * Description: Debug logging routine to print the contents of a user provided
15415 * buffer. The output of the buffer is broken up into 256-byte
15416 * segments due to a size constraint of the scsi_log
15417 * implementation.
15418 *
15419 * Arguments: un - ptr to softstate
15420 * comp - component mask
15421 * title - "title" string to precede data when printed
15422 * data - ptr to data block to be printed
15423 * len - size of data block to be printed
15424 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
15425 *
15426 * Context: May be called from interrupt context
15427 */
15428
15429 #define SD_DUMP_MEMORY_BUF_SIZE 256
15430
15431 static char *sd_dump_format_string[] = {
15432 " 0x%02x",
15433 " %c"
15434 };
15435
15436 static void
15437 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
15438 int len, int fmt)
15439 {
15440 int i, j;
15441 int avail_count;
15442 int start_offset;
15443 int end_offset;
15444 size_t entry_len;
15445 char *bufp;
15446 char *local_buf;
15447 char *format_string;
15448
15449 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
15450
15451 /*
15452 * In the debug version of the driver, this function is called from a
15453 * number of places which are NOPs in the release driver.
15454 * The debug driver therefore has additional methods of filtering
15455 * debug output.
15456 */
15457 #ifdef SDDEBUG
15458 /*
15459 * In the debug version of the driver we can reduce the amount of debug
15460 * messages by setting sd_error_level to something other than
15461 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
15462 * sd_component_mask.
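 *
 * For example (an illustrative setting, not a default): clearing
 * both SD_LOGMASK_DUMP_MEM and SD_LOGMASK_DIAG from sd_level_mask,
 * or setting sd_error_level to anything other than SCSI_ERR_ALL,
 * suppresses these memory dumps entirely, as the checks below show.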
15463 */
15464 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
15465 (sd_error_level != SCSI_ERR_ALL)) {
15466 return;
15467 }
15468 if (((sd_component_mask & comp) == 0) ||
15469 (sd_error_level != SCSI_ERR_ALL)) {
15470 return;
15471 }
15472 #else
15473 if (sd_error_level != SCSI_ERR_ALL) {
15474 return;
15475 }
15476 #endif
15477
15478 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
15479 bufp = local_buf;
15480 /*
15481 * Available length is the length of local_buf[], minus the
15482 * length of the title string, minus one for the ":", minus
15483 * one for the newline, minus one for the NULL terminator.
15484 * This gives the #bytes available for holding the printed
15485 * values from the given data buffer.
15486 */
15487 if (fmt == SD_LOG_HEX) {
15488 format_string = sd_dump_format_string[0];
15489 } else /* SD_LOG_CHAR */ {
15490 format_string = sd_dump_format_string[1];
15491 }
15492 /*
15493 * Available count is the number of elements from the given
15494 * data buffer that we can fit into the available length.
15495 * This is based upon the size of the format string used.
15496 * Make one entry and find its size.
 * (For example, with the title "Sense Data" a hex entry such as
 * " 0x2f" is five characters wide, so avail_count works out to
 * (256 - 10 - 3) / 5 = 48 data bytes per logged line.)
15497 */
15498 (void) sprintf(bufp, format_string, data[0]);
15499 entry_len = strlen(bufp);
15500 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
15501
15502 j = 0;
15503 while (j < len) {
15504 bufp = local_buf;
15505 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
15506 start_offset = j;
15507
15508 end_offset = start_offset + avail_count;
15509
15510 (void) sprintf(bufp, "%s:", title);
15511 bufp += strlen(bufp);
15512 for (i = start_offset; ((i < end_offset) && (j < len));
15513 i++, j++) {
15514 (void) sprintf(bufp, format_string, data[i]);
15515 bufp += entry_len;
15516 }
15517 (void) sprintf(bufp, "\n");
15518
15519 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
15520 }
15521 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
15522 }
15523
15524 /*
15525 * Function: sd_print_sense_msg
15526 *
15527 * Description: Log a message based upon the given sense data.
15528 *
15529 * Arguments: un - ptr to associated softstate
15530 * bp - ptr to buf(9S) for the command
15531 * arg - ptr to associated sd_sense_info struct
15532 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
15533 * or SD_NO_RETRY_ISSUED
15534 *
15535 * Context: May be called from interrupt context
15536 */
15537
15538 static void
15539 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
15540 {
15541 struct sd_xbuf *xp;
15542 struct scsi_pkt *pktp;
15543 uint8_t *sensep;
15544 daddr_t request_blkno;
15545 diskaddr_t err_blkno;
15546 int severity;
15547 int pfa_flag;
15548 extern struct scsi_key_strings scsi_cmds[];
15549
15550 ASSERT(un != NULL);
15551 ASSERT(mutex_owned(SD_MUTEX(un)));
15552 ASSERT(bp != NULL);
15553 xp = SD_GET_XBUF(bp);
15554 ASSERT(xp != NULL);
15555 pktp = SD_GET_PKTP(bp);
15556 ASSERT(pktp != NULL);
15557 ASSERT(arg != NULL);
15558
15559 severity = ((struct sd_sense_info *)(arg))->ssi_severity;
15560 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
15561
15562 if ((code == SD_DELAYED_RETRY_ISSUED) ||
15563 (code == SD_IMMEDIATE_RETRY_ISSUED)) {
15564 severity = SCSI_ERR_RETRYABLE;
15565 }
15566
15567 /* Use absolute block number for the request block number */
15568 request_blkno = xp->xb_blkno;
15569
15570 /*
15571 * Now try to get the error block number from the sense data
15572 */
15573 sensep = xp->xb_sense_data;
15574
15575 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
15576 (uint64_t *)&err_blkno)) {
15577 /*
15578 * We retrieved the error block number from the information
15579 * portion of the sense data.
15580 *
15581 * For USCSI commands we are better off using the error
15582 * block no. as the requested block no. (This is the best
15583 * we can estimate.)
15584 */
15585 if ((SD_IS_BUFIO(xp) == FALSE) &&
15586 ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
15587 request_blkno = err_blkno;
15588 }
15589 } else {
15590 /*
15591 * Without the es_valid bit set (for fixed format) or an
15592 * information descriptor (for descriptor format) we cannot
15593 * be certain of the error blkno, so just use the
15594 * request_blkno.
15595 */
15596 err_blkno = (diskaddr_t)request_blkno;
15597 }
15598
15599 /*
15600 * The following will log the buffer contents for the release driver
15601 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
15602 * level is set to verbose.
15603 */ 15604 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15605 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15606 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15607 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 15608 15609 if (pfa_flag == FALSE) { 15610 /* This is normally only set for USCSI */ 15611 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 15612 return; 15613 } 15614 15615 if ((SD_IS_BUFIO(xp) == TRUE) && 15616 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 15617 (severity < sd_error_level))) { 15618 return; 15619 } 15620 } 15621 15622 /* 15623 * Check for Sonoma Failover and keep a count of how many failed I/O's 15624 */ 15625 if ((SD_IS_LSI(un)) && 15626 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 15627 (scsi_sense_asc(sensep) == 0x94) && 15628 (scsi_sense_ascq(sensep) == 0x01)) { 15629 un->un_sonoma_failure_count++; 15630 if (un->un_sonoma_failure_count > 1) { 15631 return; 15632 } 15633 } 15634 15635 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 15636 request_blkno, err_blkno, scsi_cmds, 15637 (struct scsi_extended_sense *)sensep, 15638 un->un_additional_codes, NULL); 15639 } 15640 15641 /* 15642 * Function: sd_sense_key_no_sense 15643 * 15644 * Description: Recovery action when sense data was not received. 15645 * 15646 * Context: May be called from interrupt context 15647 */ 15648 15649 static void 15650 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 15651 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15652 { 15653 struct sd_sense_info si; 15654 15655 ASSERT(un != NULL); 15656 ASSERT(mutex_owned(SD_MUTEX(un))); 15657 ASSERT(bp != NULL); 15658 ASSERT(xp != NULL); 15659 ASSERT(pktp != NULL); 15660 15661 si.ssi_severity = SCSI_ERR_FATAL; 15662 si.ssi_pfa_flag = FALSE; 15663 15664 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15665 15666 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15667 &si, EIO, (clock_t)0, NULL); 15668 } 15669 15670 15671 /* 15672 * Function: sd_sense_key_recoverable_error 15673 * 15674 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 15675 * 15676 * Context: May be called from interrupt context 15677 */ 15678 15679 static void 15680 sd_sense_key_recoverable_error(struct sd_lun *un, 15681 uint8_t *sense_datap, 15682 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15683 { 15684 struct sd_sense_info si; 15685 uint8_t asc = scsi_sense_asc(sense_datap); 15686 15687 ASSERT(un != NULL); 15688 ASSERT(mutex_owned(SD_MUTEX(un))); 15689 ASSERT(bp != NULL); 15690 ASSERT(xp != NULL); 15691 ASSERT(pktp != NULL); 15692 15693 /* 15694 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 15695 */ 15696 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 15697 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 15698 si.ssi_severity = SCSI_ERR_INFO; 15699 si.ssi_pfa_flag = TRUE; 15700 } else { 15701 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15702 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 15703 si.ssi_severity = SCSI_ERR_RECOVERED; 15704 si.ssi_pfa_flag = FALSE; 15705 } 15706 15707 if (pktp->pkt_resid == 0) { 15708 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 15709 sd_return_command(un, bp); 15710 return; 15711 } 15712 15713 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15714 &si, EIO, (clock_t)0, NULL); 15715 } 15716 15717 15718 15719 15720 /* 15721 * Function: sd_sense_key_not_ready 15722 * 15723 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
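 *
 * A rough map of the ASC/ASCQ handling below (the code is
 * authoritative):
 *
 *	0x04/0x00  cause not reportable:  reset target, retry via
 *					  START STOP UNIT
 *	0x04/0x01  becoming ready:	  retry
 *	0x04/0x03  manual intervention:	  fail immediately
 *	0x04/0x88  T3/T4 path problem:	  fail quietly (FC arrays)
 *	0x05	   no selection response: log, retry
 *	0x3A	   medium not present:	  fail immediately
 *	other	   log (verbose), retry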
15724 *
15725 * Context: May be called from interrupt context
15726 */
15727
15728 static void
15729 sd_sense_key_not_ready(struct sd_lun *un,
15730 uint8_t *sense_datap,
15731 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
15732 {
15733 struct sd_sense_info si;
15734 uint8_t asc = scsi_sense_asc(sense_datap);
15735 uint8_t ascq = scsi_sense_ascq(sense_datap);
15736
15737 ASSERT(un != NULL);
15738 ASSERT(mutex_owned(SD_MUTEX(un)));
15739 ASSERT(bp != NULL);
15740 ASSERT(xp != NULL);
15741 ASSERT(pktp != NULL);
15742
15743 si.ssi_severity = SCSI_ERR_FATAL;
15744 si.ssi_pfa_flag = FALSE;
15745
15746 /*
15747 * Update error stats after first NOT READY error. Disks may have
15748 * been powered down and may need to be restarted. For CDROMs,
15749 * report NOT READY errors only if media is present.
15750 */
15751 if ((ISCD(un) && (asc == 0x3A)) ||
15752 (xp->xb_retry_count > 0)) {
15753 SD_UPDATE_ERRSTATS(un, sd_harderrs);
15754 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
15755 }
15756
15757 /*
15758 * Just fail if the "not ready" retry limit has been reached.
15759 */
15760 if (xp->xb_retry_count >= un->un_notready_retry_count) {
15761 /* Special check for error message printing for removables. */
15762 if (un->un_f_has_removable_media && (asc == 0x04) &&
15763 (ascq >= 0x04)) {
15764 si.ssi_severity = SCSI_ERR_ALL;
15765 }
15766 goto fail_command;
15767 }
15768
15769 /*
15770 * Check the ASC and ASCQ in the sense data as needed, to determine
15771 * what to do.
15772 */
15773 switch (asc) {
15774 case 0x04: /* LOGICAL UNIT NOT READY */
15775 /*
15776 * Disk drives that don't spin up result in a very long delay
15777 * in format without warning messages. We will log a message
15778 * if the error level is set to verbose.
15779 */
15780 if (sd_error_level < SCSI_ERR_RETRYABLE) {
15781 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15782 "logical unit not ready, resetting disk\n");
15783 }
15784
15785 /*
15786 * There are different requirements for CDROMs and disks for
15787 * the number of retries. If a CD-ROM is reporting this, it is
15788 * probably reading TOC and is in the process of getting
15789 * ready, so we should keep on trying for a long time to make
15790 * sure that all types of media are taken into account (for
15791 * some media the drive takes a long time to read TOC). For
15792 * disks we do not want to retry this too many times as this
15793 * can cause a long hang in format when the drive refuses to
15794 * spin up (a very common failure).
15795 */
15796 switch (ascq) {
15797 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */
15798 /*
15799 * Disk drives frequently refuse to spin up which
15800 * results in a very long hang in format without
15801 * warning messages.
15802 *
15803 * Note: This code preserves the legacy behavior of
15804 * comparing xb_retry_count against zero for fibre
15805 * channel targets instead of comparing against the
15806 * un_reset_retry_count value. The reason for this
15807 * discrepancy has been so utterly lost beneath the
15808 * Sands of Time that even Indiana Jones could not
15809 * find it.
15810 */
15811 if (un->un_f_is_fibre == TRUE) {
15812 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
15813 (xp->xb_retry_count > 0)) &&
15814 (un->un_startstop_timeid == NULL)) {
15815 scsi_log(SD_DEVINFO(un), sd_label,
15816 CE_WARN, "logical unit not ready, "
15817 "resetting disk\n");
15818 sd_reset_target(un, pktp);
15819 }
15820 } else {
15821 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
15822 (xp->xb_retry_count >
15823 un->un_reset_retry_count)) &&
15824 (un->un_startstop_timeid == NULL)) {
15825 scsi_log(SD_DEVINFO(un), sd_label,
15826 CE_WARN, "logical unit not ready, "
15827 "resetting disk\n");
15828 sd_reset_target(un, pktp);
15829 }
15830 }
15831 break;
15832
15833 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */
15834 /*
15835 * If the target is in the process of becoming
15836 * ready, just proceed with the retry. This can
15837 * happen with CD-ROMs that take a long time to
15838 * read TOC after a power cycle or reset.
15839 */
15840 goto do_retry;
15841
15842 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */
15843 break;
15844
15845 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
15846 /*
15847 * Retries cannot help here so just fail right away.
15848 */
15849 goto fail_command;
15850
15851 case 0x88:
15852 /*
15853 * Vendor-unique code for T3/T4: it indicates a
15854 * path problem in a multipathed config, but as far as
15855 * the target driver is concerned it equates to a fatal
15856 * error, so we should just fail the command right away
15857 * (without printing anything to the console). If this
15858 * is not a T3/T4, fall thru to the default recovery
15859 * action.
15860 * T3/T4 is FC-only; no need to check is_fibre.
15861 */
15862 if (SD_IS_T3(un) || SD_IS_T4(un)) {
15863 sd_return_failed_command(un, bp, EIO);
15864 return;
15865 }
15866 /* FALLTHRU */
15867
15868 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */
15869 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */
15870 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */
15871 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */
15872 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */
15873 default: /* Possible future codes in SCSI spec? */
15874 /*
15875 * For removable-media devices, do not retry if
15876 * ASCQ > 2 as these result mostly from USCSI commands
15877 * on MMC devices issued to check status of an
15878 * operation initiated in immediate mode. Also for
15879 * ASCQ >= 4 do not print console messages as these
15880 * mainly represent a user-initiated operation
15881 * instead of a system failure.
15882 */
15883 if (un->un_f_has_removable_media) {
15884 si.ssi_severity = SCSI_ERR_ALL;
15885 goto fail_command;
15886 }
15887 break;
15888 }
15889
15890 /*
15891 * As part of our recovery attempt for the NOT READY
15892 * condition, we issue a START STOP UNIT command. However
15893 * we want to wait for a short delay before attempting this
15894 * as there may still be more commands coming back from the
15895 * target with the check condition. To do this we use
15896 * timeout(9F) to call sd_start_stop_unit_callback() after
15897 * the delay interval expires. (sd_start_stop_unit_callback()
15898 * dispatches sd_start_stop_unit_task(), which will issue
15899 * the actual START STOP UNIT command.) The delay interval
15900 * is one-half of the delay that we will use to retry the
15901 * command that generated the NOT READY condition.
15902 *
15903 * Note that we could just dispatch sd_start_stop_unit_task()
15904 * from here and allow it to sleep for the delay interval,
15905 * but then we would be tying up the taskq thread
15906 * unnecessarily for the duration of the delay.
15907 *
15908 * Do not issue the START STOP UNIT if the current command
15909 * is already a START STOP UNIT.
15910 */
15911 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
15912 break;
15913 }
15914
15915 /*
15916 * Do not schedule the timeout if one is already pending.
15917 */
15918 if (un->un_startstop_timeid != NULL) {
15919 SD_INFO(SD_LOG_ERROR, un,
15920 "sd_sense_key_not_ready: restart already issued to"
15921 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
15922 ddi_get_instance(SD_DEVINFO(un)));
15923 break;
15924 }
15925
15926 /*
15927 * Schedule the START STOP UNIT command, then queue the command
15928 * for a retry.
15929 *
15930 * Note: A timeout is not scheduled for this retry because we
15931 * want the retry to be serial with the START_STOP_UNIT. The
15932 * retry will be started when the START_STOP_UNIT is completed
15933 * in sd_start_stop_unit_task.
15934 */
15935 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
15936 un, SD_BSY_TIMEOUT / 2);
15937 xp->xb_retry_count++;
15938 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
15939 return;
15940
15941 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
15942 if (sd_error_level < SCSI_ERR_RETRYABLE) {
15943 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15944 "unit does not respond to selection\n");
15945 }
15946 break;
15947
15948 case 0x3A: /* MEDIUM NOT PRESENT */
15949 if (sd_error_level >= SCSI_ERR_FATAL) {
15950 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15951 "Caddy not inserted in drive\n");
15952 }
15953
15954 sr_ejected(un);
15955 un->un_mediastate = DKIO_EJECTED;
15956 /* The state has changed, inform the media watch routines */
15957 cv_broadcast(&un->un_state_cv);
15958 /* Just fail if no media is present in the drive. */
15959 goto fail_command;
15960
15961 default:
15962 if (sd_error_level < SCSI_ERR_RETRYABLE) {
15963 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
15964 "Unit not Ready. Additional sense code 0x%x\n",
15965 asc);
15966 }
15967 break;
15968 }
15969
15970 do_retry:
15971
15972 /*
15973 * Retry the command, as some targets may report NOT READY for
15974 * several seconds after being reset.
15975 */
15976 xp->xb_retry_count++;
15977 si.ssi_severity = SCSI_ERR_RETRYABLE;
15978 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
15979 &si, EIO, SD_BSY_TIMEOUT, NULL);
15980
15981 return;
15982
15983 fail_command:
15984 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
15985 sd_return_failed_command(un, bp, EIO);
15986 }
15987
15988
15989
15990 /*
15991 * Function: sd_sense_key_medium_or_hardware_error
15992 *
15993 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
15994 * sense key.
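 *
 * When xb_retry_count reaches un_reset_retry_count, the code below
 * escalates (dropping SD_MUTEX around the resets): scsi_reset(9F)
 * with RESET_LUN is tried first if LUN reset is enabled, and
 * RESET_TARGET is used only if the LUN reset was not attempted or
 * failed. One deliberate exception: an LSI array reporting a
 * hardware error with ASC 0x84 (LUN Dead) is not reset at all,
 * since resetting the target would victimize every LUN on it.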
15995 * 15996 * Context: May be called from interrupt context 15997 */ 15998 15999 static void 16000 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16001 uint8_t *sense_datap, 16002 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16003 { 16004 struct sd_sense_info si; 16005 uint8_t sense_key = scsi_sense_key(sense_datap); 16006 uint8_t asc = scsi_sense_asc(sense_datap); 16007 16008 ASSERT(un != NULL); 16009 ASSERT(mutex_owned(SD_MUTEX(un))); 16010 ASSERT(bp != NULL); 16011 ASSERT(xp != NULL); 16012 ASSERT(pktp != NULL); 16013 16014 si.ssi_severity = SCSI_ERR_FATAL; 16015 si.ssi_pfa_flag = FALSE; 16016 16017 if (sense_key == KEY_MEDIUM_ERROR) { 16018 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16019 } 16020 16021 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16022 16023 if ((un->un_reset_retry_count != 0) && 16024 (xp->xb_retry_count == un->un_reset_retry_count)) { 16025 mutex_exit(SD_MUTEX(un)); 16026 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16027 if (un->un_f_allow_bus_device_reset == TRUE) { 16028 16029 boolean_t try_resetting_target = B_TRUE; 16030 16031 /* 16032 * We need to be able to handle specific ASC when we are 16033 * handling a KEY_HARDWARE_ERROR. In particular 16034 * taking the default action of resetting the target may 16035 * not be the appropriate way to attempt recovery. 16036 * Resetting a target because of a single LUN failure 16037 * victimizes all LUNs on that target. 16038 * 16039 * This is true for the LSI arrays, if an LSI 16040 * array controller returns an ASC of 0x84 (LUN Dead) we 16041 * should trust it. 16042 */ 16043 16044 if (sense_key == KEY_HARDWARE_ERROR) { 16045 switch (asc) { 16046 case 0x84: 16047 if (SD_IS_LSI(un)) { 16048 try_resetting_target = B_FALSE; 16049 } 16050 break; 16051 default: 16052 break; 16053 } 16054 } 16055 16056 if (try_resetting_target == B_TRUE) { 16057 int reset_retval = 0; 16058 if (un->un_f_lun_reset_enabled == TRUE) { 16059 SD_TRACE(SD_LOG_IO_CORE, un, 16060 "sd_sense_key_medium_or_hardware_" 16061 "error: issuing RESET_LUN\n"); 16062 reset_retval = 16063 scsi_reset(SD_ADDRESS(un), 16064 RESET_LUN); 16065 } 16066 if (reset_retval == 0) { 16067 SD_TRACE(SD_LOG_IO_CORE, un, 16068 "sd_sense_key_medium_or_hardware_" 16069 "error: issuing RESET_TARGET\n"); 16070 (void) scsi_reset(SD_ADDRESS(un), 16071 RESET_TARGET); 16072 } 16073 } 16074 } 16075 mutex_enter(SD_MUTEX(un)); 16076 } 16077 16078 /* 16079 * This really ought to be a fatal error, but we will retry anyway 16080 * as some drives report this as a spurious error. 16081 */ 16082 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16083 &si, EIO, (clock_t)0, NULL); 16084 } 16085 16086 16087 16088 /* 16089 * Function: sd_sense_key_illegal_request 16090 * 16091 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
16092 * 16093 * Context: May be called from interrupt context 16094 */ 16095 16096 static void 16097 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16098 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16099 { 16100 struct sd_sense_info si; 16101 16102 ASSERT(un != NULL); 16103 ASSERT(mutex_owned(SD_MUTEX(un))); 16104 ASSERT(bp != NULL); 16105 ASSERT(xp != NULL); 16106 ASSERT(pktp != NULL); 16107 16108 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16109 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16110 16111 si.ssi_severity = SCSI_ERR_INFO; 16112 si.ssi_pfa_flag = FALSE; 16113 16114 /* Pointless to retry if the target thinks it's an illegal request */ 16115 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16116 sd_return_failed_command(un, bp, EIO); 16117 } 16118 16119 16120 16121 16122 /* 16123 * Function: sd_sense_key_unit_attention 16124 * 16125 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16126 * 16127 * Context: May be called from interrupt context 16128 */ 16129 16130 static void 16131 sd_sense_key_unit_attention(struct sd_lun *un, 16132 uint8_t *sense_datap, 16133 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16134 { 16135 /* 16136 * For UNIT ATTENTION we allow retries for one minute. Devices 16137 * like Sonoma can return UNIT ATTENTION close to a minute 16138 * under certain conditions. 16139 */ 16140 int retry_check_flag = SD_RETRIES_UA; 16141 boolean_t kstat_updated = B_FALSE; 16142 struct sd_sense_info si; 16143 uint8_t asc = scsi_sense_asc(sense_datap); 16144 16145 ASSERT(un != NULL); 16146 ASSERT(mutex_owned(SD_MUTEX(un))); 16147 ASSERT(bp != NULL); 16148 ASSERT(xp != NULL); 16149 ASSERT(pktp != NULL); 16150 16151 si.ssi_severity = SCSI_ERR_INFO; 16152 si.ssi_pfa_flag = FALSE; 16153 16154 16155 switch (asc) { 16156 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16157 if (sd_report_pfa != 0) { 16158 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16159 si.ssi_pfa_flag = TRUE; 16160 retry_check_flag = SD_RETRIES_STANDARD; 16161 goto do_retry; 16162 } 16163 16164 break; 16165 16166 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16167 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16168 un->un_resvd_status |= 16169 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16170 } 16171 #ifdef _LP64 16172 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16173 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16174 un, KM_NOSLEEP) == 0) { 16175 /* 16176 * If we can't dispatch the task we'll just 16177 * live without descriptor sense. We can 16178 * try again on the next "unit attention" 16179 */ 16180 SD_ERROR(SD_LOG_ERROR, un, 16181 "sd_sense_key_unit_attention: " 16182 "Could not dispatch " 16183 "sd_reenable_dsense_task\n"); 16184 } 16185 } 16186 #endif /* _LP64 */ 16187 /* FALLTHRU */ 16188 16189 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16190 if (!un->un_f_has_removable_media) { 16191 break; 16192 } 16193 16194 /* 16195 * When we get a unit attention from a removable-media device, 16196 * it may be in a state that will take a long time to recover 16197 * (e.g., from a reset). Since we are executing in interrupt 16198 * context here, we cannot wait around for the device to come 16199 * back. So hand this command off to sd_media_change_task() 16200 * for deferred processing under taskq thread context. (Note 16201 * that the command still may be failed if a problem is 16202 * encountered at a later time.) 
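 *
 * (taskq_dispatch(9F) called with KM_NOSLEEP returns 0 when no
 * task entry can be allocated, which is why the failure branch
 * below fails the command outright rather than blocking here in
 * interrupt context.)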
16203 */
16204 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
16205 KM_NOSLEEP) == 0) {
16206 /*
16207 * Cannot dispatch the request so fail the command.
16208 */
16209 SD_UPDATE_ERRSTATS(un, sd_harderrs);
16210 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16211 si.ssi_severity = SCSI_ERR_FATAL;
16212 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16213 sd_return_failed_command(un, bp, EIO);
16214 }
16215
16216 /*
16217 * If we failed to dispatch sd_media_change_task(), the kstat
16218 * has already been updated above. If the dispatch succeeded,
16219 * the kstat should be updated later only if the task encounters
16220 * an error, so we set the kstat_updated flag here either way.
16221 */
16222 kstat_updated = B_TRUE;
16223
16224 /*
16225 * Either the command has been successfully dispatched to a
16226 * taskq for retrying, or the dispatch failed. In either case
16227 * do NOT retry again by calling sd_retry_command. This sets up
16228 * two retries of the same command and when one completes and
16229 * frees the resources the other will access freed memory,
16230 * a bad thing.
16231 */
16232 return;
16233
16234 default:
16235 break;
16236 }
16237
16238 /*
16239 * Update kstat if we haven't done that.
16240 */
16241 if (!kstat_updated) {
16242 SD_UPDATE_ERRSTATS(un, sd_harderrs);
16243 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16244 }
16245
16246 do_retry:
16247 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
16248 EIO, SD_UA_RETRY_DELAY, NULL);
16249 }
16250
16251
16252
16253 /*
16254 * Function: sd_sense_key_fail_command
16255 *
16256 * Description: Used to fail a command when we don't like the sense key that
16257 * was returned.
16258 *
16259 * Context: May be called from interrupt context
16260 */
16261
16262 static void
16263 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
16264 struct sd_xbuf *xp, struct scsi_pkt *pktp)
16265 {
16266 struct sd_sense_info si;
16267
16268 ASSERT(un != NULL);
16269 ASSERT(mutex_owned(SD_MUTEX(un)));
16270 ASSERT(bp != NULL);
16271 ASSERT(xp != NULL);
16272 ASSERT(pktp != NULL);
16273
16274 si.ssi_severity = SCSI_ERR_FATAL;
16275 si.ssi_pfa_flag = FALSE;
16276
16277 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16278 sd_return_failed_command(un, bp, EIO);
16279 }
16280
16281
16282
16283 /*
16284 * Function: sd_sense_key_blank_check
16285 *
16286 * Description: Recovery actions for a SCSI "Blank Check" sense key.
16287 * Has no monetary connotation.
16288 *
16289 * Context: May be called from interrupt context
16290 */
16291
16292 static void
16293 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
16294 struct sd_xbuf *xp, struct scsi_pkt *pktp)
16295 {
16296 struct sd_sense_info si;
16297
16298 ASSERT(un != NULL);
16299 ASSERT(mutex_owned(SD_MUTEX(un)));
16300 ASSERT(bp != NULL);
16301 ASSERT(xp != NULL);
16302 ASSERT(pktp != NULL);
16303
16304 /*
16305 * Blank check is not fatal for removable devices, therefore
16306 * it does not require a console message.
16307 */
16308 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
16309 SCSI_ERR_FATAL;
16310 si.ssi_pfa_flag = FALSE;
16311
16312 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16313 sd_return_failed_command(un, bp, EIO);
16314 }
16315
16316
16317
16318
16319 /*
16320 * Function: sd_sense_key_aborted_command
16321 *
16322 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
16323 * 16324 * Context: May be called from interrupt context 16325 */ 16326 16327 static void 16328 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16329 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16330 { 16331 struct sd_sense_info si; 16332 16333 ASSERT(un != NULL); 16334 ASSERT(mutex_owned(SD_MUTEX(un))); 16335 ASSERT(bp != NULL); 16336 ASSERT(xp != NULL); 16337 ASSERT(pktp != NULL); 16338 16339 si.ssi_severity = SCSI_ERR_FATAL; 16340 si.ssi_pfa_flag = FALSE; 16341 16342 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16343 16344 /* 16345 * This really ought to be a fatal error, but we will retry anyway 16346 * as some drives report this as a spurious error. 16347 */ 16348 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16349 &si, EIO, (clock_t)0, NULL); 16350 } 16351 16352 16353 16354 /* 16355 * Function: sd_sense_key_default 16356 * 16357 * Description: Default recovery action for several SCSI sense keys (basically 16358 * attempts a retry). 16359 * 16360 * Context: May be called from interrupt context 16361 */ 16362 16363 static void 16364 sd_sense_key_default(struct sd_lun *un, 16365 uint8_t *sense_datap, 16366 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16367 { 16368 struct sd_sense_info si; 16369 uint8_t sense_key = scsi_sense_key(sense_datap); 16370 16371 ASSERT(un != NULL); 16372 ASSERT(mutex_owned(SD_MUTEX(un))); 16373 ASSERT(bp != NULL); 16374 ASSERT(xp != NULL); 16375 ASSERT(pktp != NULL); 16376 16377 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16378 16379 /* 16380 * Undecoded sense key. Attempt retries and hope that will fix 16381 * the problem. Otherwise, we're dead. 16382 */ 16383 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16384 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16385 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16386 } 16387 16388 si.ssi_severity = SCSI_ERR_FATAL; 16389 si.ssi_pfa_flag = FALSE; 16390 16391 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16392 &si, EIO, (clock_t)0, NULL); 16393 } 16394 16395 16396 16397 /* 16398 * Function: sd_print_retry_msg 16399 * 16400 * Description: Print a message indicating the retry action being taken. 16401 * 16402 * Arguments: un - ptr to associated softstate 16403 * bp - ptr to buf(9S) for the command 16404 * arg - not used. 16405 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16406 * or SD_NO_RETRY_ISSUED 16407 * 16408 * Context: May be called from interrupt context 16409 */ 16410 /* ARGSUSED */ 16411 static void 16412 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16413 { 16414 struct sd_xbuf *xp; 16415 struct scsi_pkt *pktp; 16416 char *reasonp; 16417 char *msgp; 16418 16419 ASSERT(un != NULL); 16420 ASSERT(mutex_owned(SD_MUTEX(un))); 16421 ASSERT(bp != NULL); 16422 pktp = SD_GET_PKTP(bp); 16423 ASSERT(pktp != NULL); 16424 xp = SD_GET_XBUF(bp); 16425 ASSERT(xp != NULL); 16426 16427 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16428 mutex_enter(&un->un_pm_mutex); 16429 if ((un->un_state == SD_STATE_SUSPENDED) || 16430 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16431 (pktp->pkt_flags & FLAG_SILENT)) { 16432 mutex_exit(&un->un_pm_mutex); 16433 goto update_pkt_reason; 16434 } 16435 mutex_exit(&un->un_pm_mutex); 16436 16437 /* 16438 * Suppress messages if they are all the same pkt_reason; with 16439 * TQ, many (up to 256) are returned with the same pkt_reason. 16440 * If we are in panic, then suppress the retry messages. 
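 *
 * In sketch form, the suppression filter applied below is:
 *
 *	if (ddi_in_panic() || state == SD_STATE_OFFLINE ||
 *	    (pkt_reason == un->un_last_pkt_reason &&
 *	    sd_error_level != SCSI_ERR_ALL))
 *		say nothing;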
16441 */ 16442 switch (flag) { 16443 case SD_NO_RETRY_ISSUED: 16444 msgp = "giving up"; 16445 break; 16446 case SD_IMMEDIATE_RETRY_ISSUED: 16447 case SD_DELAYED_RETRY_ISSUED: 16448 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16449 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16450 (sd_error_level != SCSI_ERR_ALL))) { 16451 return; 16452 } 16453 msgp = "retrying command"; 16454 break; 16455 default: 16456 goto update_pkt_reason; 16457 } 16458 16459 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16460 scsi_rname(pktp->pkt_reason)); 16461 16462 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16463 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16464 16465 update_pkt_reason: 16466 /* 16467 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16468 * This is to prevent multiple console messages for the same failure 16469 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16470 * when the command is retried successfully because there still may be 16471 * more commands coming back with the same value of pktp->pkt_reason. 16472 */ 16473 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16474 un->un_last_pkt_reason = pktp->pkt_reason; 16475 } 16476 } 16477 16478 16479 /* 16480 * Function: sd_print_cmd_incomplete_msg 16481 * 16482 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16483 * 16484 * Arguments: un - ptr to associated softstate 16485 * bp - ptr to buf(9S) for the command 16486 * arg - passed to sd_print_retry_msg() 16487 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16488 * or SD_NO_RETRY_ISSUED 16489 * 16490 * Context: May be called from interrupt context 16491 */ 16492 16493 static void 16494 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16495 int code) 16496 { 16497 dev_info_t *dip; 16498 16499 ASSERT(un != NULL); 16500 ASSERT(mutex_owned(SD_MUTEX(un))); 16501 ASSERT(bp != NULL); 16502 16503 switch (code) { 16504 case SD_NO_RETRY_ISSUED: 16505 /* Command was failed. Someone turned off this target? */ 16506 if (un->un_state != SD_STATE_OFFLINE) { 16507 /* 16508 * Suppress message if we are detaching and 16509 * device has been disconnected 16510 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16511 * private interface and not part of the DDI 16512 */ 16513 dip = un->un_sd->sd_dev; 16514 if (!(DEVI_IS_DETACHING(dip) && 16515 DEVI_IS_DEVICE_REMOVED(dip))) { 16516 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16517 "disk not responding to selection\n"); 16518 } 16519 New_state(un, SD_STATE_OFFLINE); 16520 } 16521 break; 16522 16523 case SD_DELAYED_RETRY_ISSUED: 16524 case SD_IMMEDIATE_RETRY_ISSUED: 16525 default: 16526 /* Command was successfully queued for retry */ 16527 sd_print_retry_msg(un, bp, arg, code); 16528 break; 16529 } 16530 } 16531 16532 16533 /* 16534 * Function: sd_pkt_reason_cmd_incomplete 16535 * 16536 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
16537 * 16538 * Context: May be called from interrupt context 16539 */ 16540 16541 static void 16542 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16543 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16544 { 16545 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16546 16547 ASSERT(un != NULL); 16548 ASSERT(mutex_owned(SD_MUTEX(un))); 16549 ASSERT(bp != NULL); 16550 ASSERT(xp != NULL); 16551 ASSERT(pktp != NULL); 16552 16553 /* Do not do a reset if selection did not complete */ 16554 /* Note: Should this not just check the bit? */ 16555 if (pktp->pkt_state != STATE_GOT_BUS) { 16556 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16557 sd_reset_target(un, pktp); 16558 } 16559 16560 /* 16561 * If the target was not successfully selected, then set 16562 * SD_RETRIES_FAILFAST to indicate that we lost communication 16563 * with the target, and further retries and/or commands are 16564 * likely to take a long time. 16565 */ 16566 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16567 flag |= SD_RETRIES_FAILFAST; 16568 } 16569 16570 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16571 16572 sd_retry_command(un, bp, flag, 16573 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16574 } 16575 16576 16577 16578 /* 16579 * Function: sd_pkt_reason_cmd_tran_err 16580 * 16581 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16582 * 16583 * Context: May be called from interrupt context 16584 */ 16585 16586 static void 16587 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16588 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16589 { 16590 ASSERT(un != NULL); 16591 ASSERT(mutex_owned(SD_MUTEX(un))); 16592 ASSERT(bp != NULL); 16593 ASSERT(xp != NULL); 16594 ASSERT(pktp != NULL); 16595 16596 /* 16597 * Do not reset if we got a parity error, or if 16598 * selection did not complete. 16599 */ 16600 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16601 /* Note: Should this not just check the bit for pkt_state? */ 16602 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16603 (pktp->pkt_state != STATE_GOT_BUS)) { 16604 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16605 sd_reset_target(un, pktp); 16606 } 16607 16608 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16609 16610 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16611 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16612 } 16613 16614 16615 16616 /* 16617 * Function: sd_pkt_reason_cmd_reset 16618 * 16619 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16620 * 16621 * Context: May be called from interrupt context 16622 */ 16623 16624 static void 16625 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16626 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16627 { 16628 ASSERT(un != NULL); 16629 ASSERT(mutex_owned(SD_MUTEX(un))); 16630 ASSERT(bp != NULL); 16631 ASSERT(xp != NULL); 16632 ASSERT(pktp != NULL); 16633 16634 /* The target may still be running the command, so try to reset. */ 16635 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16636 sd_reset_target(un, pktp); 16637 16638 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16639 16640 /* 16641 * If pkt_reason is CMD_RESET chances are that this pkt got 16642 * reset because another target on this bus caused it. The target 16643 * that caused it should get CMD_TIMEOUT with pkt_statistics 16644 * of STAT_TIMEOUT/STAT_DEV_RESET. 
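 *
 * (SD_RETRIES_VICTIM below charges the retry to the separate victim
 * retry bookkeeping rather than the standard retry count; the intent,
 * in sketch form:
 *
 *	collateral damage from someone else's reset -> victim budget
 *	genuine failure of this target              -> standard budget )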
16645 */ 16646 16647 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16648 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16649 } 16650 16651 16652 16653 16654 /* 16655 * Function: sd_pkt_reason_cmd_aborted 16656 * 16657 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 16658 * 16659 * Context: May be called from interrupt context 16660 */ 16661 16662 static void 16663 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 16664 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16665 { 16666 ASSERT(un != NULL); 16667 ASSERT(mutex_owned(SD_MUTEX(un))); 16668 ASSERT(bp != NULL); 16669 ASSERT(xp != NULL); 16670 ASSERT(pktp != NULL); 16671 16672 /* The target may still be running the command, so try to reset. */ 16673 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16674 sd_reset_target(un, pktp); 16675 16676 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16677 16678 /* 16679 * If pkt_reason is CMD_ABORTED chances are that this pkt got 16680 * aborted because another target on this bus caused it. The target 16681 * that caused it should get CMD_TIMEOUT with pkt_statistics 16682 * of STAT_TIMEOUT/STAT_DEV_RESET. 16683 */ 16684 16685 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16686 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16687 } 16688 16689 16690 16691 /* 16692 * Function: sd_pkt_reason_cmd_timeout 16693 * 16694 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 16695 * 16696 * Context: May be called from interrupt context 16697 */ 16698 16699 static void 16700 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 16701 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16702 { 16703 ASSERT(un != NULL); 16704 ASSERT(mutex_owned(SD_MUTEX(un))); 16705 ASSERT(bp != NULL); 16706 ASSERT(xp != NULL); 16707 ASSERT(pktp != NULL); 16708 16709 16710 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16711 sd_reset_target(un, pktp); 16712 16713 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16714 16715 /* 16716 * A command timeout indicates that we could not establish 16717 * communication with the target, so set SD_RETRIES_FAILFAST 16718 * as further retries/commands are likely to take a long time. 16719 */ 16720 sd_retry_command(un, bp, 16721 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 16722 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16723 } 16724 16725 16726 16727 /* 16728 * Function: sd_pkt_reason_cmd_unx_bus_free 16729 * 16730 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 16731 * 16732 * Context: May be called from interrupt context 16733 */ 16734 16735 static void 16736 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 16737 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16738 { 16739 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 16740 16741 ASSERT(un != NULL); 16742 ASSERT(mutex_owned(SD_MUTEX(un))); 16743 ASSERT(bp != NULL); 16744 ASSERT(xp != NULL); 16745 ASSERT(pktp != NULL); 16746 16747 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16748 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16749 16750 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 16751 sd_print_retry_msg : NULL; 16752 16753 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16754 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16755 } 16756 16757 16758 /* 16759 * Function: sd_pkt_reason_cmd_tag_reject 16760 * 16761 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
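 *
 * (Recovery sketch, per the body below: a tag reject means the target
 * cannot handle tagged queuing, so un_tagflags is cleared, the throttle
 * is dropped to min(throttle, 3) -- or 1 without queueing support --
 * and "tagged-qing" is switched off via scsi_ifsetcap(9F).)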
16762 * 16763 * Context: May be called from interrupt context 16764 */ 16765 16766 static void 16767 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 16768 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16769 { 16770 ASSERT(un != NULL); 16771 ASSERT(mutex_owned(SD_MUTEX(un))); 16772 ASSERT(bp != NULL); 16773 ASSERT(xp != NULL); 16774 ASSERT(pktp != NULL); 16775 16776 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16777 pktp->pkt_flags = 0; 16778 un->un_tagflags = 0; 16779 if (un->un_f_opt_queueing == TRUE) { 16780 un->un_throttle = min(un->un_throttle, 3); 16781 } else { 16782 un->un_throttle = 1; 16783 } 16784 mutex_exit(SD_MUTEX(un)); 16785 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 16786 mutex_enter(SD_MUTEX(un)); 16787 16788 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16789 16790 /* Legacy behavior not to check retry counts here. */ 16791 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 16792 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16793 } 16794 16795 16796 /* 16797 * Function: sd_pkt_reason_default 16798 * 16799 * Description: Default recovery actions for SCSA pkt_reason values that 16800 * do not have more explicit recovery actions. 16801 * 16802 * Context: May be called from interrupt context 16803 */ 16804 16805 static void 16806 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 16807 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16808 { 16809 ASSERT(un != NULL); 16810 ASSERT(mutex_owned(SD_MUTEX(un))); 16811 ASSERT(bp != NULL); 16812 ASSERT(xp != NULL); 16813 ASSERT(pktp != NULL); 16814 16815 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16816 sd_reset_target(un, pktp); 16817 16818 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16819 16820 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16821 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16822 } 16823 16824 16825 16826 /* 16827 * Function: sd_pkt_status_check_condition 16828 * 16829 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 16830 * 16831 * Context: May be called from interrupt context 16832 */ 16833 16834 static void 16835 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 16836 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16837 { 16838 ASSERT(un != NULL); 16839 ASSERT(mutex_owned(SD_MUTEX(un))); 16840 ASSERT(bp != NULL); 16841 ASSERT(xp != NULL); 16842 ASSERT(pktp != NULL); 16843 16844 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 16845 "entry: buf:0x%p xp:0x%p\n", bp, xp); 16846 16847 /* 16848 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 16849 * command will be retried after the request sense). Otherwise, retry 16850 * the command. Note: we are issuing the request sense even though the 16851 * retry limit may have been reached for the failed command. 
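 *
 * (With ARQ -- auto request sense -- the HBA has already collected the
 * sense bytes along with the failed command, so the split below is:
 *
 *	un_f_arq_enabled == FALSE -> sd_send_request_sense_command()
 *	un_f_arq_enabled == TRUE  -> sd_retry_command() directly )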
16852 */ 16853 if (un->un_f_arq_enabled == FALSE) { 16854 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 16855 "no ARQ, sending request sense command\n"); 16856 sd_send_request_sense_command(un, bp, pktp); 16857 } else { 16858 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 16859 "ARQ, retrying request sense command\n"); 16860 #if defined(__i386) || defined(__amd64) 16861 /* 16862 * The SD_RETRY_DELAY value needs to be adjusted here 16863 * if SD_RETRY_DELAY changes in sddef.h 16864 */ 16865 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 16866 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 16867 NULL); 16868 #else 16869 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 16870 EIO, SD_RETRY_DELAY, NULL); 16871 #endif 16872 } 16873 16874 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 16875 } 16876 16877 16878 /* 16879 * Function: sd_pkt_status_busy 16880 * 16881 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 16882 * 16883 * Context: May be called from interrupt context 16884 */ 16885 16886 static void 16887 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16888 struct scsi_pkt *pktp) 16889 { 16890 ASSERT(un != NULL); 16891 ASSERT(mutex_owned(SD_MUTEX(un))); 16892 ASSERT(bp != NULL); 16893 ASSERT(xp != NULL); 16894 ASSERT(pktp != NULL); 16895 16896 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16897 "sd_pkt_status_busy: entry\n"); 16898 16899 /* If retries are exhausted, just fail the command. */ 16900 if (xp->xb_retry_count >= un->un_busy_retry_count) { 16901 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16902 "device busy too long\n"); 16903 sd_return_failed_command(un, bp, EIO); 16904 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16905 "sd_pkt_status_busy: exit\n"); 16906 return; 16907 } 16908 xp->xb_retry_count++; 16909 16910 /* 16911 * Try to reset the target. However, we do not want to perform 16912 * more than one reset if the device continues to fail. The reset 16913 * will be performed when the retry count reaches the reset 16914 * threshold. This threshold should be set such that at least 16915 * one retry is issued before the reset is performed. 16916 */ 16917 if (xp->xb_retry_count == 16918 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 16919 int rval = 0; 16920 mutex_exit(SD_MUTEX(un)); 16921 if (un->un_f_allow_bus_device_reset == TRUE) { 16922 /* 16923 * First try to reset the LUN; if we cannot, then 16924 * try to reset the target. 16925 */ 16926 if (un->un_f_lun_reset_enabled == TRUE) { 16927 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16928 "sd_pkt_status_busy: RESET_LUN\n"); 16929 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 16930 } 16931 if (rval == 0) { 16932 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16933 "sd_pkt_status_busy: RESET_TARGET\n"); 16934 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 16935 } 16936 } 16937 if (rval == 0) { 16938 /* 16939 * If the RESET_LUN and/or RESET_TARGET failed, 16940 * try RESET_ALL 16941 */ 16942 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16943 "sd_pkt_status_busy: RESET_ALL\n"); 16944 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 16945 } 16946 mutex_enter(SD_MUTEX(un)); 16947 if (rval == 0) { 16948 /* 16949 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 16950 * At this point we give up & fail the command.
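 *
 * (The escalation just above is, in sketch form:
 *
 *	scsi_reset(RESET_LUN)    -- if enabled, and if that fails...
 *	scsi_reset(RESET_TARGET) -- and if that also fails...
 *	scsi_reset(RESET_ALL)    -- last resort; rval == 0 from all
 *				    three means the command is failed. )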
16951 */ 16952 sd_return_failed_command(un, bp, EIO); 16953 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16954 "sd_pkt_status_busy: exit (failed cmd)\n"); 16955 return; 16956 } 16957 } 16958 16959 /* 16960 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 16961 * we have already checked the retry counts above. 16962 */ 16963 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 16964 EIO, SD_BSY_TIMEOUT, NULL); 16965 16966 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16967 "sd_pkt_status_busy: exit\n"); 16968 } 16969 16970 16971 /* 16972 * Function: sd_pkt_status_reservation_conflict 16973 * 16974 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 16975 * command status. 16976 * 16977 * Context: May be called from interrupt context 16978 */ 16979 16980 static void 16981 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 16982 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16983 { 16984 ASSERT(un != NULL); 16985 ASSERT(mutex_owned(SD_MUTEX(un))); 16986 ASSERT(bp != NULL); 16987 ASSERT(xp != NULL); 16988 ASSERT(pktp != NULL); 16989 16990 /* 16991 * If the command was PERSISTENT RESERVE [IN|OUT], the reservation 16992 * conflict may stem from incorrect keys, a missing registration, or 16993 * no reservation held, so we return EACCES to the caller. 16994 */ 16995 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 16996 int cmd = SD_GET_PKT_OPCODE(pktp); 16997 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 16998 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 16999 sd_return_failed_command(un, bp, EACCES); 17000 return; 17001 } 17002 } 17003 17004 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17005 17006 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17007 if (sd_failfast_enable != 0) { 17008 /* By definition, we must panic here.... */ 17009 sd_panic_for_res_conflict(un); 17010 /*NOTREACHED*/ 17011 } 17012 SD_ERROR(SD_LOG_IO, un, 17013 "sd_handle_resv_conflict: Disk Reserved\n"); 17014 sd_return_failed_command(un, bp, EACCES); 17015 return; 17016 } 17017 17018 /* 17019 * 1147670: retry only if sd_retry_on_reservation_conflict 17020 * property is set (default is 1). Retries will not succeed 17021 * on a disk reserved by another initiator. HA systems 17022 * may reset this via sd.conf to avoid these retries. 17023 * 17024 * Note: The legacy return code for this failure is EIO; however, 17025 * EACCES seems more appropriate for a reservation conflict. 17026 */ 17027 if (sd_retry_on_reservation_conflict == 0) { 17028 SD_ERROR(SD_LOG_IO, un, 17029 "sd_handle_resv_conflict: Device Reserved\n"); 17030 sd_return_failed_command(un, bp, EIO); 17031 return; 17032 } 17033 17034 /* 17035 * Retry the command if we can. 17036 * 17037 * Note: The legacy return code for this failure is EIO; however, 17038 * EACCES seems more appropriate for a reservation conflict. 17039 */ 17040 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17041 (clock_t)2, NULL); 17042 } 17043 17044 17045 17046 /* 17047 * Function: sd_pkt_status_qfull 17048 * 17049 * Description: Handle a QUEUE FULL condition from the target. This can 17050 * occur if the HBA does not handle the queue full condition. 17051 * (Basically this means third-party HBAs, as Sun HBAs will 17052 * handle the queue full condition themselves.) Note that if there are 17053 * some commands already in the transport, then the queue full 17054 * has occurred because the queue for this nexus is actually 17055 * full.
If there are no commands in the transport, then the 17056 * queue full is resulting from some other initiator or lun 17057 * consuming all the resources at the target. 17058 * 17059 * Context: May be called from interrupt context 17060 */ 17061 17062 static void 17063 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17064 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17065 { 17066 ASSERT(un != NULL); 17067 ASSERT(mutex_owned(SD_MUTEX(un))); 17068 ASSERT(bp != NULL); 17069 ASSERT(xp != NULL); 17070 ASSERT(pktp != NULL); 17071 17072 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17073 "sd_pkt_status_qfull: entry\n"); 17074 17075 /* 17076 * Just lower the QFULL throttle and retry the command. Note that 17077 * we do not limit the number of retries here. 17078 */ 17079 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17080 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17081 SD_RESTART_TIMEOUT, NULL); 17082 17083 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17084 "sd_pkt_status_qfull: exit\n"); 17085 } 17086 17087 17088 /* 17089 * Function: sd_reset_target 17090 * 17091 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17092 * RESET_TARGET, or RESET_ALL. 17093 * 17094 * Context: May be called under interrupt context. 17095 */ 17096 17097 static void 17098 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17099 { 17100 int rval = 0; 17101 17102 ASSERT(un != NULL); 17103 ASSERT(mutex_owned(SD_MUTEX(un))); 17104 ASSERT(pktp != NULL); 17105 17106 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17107 17108 /* 17109 * No need to reset if the transport layer has already done so. 17110 */ 17111 if ((pktp->pkt_statistics & 17112 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17113 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17114 "sd_reset_target: no reset\n"); 17115 return; 17116 } 17117 17118 mutex_exit(SD_MUTEX(un)); 17119 17120 if (un->un_f_allow_bus_device_reset == TRUE) { 17121 if (un->un_f_lun_reset_enabled == TRUE) { 17122 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17123 "sd_reset_target: RESET_LUN\n"); 17124 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17125 } 17126 if (rval == 0) { 17127 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17128 "sd_reset_target: RESET_TARGET\n"); 17129 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17130 } 17131 } 17132 17133 if (rval == 0) { 17134 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17135 "sd_reset_target: RESET_ALL\n"); 17136 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17137 } 17138 17139 mutex_enter(SD_MUTEX(un)); 17140 17141 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17142 } 17143 17144 17145 /* 17146 * Function: sd_media_change_task 17147 * 17148 * Description: Recovery action for CDROM to become available. 
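 *
 * Retry policy, per the loop below: start with a budget of
 * SD_UNIT_ATTENTION_RETRY / 10 attempts spaced 0.5 sec apart, and widen
 * to the full SD_UNIT_ATTENTION_RETRY budget once sd_handle_mchange()
 * returns EAGAIN (the drive is present but still becoming ready).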
17149 * 17150 * Context: Executes in a taskq() thread context 17151 */ 17152 17153 static void 17154 sd_media_change_task(void *arg) 17155 { 17156 struct scsi_pkt *pktp = arg; 17157 struct sd_lun *un; 17158 struct buf *bp; 17159 struct sd_xbuf *xp; 17160 int err = 0; 17161 int retry_count = 0; 17162 int retry_limit = SD_UNIT_ATTENTION_RETRY / 10; 17163 struct sd_sense_info si; 17164 17165 ASSERT(pktp != NULL); 17166 bp = (struct buf *)pktp->pkt_private; 17167 ASSERT(bp != NULL); 17168 xp = SD_GET_XBUF(bp); 17169 ASSERT(xp != NULL); 17170 un = SD_GET_UN(bp); 17171 ASSERT(un != NULL); 17172 ASSERT(!mutex_owned(SD_MUTEX(un))); 17173 ASSERT(un->un_f_monitor_media_state); 17174 17175 si.ssi_severity = SCSI_ERR_INFO; 17176 si.ssi_pfa_flag = FALSE; 17177 17178 /* 17179 * When a reset is issued on a CDROM, it takes a long time to 17180 * recover. The first few attempts to read the capacity and other 17181 * operations related to handling the unit attention fail (with an 17182 * ASC of 0x04 and an ASCQ of 0x01). In that case we want to allow 17183 * enough retries, while still limiting the retries for genuine 17184 * failures such as no media in the drive. 17185 */ 17186 while (retry_count++ < retry_limit) { 17187 if ((err = sd_handle_mchange(un)) == 0) { 17188 break; 17189 } 17190 if (err == EAGAIN) { 17191 retry_limit = SD_UNIT_ATTENTION_RETRY; 17192 } 17193 /* Sleep for 0.5 sec. & try again */ 17194 delay(drv_usectohz(500000)); 17195 } 17196 17197 /* 17198 * Dispatch (retry or fail) the original command here, 17199 * along with appropriate console messages.... 17200 * 17201 * Must grab the mutex before calling sd_retry_command, 17202 * sd_print_sense_msg and sd_return_failed_command. 17203 */ 17204 mutex_enter(SD_MUTEX(un)); 17205 if (err != SD_CMD_SUCCESS) { 17206 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17207 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17208 si.ssi_severity = SCSI_ERR_FATAL; 17209 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17210 sd_return_failed_command(un, bp, EIO); 17211 } else { 17212 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17213 &si, EIO, (clock_t)0, NULL); 17214 } 17215 mutex_exit(SD_MUTEX(un)); 17216 } 17217 17218 17219 17220 /* 17221 * Function: sd_handle_mchange 17222 * 17223 * Description: Perform geometry validation & other recovery when the CDROM 17224 * has been removed from the drive.
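 *
 * In outline (matching the body below):
 *	1. READ CAPACITY to re-learn the capacity and block size
 *	2. sd_update_block_info() and error-kstat capacity update
 *	3. cmlb_invalidate()/cmlb_validate() to rebuild the label
 *	4. DOORLOCK(SD_REMOVAL_PREVENT) to re-lock the door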
17225 * 17226 * Return Code: 0 for success 17227 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17228 * sd_send_scsi_READ_CAPACITY() 17229 * 17230 * Context: Executes in a taskq() thread context 17231 */ 17232 17233 static int 17234 sd_handle_mchange(struct sd_lun *un) 17235 { 17236 uint64_t capacity; 17237 uint32_t lbasize; 17238 int rval; 17239 17240 ASSERT(!mutex_owned(SD_MUTEX(un))); 17241 ASSERT(un->un_f_monitor_media_state); 17242 17243 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17244 SD_PATH_DIRECT_PRIORITY)) != 0) { 17245 return (rval); 17246 } 17247 17248 mutex_enter(SD_MUTEX(un)); 17249 sd_update_block_info(un, lbasize, capacity); 17250 17251 if (un->un_errstats != NULL) { 17252 struct sd_errstats *stp = 17253 (struct sd_errstats *)un->un_errstats->ks_data; 17254 stp->sd_capacity.value.ui64 = (uint64_t) 17255 ((uint64_t)un->un_blockcount * 17256 (uint64_t)un->un_tgt_blocksize); 17257 } 17258 17259 17260 /* 17261 * Check if the media in the device is writable or not 17262 */ 17263 if (ISCD(un)) 17264 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17265 17266 /* 17267 * Note: Maybe let the strategy/partitioning chain worry about getting 17268 * valid geometry. 17269 */ 17270 mutex_exit(SD_MUTEX(un)); 17271 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17272 17273 17274 if (cmlb_validate(un->un_cmlbhandle, 0, 17275 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17276 return (EIO); 17277 } else { 17278 if (un->un_f_pkstats_enabled) { 17279 sd_set_pstats(un); 17280 SD_TRACE(SD_LOG_IO_PARTITION, un, 17281 "sd_handle_mchange: un:0x%p pstats created and " 17282 "set\n", un); 17283 } 17284 } 17285 17286 17287 /* 17288 * Try to lock the door 17289 */ 17290 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17291 SD_PATH_DIRECT_PRIORITY)); 17292 } 17293 17294 17295 /* 17296 * Function: sd_send_scsi_DOORLOCK 17297 * 17298 * Description: Issue the scsi DOOR LOCK command 17299 * 17300 * Arguments: un - pointer to driver soft state (unit) structure for 17301 * this target. 17302 * flag - SD_REMOVAL_ALLOW 17303 * SD_REMOVAL_PREVENT 17304 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17305 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17306 * to use the USCSI "direct" chain and bypass the normal 17307 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17308 * command is issued as part of an error recovery action. 17309 * 17310 * Return Code: 0 - Success 17311 * errno return code from sd_send_scsi_cmd() 17312 * 17313 * Context: Can sleep. 17314 */ 17315 17316 static int 17317 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17318 { 17319 union scsi_cdb cdb; 17320 struct uscsi_cmd ucmd_buf; 17321 struct scsi_extended_sense sense_buf; 17322 int status; 17323 17324 ASSERT(un != NULL); 17325 ASSERT(!mutex_owned(SD_MUTEX(un))); 17326 17327 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17328 17329 /* already determined doorlock is not supported, fake success */ 17330 if (un->un_f_doorlock_supported == FALSE) { 17331 return (0); 17332 } 17333 17334 /* 17335 * If we are ejecting and see an SD_REMOVAL_PREVENT 17336 * ignore the command so we can complete the eject 17337 * operation. 
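 *
 * (The CDB built below is the SCSI PREVENT ALLOW MEDIUM REMOVAL
 * command; in sketch form, assuming the sddef.h encodings
 * SD_REMOVAL_ALLOW == 0 and SD_REMOVAL_PREVENT == 1:
 *
 *	cdb[0] = SCMD_DOORLOCK (0x1E)
 *	cdb[4] = flag          (bit 0: 1 = prevent, 0 = allow) )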
17338 */ 17339 if (flag == SD_REMOVAL_PREVENT) { 17340 mutex_enter(SD_MUTEX(un)); 17341 if (un->un_f_ejecting == TRUE) { 17342 mutex_exit(SD_MUTEX(un)); 17343 return (EAGAIN); 17344 } 17345 mutex_exit(SD_MUTEX(un)); 17346 } 17347 17348 bzero(&cdb, sizeof (cdb)); 17349 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17350 17351 cdb.scc_cmd = SCMD_DOORLOCK; 17352 cdb.cdb_opaque[4] = (uchar_t)flag; 17353 17354 ucmd_buf.uscsi_cdb = (char *)&cdb; 17355 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17356 ucmd_buf.uscsi_bufaddr = NULL; 17357 ucmd_buf.uscsi_buflen = 0; 17358 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17359 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17360 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17361 ucmd_buf.uscsi_timeout = 15; 17362 17363 SD_TRACE(SD_LOG_IO, un, 17364 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 17365 17366 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17367 UIO_SYSSPACE, path_flag); 17368 17369 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 17370 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17371 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 17372 /* fake success and skip subsequent doorlock commands */ 17373 un->un_f_doorlock_supported = FALSE; 17374 return (0); 17375 } 17376 17377 return (status); 17378 } 17379 17380 /* 17381 * Function: sd_send_scsi_READ_CAPACITY 17382 * 17383 * Description: This routine uses the scsi READ CAPACITY command to determine 17384 * the device capacity in number of blocks and the device native 17385 * block size. If this function returns a failure, then the 17386 * values in *capp and *lbap are undefined. If the capacity 17387 * returned is 0xffffffff then the lun is too large for a 17388 * normal READ CAPACITY command and the results of a 17389 * READ CAPACITY 16 will be used instead. 17390 * 17391 * Arguments: un - ptr to soft state struct for the target 17392 * capp - ptr to unsigned 64-bit variable to receive the 17393 * capacity value from the command. 17394 * lbap - ptr to unsigned 32-bit variable to receive the 17395 * block size value from the command. 17396 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17397 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17398 * to use the USCSI "direct" chain and bypass the normal 17399 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17400 * command is issued as part of an error recovery action. 17401 * 17402 * Return Code: 0 - Success 17403 * EIO - IO error 17404 * EACCES - Reservation conflict detected 17405 * EAGAIN - Device is becoming ready 17406 * errno return code from sd_send_scsi_cmd() 17407 * 17408 * Context: Can sleep. Blocks until command completes. 17409 */ 17410 17411 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 17412 17413 static int 17414 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 17415 int path_flag) 17416 { 17417 struct scsi_extended_sense sense_buf; 17418 struct uscsi_cmd ucmd_buf; 17419 union scsi_cdb cdb; 17420 uint32_t *capacity_buf; 17421 uint64_t capacity; 17422 uint32_t lbasize; 17423 int status; 17424 17425 ASSERT(un != NULL); 17426 ASSERT(!mutex_owned(SD_MUTEX(un))); 17427 ASSERT(capp != NULL); 17428 ASSERT(lbap != NULL); 17429 17430 SD_TRACE(SD_LOG_IO, un, 17431 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17432 17433 /* 17434 * First send a READ_CAPACITY command to the target. 17435 * (This command is mandatory under SCSI-2.) 17436 * 17437 * Set up the CDB for the READ_CAPACITY command.
The Partial 17438 * Medium Indicator bit is cleared. The address field must be 17439 * zero if the PMI bit is zero. 17440 */ 17441 bzero(&cdb, sizeof (cdb)); 17442 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17443 17444 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17445 17446 cdb.scc_cmd = SCMD_READ_CAPACITY; 17447 17448 ucmd_buf.uscsi_cdb = (char *)&cdb; 17449 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17450 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17451 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17452 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17453 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17454 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17455 ucmd_buf.uscsi_timeout = 60; 17456 17457 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17458 UIO_SYSSPACE, path_flag); 17459 17460 switch (status) { 17461 case 0: 17462 /* Return failure if we did not get valid capacity data. */ 17463 if (ucmd_buf.uscsi_resid != 0) { 17464 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17465 return (EIO); 17466 } 17467 17468 /* 17469 * Read capacity and block size from the READ CAPACITY 10 data. 17470 * This data may be adjusted later due to device specific 17471 * issues. 17472 * 17473 * According to the SCSI spec, the READ CAPACITY 10 17474 * command returns the following: 17475 * 17476 * bytes 0-3: Maximum logical block address available. 17477 * (MSB in byte:0 & LSB in byte:3) 17478 * 17479 * bytes 4-7: Block length in bytes 17480 * (MSB in byte:4 & LSB in byte:7) 17481 * 17482 */ 17483 capacity = BE_32(capacity_buf[0]); 17484 lbasize = BE_32(capacity_buf[1]); 17485 17486 /* 17487 * Done with capacity_buf 17488 */ 17489 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17490 17491 /* 17492 * if the reported capacity is set to all 0xf's, then 17493 * this disk is too large and requires SBC-2 commands. 17494 * Reissue the request using READ CAPACITY 16. 17495 */ 17496 if (capacity == 0xffffffff) { 17497 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17498 &lbasize, path_flag); 17499 if (status != 0) { 17500 return (status); 17501 } 17502 } 17503 break; /* Success! */ 17504 case EIO: 17505 switch (ucmd_buf.uscsi_status) { 17506 case STATUS_RESERVATION_CONFLICT: 17507 status = EACCES; 17508 break; 17509 case STATUS_CHECK: 17510 /* 17511 * Check condition; look for ASC/ASCQ of 0x04/0x01 17512 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17513 */ 17514 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17515 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17516 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17517 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17518 return (EAGAIN); 17519 } 17520 break; 17521 default: 17522 break; 17523 } 17524 /* FALLTHRU */ 17525 default: 17526 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17527 return (status); 17528 } 17529 17530 /* 17531 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17532 * (2352 and 0 are common) so for these devices always force the value 17533 * to 2048 as required by the ATAPI specs. 17534 */ 17535 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17536 lbasize = 2048; 17537 } 17538 17539 /* 17540 * Get the maximum LBA value from the READ CAPACITY data. 17541 * Here we assume that the Partial Medium Indicator (PMI) bit 17542 * was cleared when issuing the command. This means that the LBA 17543 * returned from the device is the LBA of the last logical block 17544 * on the logical unit. The actual logical block count will be 17545 * this value plus one. 
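 *
 * Worked example (illustrative values): a CD-ROM reporting a max LBA
 * of 0x0003FFFF with lbasize 2048 and un_sys_blocksize 512 yields
 *
 *	capacity = (0x0003FFFF + 1) * (2048 / 512) = 0x00100000
 *
 * i.e. 1M 512-byte system blocks (512 MB) for 256K native blocks.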
17546 * 17547 * Currently the capacity is saved in terms of un->un_sys_blocksize, 17548 * so scale the capacity value to reflect this. 17549 */ 17550 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 17551 17552 /* 17553 * Copy the values from the READ CAPACITY command into the space 17554 * provided by the caller. 17555 */ 17556 *capp = capacity; 17557 *lbap = lbasize; 17558 17559 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 17560 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 17561 17562 /* 17563 * Both the lbasize and capacity from the device must be nonzero, 17564 * otherwise we assume that the values are not valid and return 17565 * failure to the caller. (4203735) 17566 */ 17567 if ((capacity == 0) || (lbasize == 0)) { 17568 return (EIO); 17569 } 17570 17571 return (0); 17572 } 17573 17574 /* 17575 * Function: sd_send_scsi_READ_CAPACITY_16 17576 * 17577 * Description: This routine uses the scsi READ CAPACITY 16 command to 17578 * determine the device capacity in number of blocks and the 17579 * device native block size. If this function returns a failure, 17580 * then the values in *capp and *lbap are undefined. 17581 * This routine should always be called by 17582 * sd_send_scsi_READ_CAPACITY, which will apply any device 17583 * specific adjustments to capacity and lbasize. 17584 * 17585 * Arguments: un - ptr to soft state struct for the target 17586 * capp - ptr to unsigned 64-bit variable to receive the 17587 * capacity value from the command. 17588 * lbap - ptr to unsigned 32-bit variable to receive the 17589 * block size value from the command. 17590 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17591 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17592 * to use the USCSI "direct" chain and bypass the normal 17593 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 17594 * this command is issued as part of an error recovery 17595 * action. 17596 * 17597 * Return Code: 0 - Success 17598 * EIO - IO error 17599 * EACCES - Reservation conflict detected 17600 * EAGAIN - Device is becoming ready 17601 * errno return code from sd_send_scsi_cmd() 17602 * 17603 * Context: Can sleep. Blocks until command completes. 17604 */ 17605 17606 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 17607 17608 static int 17609 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 17610 uint32_t *lbap, int path_flag) 17611 { 17612 struct scsi_extended_sense sense_buf; 17613 struct uscsi_cmd ucmd_buf; 17614 union scsi_cdb cdb; 17615 uint64_t *capacity16_buf; 17616 uint64_t capacity; 17617 uint32_t lbasize; 17618 int status; 17619 17620 ASSERT(un != NULL); 17621 ASSERT(!mutex_owned(SD_MUTEX(un))); 17622 ASSERT(capp != NULL); 17623 ASSERT(lbap != NULL); 17624 17625 SD_TRACE(SD_LOG_IO, un, 17626 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 17627 17628 /* 17629 * First send a READ_CAPACITY_16 command to the target. 17630 * 17631 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 17632 * Medium Indicator bit is cleared. The address field must be 17633 * zero if the PMI bit is zero.
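 *
 * (READ CAPACITY(16) travels as a SERVICE ACTION IN command; the CDB
 * built below is, per SBC-2:
 *
 *	cdb[0]      = SCMD_SVC_ACTION_IN_G4 (0x9E)
 *	cdb[1]      = SSVC_ACTION_READ_CAPACITY_G4 (the service action)
 *	cdb[10..13] = allocation length, filled in via FORMG4COUNT() )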
17634 */ 17635 bzero(&cdb, sizeof (cdb)); 17636 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17637 17638 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 17639 17640 ucmd_buf.uscsi_cdb = (char *)&cdb; 17641 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 17642 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 17643 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 17644 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17645 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17646 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17647 ucmd_buf.uscsi_timeout = 60; 17648 17649 /* 17650 * Read Capacity (16) is a Service Action In command. One 17651 * command byte (0x9E) is overloaded for multiple operations, 17652 * with the second CDB byte specifying the desired operation. 17653 */ 17654 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 17655 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 17656 17657 /* 17658 * Fill in the allocation length field 17659 */ 17660 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 17661 17662 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17663 UIO_SYSSPACE, path_flag); 17664 17665 switch (status) { 17666 case 0: 17667 /* Return failure if we did not get valid capacity data. */ 17668 if (ucmd_buf.uscsi_resid > 20) { 17669 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17670 return (EIO); 17671 } 17672 17673 /* 17674 * Read capacity and block size from the READ CAPACITY 16 data. 17675 * This data may be adjusted later due to device specific 17676 * issues. 17677 * 17678 * According to the SCSI spec, the READ CAPACITY 16 17679 * command returns the following: 17680 * 17681 * bytes 0-7: Maximum logical block address available. 17682 * (MSB in byte:0 & LSB in byte:7) 17683 * 17684 * bytes 8-11: Block length in bytes 17685 * (MSB in byte:8 & LSB in byte:11) 17686 * 17687 */ 17688 capacity = BE_64(capacity16_buf[0]); 17689 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 17690 17691 /* 17692 * Done with capacity16_buf 17693 */ 17694 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17695 17696 /* 17697 * if the reported capacity is set to all 0xf's, then 17698 * this disk is too large. This could only happen with 17699 * a device that supports LBAs larger than 64 bits which 17700 * are not defined by any current T10 standards. 17701 */ 17702 if (capacity == 0xffffffffffffffff) { 17703 return (EIO); 17704 } 17705 break; /* Success! */ 17706 case EIO: 17707 switch (ucmd_buf.uscsi_status) { 17708 case STATUS_RESERVATION_CONFLICT: 17709 status = EACCES; 17710 break; 17711 case STATUS_CHECK: 17712 /* 17713 * Check condition; look for ASC/ASCQ of 0x04/0x01 17714 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17715 */ 17716 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17717 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17718 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17719 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17720 return (EAGAIN); 17721 } 17722 break; 17723 default: 17724 break; 17725 } 17726 /* FALLTHRU */ 17727 default: 17728 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17729 return (status); 17730 } 17731 17732 *capp = capacity; 17733 *lbap = lbasize; 17734 17735 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 17736 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 17737 17738 return (0); 17739 } 17740 17741 17742 /* 17743 * Function: sd_send_scsi_START_STOP_UNIT 17744 * 17745 * Description: Issue a scsi START STOP UNIT command to the target.
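 *
 * (flag is written directly into CDB byte 4, so -- assuming the
 * sddef.h encodings SD_TARGET_STOP == 0x00, SD_TARGET_START == 0x01
 * (the START bit) and SD_TARGET_EJECT == 0x02 (the LOEJ bit) -- the
 * values line up with the SCSI START STOP UNIT bit definitions.)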
17746 * 17747 * Arguments: un - pointer to driver soft state (unit) structure for 17748 * this target. 17749 * flag - SD_TARGET_START 17750 * SD_TARGET_STOP 17751 * SD_TARGET_EJECT 17752 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17753 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17754 * to use the USCSI "direct" chain and bypass the normal 17755 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17756 * command is issued as part of an error recovery action. 17757 * 17758 * Return Code: 0 - Success 17759 * EIO - IO error 17760 * EACCES - Reservation conflict detected 17761 * ENXIO - Not Ready, medium not present 17762 * errno return code from sd_send_scsi_cmd() 17763 * 17764 * Context: Can sleep. 17765 */ 17766 17767 static int 17768 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 17769 { 17770 struct scsi_extended_sense sense_buf; 17771 union scsi_cdb cdb; 17772 struct uscsi_cmd ucmd_buf; 17773 int status; 17774 17775 ASSERT(un != NULL); 17776 ASSERT(!mutex_owned(SD_MUTEX(un))); 17777 17778 SD_TRACE(SD_LOG_IO, un, 17779 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 17780 17781 if (un->un_f_check_start_stop && 17782 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 17783 (un->un_f_start_stop_supported != TRUE)) { 17784 return (0); 17785 } 17786 17787 /* 17788 * If we are performing an eject operation and 17789 * we receive any command other than SD_TARGET_EJECT 17790 * we should immediately return. 17791 */ 17792 if (flag != SD_TARGET_EJECT) { 17793 mutex_enter(SD_MUTEX(un)); 17794 if (un->un_f_ejecting == TRUE) { 17795 mutex_exit(SD_MUTEX(un)); 17796 return (EAGAIN); 17797 } 17798 mutex_exit(SD_MUTEX(un)); 17799 } 17800 17801 bzero(&cdb, sizeof (cdb)); 17802 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17803 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 17804 17805 cdb.scc_cmd = SCMD_START_STOP; 17806 cdb.cdb_opaque[4] = (uchar_t)flag; 17807 17808 ucmd_buf.uscsi_cdb = (char *)&cdb; 17809 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17810 ucmd_buf.uscsi_bufaddr = NULL; 17811 ucmd_buf.uscsi_buflen = 0; 17812 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17813 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 17814 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17815 ucmd_buf.uscsi_timeout = 200; 17816 17817 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17818 UIO_SYSSPACE, path_flag); 17819 17820 switch (status) { 17821 case 0: 17822 break; /* Success! */ 17823 case EIO: 17824 switch (ucmd_buf.uscsi_status) { 17825 case STATUS_RESERVATION_CONFLICT: 17826 status = EACCES; 17827 break; 17828 case STATUS_CHECK: 17829 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 17830 switch (scsi_sense_key( 17831 (uint8_t *)&sense_buf)) { 17832 case KEY_ILLEGAL_REQUEST: 17833 status = ENOTSUP; 17834 break; 17835 case KEY_NOT_READY: 17836 if (scsi_sense_asc( 17837 (uint8_t *)&sense_buf) 17838 == 0x3A) { 17839 status = ENXIO; 17840 } 17841 break; 17842 default: 17843 break; 17844 } 17845 } 17846 break; 17847 default: 17848 break; 17849 } 17850 break; 17851 default: 17852 break; 17853 } 17854 17855 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 17856 17857 return (status); 17858 } 17859 17860 17861 /* 17862 * Function: sd_start_stop_unit_callback 17863 * 17864 * Description: timeout(9F) callback to begin recovery process for a 17865 * device that has spun down. 17866 * 17867 * Arguments: arg - pointer to associated softstate struct. 
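 *
 * (This is the first hop of a timeout(9F) -> taskq(9F) chain: the
 * timeout context may not block, so the callback below simply
 * re-dispatches the real recovery work, sd_start_stop_unit_task(), to
 * sd_tq with KM_NOSLEEP.)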
17868 * 17869 * Context: Executes in a timeout(9F) thread context 17870 */ 17871 17872 static void 17873 sd_start_stop_unit_callback(void *arg) 17874 { 17875 struct sd_lun *un = arg; 17876 ASSERT(un != NULL); 17877 ASSERT(!mutex_owned(SD_MUTEX(un))); 17878 17879 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 17880 17881 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 17882 } 17883 17884 17885 /* 17886 * Function: sd_start_stop_unit_task 17887 * 17888 * Description: Recovery procedure when a drive is spun down. 17889 * 17890 * Arguments: arg - pointer to associated softstate struct. 17891 * 17892 * Context: Executes in a taskq() thread context 17893 */ 17894 17895 static void 17896 sd_start_stop_unit_task(void *arg) 17897 { 17898 struct sd_lun *un = arg; 17899 17900 ASSERT(un != NULL); 17901 ASSERT(!mutex_owned(SD_MUTEX(un))); 17902 17903 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 17904 17905 /* 17906 * Some unformatted drives report not ready error, no need to 17907 * restart if format has been initiated. 17908 */ 17909 mutex_enter(SD_MUTEX(un)); 17910 if (un->un_f_format_in_progress == TRUE) { 17911 mutex_exit(SD_MUTEX(un)); 17912 return; 17913 } 17914 mutex_exit(SD_MUTEX(un)); 17915 17916 /* 17917 * When a START STOP command is issued from here, it is part of a 17918 * failure recovery operation and must be issued before any other 17919 * commands, including any pending retries. Thus it must be sent 17920 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 17921 * succeeds or not, we will start I/O after the attempt. 17922 */ 17923 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 17924 SD_PATH_DIRECT_PRIORITY); 17925 17926 /* 17927 * The above call blocks until the START_STOP_UNIT command completes. 17928 * Now that it has completed, we must re-try the original IO that 17929 * received the NOT READY condition in the first place. There are 17930 * three possible conditions here: 17931 * 17932 * (1) The original IO is on un_retry_bp. 17933 * (2) The original IO is on the regular wait queue, and un_retry_bp 17934 * is NULL. 17935 * (3) The original IO is on the regular wait queue, and un_retry_bp 17936 * points to some other, unrelated bp. 17937 * 17938 * For each case, we must call sd_start_cmds() with un_retry_bp 17939 * as the argument. If un_retry_bp is NULL, this will initiate 17940 * processing of the regular wait queue. If un_retry_bp is not NULL, 17941 * then this will process the bp on un_retry_bp. That may or may not 17942 * be the original IO, but that does not matter: the important thing 17943 * is to keep the IO processing going at this point. 17944 * 17945 * Note: This is a very specific error recovery sequence associated 17946 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 17947 * serialize the I/O with completion of the spin-up. 17948 */ 17949 mutex_enter(SD_MUTEX(un)); 17950 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17951 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 17952 un, un->un_retry_bp); 17953 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 17954 sd_start_cmds(un, un->un_retry_bp); 17955 mutex_exit(SD_MUTEX(un)); 17956 17957 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 17958 } 17959 17960 17961 /* 17962 * Function: sd_send_scsi_INQUIRY 17963 * 17964 * Description: Issue the scsi INQUIRY command. 
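 *
 * (Parameter sketch, page numbers per SPC: evpd=0 with page_code=0
 * fetches standard INQUIRY data, while evpd=1 selects a VPD page --
 * e.g. a hypothetical caller requesting device identification:
 *
 *	rval = sd_send_scsi_INQUIRY(un, buf, buflen, 1, 0x83, &resid); )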
17965 * 17966 * Arguments: un 17967 * bufaddr 17968 * buflen 17969 * evpd 17970 * page_code 17971 * residp 17972 * 17973 * Return Code: 0 - Success 17974 * errno return code from sd_send_scsi_cmd() 17975 * 17976 * Context: Can sleep. Does not return until command is completed. 17977 */ 17978 17979 static int 17980 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 17981 uchar_t evpd, uchar_t page_code, size_t *residp) 17982 { 17983 union scsi_cdb cdb; 17984 struct uscsi_cmd ucmd_buf; 17985 int status; 17986 17987 ASSERT(un != NULL); 17988 ASSERT(!mutex_owned(SD_MUTEX(un))); 17989 ASSERT(bufaddr != NULL); 17990 17991 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 17992 17993 bzero(&cdb, sizeof (cdb)); 17994 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17995 bzero(bufaddr, buflen); 17996 17997 cdb.scc_cmd = SCMD_INQUIRY; 17998 cdb.cdb_opaque[1] = evpd; 17999 cdb.cdb_opaque[2] = page_code; 18000 FORMG0COUNT(&cdb, buflen); 18001 18002 ucmd_buf.uscsi_cdb = (char *)&cdb; 18003 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18004 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18005 ucmd_buf.uscsi_buflen = buflen; 18006 ucmd_buf.uscsi_rqbuf = NULL; 18007 ucmd_buf.uscsi_rqlen = 0; 18008 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 18009 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 18010 18011 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18012 UIO_SYSSPACE, SD_PATH_DIRECT); 18013 18014 if ((status == 0) && (residp != NULL)) { 18015 *residp = ucmd_buf.uscsi_resid; 18016 } 18017 18018 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 18019 18020 return (status); 18021 } 18022 18023 18024 /* 18025 * Function: sd_send_scsi_TEST_UNIT_READY 18026 * 18027 * Description: Issue the scsi TEST UNIT READY command. 18028 * This routine can be told to set the flag USCSI_DIAGNOSE to 18029 * prevent retrying failed commands. Use this when the intent 18030 * is either to check for device readiness, to clear a Unit 18031 * Attention, or to clear any outstanding sense data. 18032 * However, under specific conditions the expected behavior 18033 * is for retries to bring a device ready, so use the flag 18034 * with caution. 18035 * 18036 * Arguments: un 18037 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 18038 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 18039 * 0: don't check for media present, do retries on cmd. 18040 * 18041 * Return Code: 0 - Success 18042 * EIO - IO error 18043 * EACCES - Reservation conflict detected 18044 * ENXIO - Not Ready, medium not present 18045 * errno return code from sd_send_scsi_cmd() 18046 * 18047 * Context: Can sleep. Does not return until command is completed. 18048 */ 18049 18050 static int 18051 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 18052 { 18053 struct scsi_extended_sense sense_buf; 18054 union scsi_cdb cdb; 18055 struct uscsi_cmd ucmd_buf; 18056 int status; 18057 18058 ASSERT(un != NULL); 18059 ASSERT(!mutex_owned(SD_MUTEX(un))); 18060 18061 SD_TRACE(SD_LOG_IO, un, 18062 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 18063 18064 /* 18065 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 18066 * timeouts when they receive a TUR and the queue is not empty. Check 18067 * the configuration flag set during attach (indicating the drive has 18068 * this firmware bug) and un_ncmds_in_transport before issuing the 18069 * TUR. If there are 18070 * pending commands, return success; this is a bit arbitrary but is ok 18071 * for non-removables (i.e.
the eliteI disks) and non-clustering 18072 * configurations. 18073 */ 18074 if (un->un_f_cfg_tur_check == TRUE) { 18075 mutex_enter(SD_MUTEX(un)); 18076 if (un->un_ncmds_in_transport != 0) { 18077 mutex_exit(SD_MUTEX(un)); 18078 return (0); 18079 } 18080 mutex_exit(SD_MUTEX(un)); 18081 } 18082 18083 bzero(&cdb, sizeof (cdb)); 18084 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18085 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18086 18087 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18088 18089 ucmd_buf.uscsi_cdb = (char *)&cdb; 18090 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18091 ucmd_buf.uscsi_bufaddr = NULL; 18092 ucmd_buf.uscsi_buflen = 0; 18093 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18094 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18095 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18096 18097 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18098 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18099 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18100 } 18101 ucmd_buf.uscsi_timeout = 60; 18102 18103 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18104 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18105 SD_PATH_STANDARD)); 18106 18107 switch (status) { 18108 case 0: 18109 break; /* Success! */ 18110 case EIO: 18111 switch (ucmd_buf.uscsi_status) { 18112 case STATUS_RESERVATION_CONFLICT: 18113 status = EACCES; 18114 break; 18115 case STATUS_CHECK: 18116 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18117 break; 18118 } 18119 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18120 (scsi_sense_key((uint8_t *)&sense_buf) == 18121 KEY_NOT_READY) && 18122 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18123 status = ENXIO; 18124 } 18125 break; 18126 default: 18127 break; 18128 } 18129 break; 18130 default: 18131 break; 18132 } 18133 18134 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18135 18136 return (status); 18137 } 18138 18139 18140 /* 18141 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18142 * 18143 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18144 * 18145 * Arguments: un 18146 * 18147 * Return Code: 0 - Success 18148 * EACCES 18149 * ENOTSUP 18150 * errno return code from sd_send_scsi_cmd() 18151 * 18152 * Context: Can sleep. Does not return until command is completed. 
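 *
 * (usr_cmd selects the PERSISTENT RESERVE IN service action placed in
 * CDB byte 1 below; assuming the sddef.h encodings match SPC-3:
 *
 *	SD_READ_KEYS -> READ KEYS (0x00), the registered key list
 *	SD_READ_RESV -> READ RESERVATION (0x01), the current holder )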
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd,
	uint16_t data_len, uchar_t *data_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			no_caller_buf = FALSE;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	if (data_bufp == NULL) {
		/* Allocate a default buf if the caller did not give one */
		ASSERT(data_len == 0);
		data_len = MHIOC_RESV_KEY_SIZE;
		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
		no_caller_buf = TRUE;
	}

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
	ucmd_buf.uscsi_buflen = data_len;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");

	if (no_caller_buf == TRUE) {
		kmem_free(data_bufp, data_len);
	}

	return (status);
}


/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine is the driver entry point for handling the
 *		multi-host persistent reservation ioctls (MHIOCGRP_REGISTER,
 *		MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT and
 *		MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT
 *		commands to the device.
 *
 * Arguments: un - Pointer to soft state struct for the target.
 *		usr_cmd - SCSI-3 reservation facility command (one of
 *		SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *		SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_REGISTERANDIGNOREKEY)
 *		usr_bufp - user provided pointer to a register, reserve
 *		descriptor, or preempt and abort structure
 *		(mhioc_register_t, mhioc_resv_desc_t,
 *		mhioc_preemptandabort_t or mhioc_registerandignorekey_t)
 *
 * Return Code: 0 - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
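 *
 * Example: an illustrative sketch of an initial registration
 * (assumptions: "mykey" is a caller-supplied 8-byte key, and a zeroed
 * oldkey field denotes an unregistered initiator, per the SPC REGISTER
 * service action semantics):
 *
 *	mhioc_register_t	reg;
 *
 *	bzero(&reg, sizeof (reg));
 *	bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *	reg.aptpl = 0;
 *	rval = sd_send_scsi_PERSISTENT_RESERVE_OUT(un, SD_SCSI3_REGISTER,
 *	    (uchar_t *)&reg);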
18254 */ 18255 18256 static int 18257 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18258 uchar_t *usr_bufp) 18259 { 18260 struct scsi_extended_sense sense_buf; 18261 union scsi_cdb cdb; 18262 struct uscsi_cmd ucmd_buf; 18263 int status; 18264 uchar_t data_len = sizeof (sd_prout_t); 18265 sd_prout_t *prp; 18266 18267 ASSERT(un != NULL); 18268 ASSERT(!mutex_owned(SD_MUTEX(un))); 18269 ASSERT(data_len == 24); /* required by scsi spec */ 18270 18271 SD_TRACE(SD_LOG_IO, un, 18272 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18273 18274 if (usr_bufp == NULL) { 18275 return (EINVAL); 18276 } 18277 18278 bzero(&cdb, sizeof (cdb)); 18279 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18280 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18281 prp = kmem_zalloc(data_len, KM_SLEEP); 18282 18283 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18284 cdb.cdb_opaque[1] = usr_cmd; 18285 FORMG1COUNT(&cdb, data_len); 18286 18287 ucmd_buf.uscsi_cdb = (char *)&cdb; 18288 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18289 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18290 ucmd_buf.uscsi_buflen = data_len; 18291 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18292 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18293 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18294 ucmd_buf.uscsi_timeout = 60; 18295 18296 switch (usr_cmd) { 18297 case SD_SCSI3_REGISTER: { 18298 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18299 18300 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18301 bcopy(ptr->newkey.key, prp->service_key, 18302 MHIOC_RESV_KEY_SIZE); 18303 prp->aptpl = ptr->aptpl; 18304 break; 18305 } 18306 case SD_SCSI3_RESERVE: 18307 case SD_SCSI3_RELEASE: { 18308 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18309 18310 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18311 prp->scope_address = BE_32(ptr->scope_specific_addr); 18312 cdb.cdb_opaque[2] = ptr->type; 18313 break; 18314 } 18315 case SD_SCSI3_PREEMPTANDABORT: { 18316 mhioc_preemptandabort_t *ptr = 18317 (mhioc_preemptandabort_t *)usr_bufp; 18318 18319 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18320 bcopy(ptr->victim_key.key, prp->service_key, 18321 MHIOC_RESV_KEY_SIZE); 18322 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18323 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18324 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18325 break; 18326 } 18327 case SD_SCSI3_REGISTERANDIGNOREKEY: 18328 { 18329 mhioc_registerandignorekey_t *ptr; 18330 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18331 bcopy(ptr->newkey.key, 18332 prp->service_key, MHIOC_RESV_KEY_SIZE); 18333 prp->aptpl = ptr->aptpl; 18334 break; 18335 } 18336 default: 18337 ASSERT(FALSE); 18338 break; 18339 } 18340 18341 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18342 UIO_SYSSPACE, SD_PATH_STANDARD); 18343 18344 switch (status) { 18345 case 0: 18346 break; /* Success! 
*/ 18347 case EIO: 18348 switch (ucmd_buf.uscsi_status) { 18349 case STATUS_RESERVATION_CONFLICT: 18350 status = EACCES; 18351 break; 18352 case STATUS_CHECK: 18353 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18354 (scsi_sense_key((uint8_t *)&sense_buf) == 18355 KEY_ILLEGAL_REQUEST)) { 18356 status = ENOTSUP; 18357 } 18358 break; 18359 default: 18360 break; 18361 } 18362 break; 18363 default: 18364 break; 18365 } 18366 18367 kmem_free(prp, data_len); 18368 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18369 return (status); 18370 } 18371 18372 18373 /* 18374 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18375 * 18376 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18377 * 18378 * Arguments: un - pointer to the target's soft state struct 18379 * 18380 * Return Code: 0 - success 18381 * errno-type error code 18382 * 18383 * Context: kernel thread context only. 18384 */ 18385 18386 static int 18387 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18388 { 18389 struct sd_uscsi_info *uip; 18390 struct uscsi_cmd *uscmd; 18391 union scsi_cdb *cdb; 18392 struct buf *bp; 18393 int rval = 0; 18394 18395 SD_TRACE(SD_LOG_IO, un, 18396 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18397 18398 ASSERT(un != NULL); 18399 ASSERT(!mutex_owned(SD_MUTEX(un))); 18400 18401 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18402 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18403 18404 /* 18405 * First get some memory for the uscsi_cmd struct and cdb 18406 * and initialize for SYNCHRONIZE_CACHE cmd. 18407 */ 18408 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 18409 uscmd->uscsi_cdblen = CDB_GROUP1; 18410 uscmd->uscsi_cdb = (caddr_t)cdb; 18411 uscmd->uscsi_bufaddr = NULL; 18412 uscmd->uscsi_buflen = 0; 18413 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 18414 uscmd->uscsi_rqlen = SENSE_LENGTH; 18415 uscmd->uscsi_rqresid = SENSE_LENGTH; 18416 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18417 uscmd->uscsi_timeout = sd_io_time; 18418 18419 /* 18420 * Allocate an sd_uscsi_info struct and fill it with the info 18421 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 18422 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 18423 * since we allocate the buf here in this function, we do not 18424 * need to preserve the prior contents of b_private. 18425 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 18426 */ 18427 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 18428 uip->ui_flags = SD_PATH_DIRECT; 18429 uip->ui_cmdp = uscmd; 18430 18431 bp = getrbuf(KM_SLEEP); 18432 bp->b_private = uip; 18433 18434 /* 18435 * Setup buffer to carry uscsi request. 18436 */ 18437 bp->b_flags = B_BUSY; 18438 bp->b_bcount = 0; 18439 bp->b_blkno = 0; 18440 18441 if (dkc != NULL) { 18442 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 18443 uip->ui_dkc = *dkc; 18444 } 18445 18446 bp->b_edev = SD_GET_DEV(un); 18447 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 18448 18449 (void) sd_uscsi_strategy(bp); 18450 18451 /* 18452 * If synchronous request, wait for completion 18453 * If async just return and let b_iodone callback 18454 * cleanup. 18455 * NOTE: On return, u_ncmds_in_driver will be decremented, 18456 * but it was also incremented in sd_uscsi_strategy(), so 18457 * we should be ok. 
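	 *
	 * In the async case, the iodone callback
	 * (sd_send_scsi_SYNCHRONIZE_CACHE_biodone, below) both invokes
	 * the caller's dkc_callback with the final status and frees the
	 * buf, the sd_uscsi_info, the sense buffer and the uscsi_cmd.
	 *
	 * For reference, an asynchronous caller looks roughly as follows
	 * (an illustrative sketch; my_done and my_cookie are hypothetical
	 * names):
	 *
	 *	struct dk_callback dkc;
	 *
	 *	dkc.dkc_callback = my_done;
	 *	dkc.dkc_cookie = my_cookie;
	 *	(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc);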
18458 */ 18459 if (dkc == NULL) { 18460 (void) biowait(bp); 18461 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 18462 } 18463 18464 return (rval); 18465 } 18466 18467 18468 static int 18469 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 18470 { 18471 struct sd_uscsi_info *uip; 18472 struct uscsi_cmd *uscmd; 18473 uint8_t *sense_buf; 18474 struct sd_lun *un; 18475 int status; 18476 18477 uip = (struct sd_uscsi_info *)(bp->b_private); 18478 ASSERT(uip != NULL); 18479 18480 uscmd = uip->ui_cmdp; 18481 ASSERT(uscmd != NULL); 18482 18483 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 18484 ASSERT(sense_buf != NULL); 18485 18486 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 18487 ASSERT(un != NULL); 18488 18489 status = geterror(bp); 18490 switch (status) { 18491 case 0: 18492 break; /* Success! */ 18493 case EIO: 18494 switch (uscmd->uscsi_status) { 18495 case STATUS_RESERVATION_CONFLICT: 18496 /* Ignore reservation conflict */ 18497 status = 0; 18498 goto done; 18499 18500 case STATUS_CHECK: 18501 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 18502 (scsi_sense_key(sense_buf) == 18503 KEY_ILLEGAL_REQUEST)) { 18504 /* Ignore Illegal Request error */ 18505 mutex_enter(SD_MUTEX(un)); 18506 un->un_f_sync_cache_supported = FALSE; 18507 mutex_exit(SD_MUTEX(un)); 18508 status = ENOTSUP; 18509 goto done; 18510 } 18511 break; 18512 default: 18513 break; 18514 } 18515 /* FALLTHRU */ 18516 default: 18517 /* 18518 * Don't log an error message if this device 18519 * has removable media. 18520 */ 18521 if (!un->un_f_has_removable_media) { 18522 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18523 "SYNCHRONIZE CACHE command failed (%d)\n", status); 18524 } 18525 break; 18526 } 18527 18528 done: 18529 if (uip->ui_dkc.dkc_callback != NULL) { 18530 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 18531 } 18532 18533 ASSERT((bp->b_flags & B_REMAPPED) == 0); 18534 freerbuf(bp); 18535 kmem_free(uip, sizeof (struct sd_uscsi_info)); 18536 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 18537 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 18538 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 18539 18540 return (status); 18541 } 18542 18543 18544 /* 18545 * Function: sd_send_scsi_GET_CONFIGURATION 18546 * 18547 * Description: Issues the get configuration command to the device. 18548 * Called from sd_check_for_writable_cd & sd_get_media_info 18549 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 18550 * Arguments: un 18551 * ucmdbuf 18552 * rqbuf 18553 * rqbuflen 18554 * bufaddr 18555 * buflen 18556 * path_flag 18557 * 18558 * Return Code: 0 - Success 18559 * errno return code from sd_send_scsi_cmd() 18560 * 18561 * Context: Can sleep. Does not return until command is completed. 18562 * 18563 */ 18564 18565 static int 18566 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 18567 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 18568 int path_flag) 18569 { 18570 char cdb[CDB_GROUP1]; 18571 int status; 18572 18573 ASSERT(un != NULL); 18574 ASSERT(!mutex_owned(SD_MUTEX(un))); 18575 ASSERT(bufaddr != NULL); 18576 ASSERT(ucmdbuf != NULL); 18577 ASSERT(rqbuf != NULL); 18578 18579 SD_TRACE(SD_LOG_IO, un, 18580 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 18581 18582 bzero(cdb, sizeof (cdb)); 18583 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18584 bzero(rqbuf, rqbuflen); 18585 bzero(bufaddr, buflen); 18586 18587 /* 18588 * Set up cdb field for the get configuration command. 
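	 * The GET CONFIGURATION CDB (opcode 0x46) is laid out per MMC:
	 * byte 1 (bits 0-1) is the RT field restricting which feature
	 * descriptors are returned, bytes 2-3 are the Starting Feature
	 * Number, and bytes 7-8 are the Allocation Length (here only
	 * SD_PROFILE_HEADER_LEN bytes are requested).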
18589 */ 18590 cdb[0] = SCMD_GET_CONFIGURATION; 18591 cdb[1] = 0x02; /* Requested Type */ 18592 cdb[8] = SD_PROFILE_HEADER_LEN; 18593 ucmdbuf->uscsi_cdb = cdb; 18594 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18595 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18596 ucmdbuf->uscsi_buflen = buflen; 18597 ucmdbuf->uscsi_timeout = sd_io_time; 18598 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18599 ucmdbuf->uscsi_rqlen = rqbuflen; 18600 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18601 18602 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18603 UIO_SYSSPACE, path_flag); 18604 18605 switch (status) { 18606 case 0: 18607 break; /* Success! */ 18608 case EIO: 18609 switch (ucmdbuf->uscsi_status) { 18610 case STATUS_RESERVATION_CONFLICT: 18611 status = EACCES; 18612 break; 18613 default: 18614 break; 18615 } 18616 break; 18617 default: 18618 break; 18619 } 18620 18621 if (status == 0) { 18622 SD_DUMP_MEMORY(un, SD_LOG_IO, 18623 "sd_send_scsi_GET_CONFIGURATION: data", 18624 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18625 } 18626 18627 SD_TRACE(SD_LOG_IO, un, 18628 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 18629 18630 return (status); 18631 } 18632 18633 /* 18634 * Function: sd_send_scsi_feature_GET_CONFIGURATION 18635 * 18636 * Description: Issues the get configuration command to the device to 18637 * retrieve a specific feature. Called from 18638 * sd_check_for_writable_cd & sd_set_mmc_caps. 18639 * Arguments: un 18640 * ucmdbuf 18641 * rqbuf 18642 * rqbuflen 18643 * bufaddr 18644 * buflen 18645 * feature 18646 * 18647 * Return Code: 0 - Success 18648 * errno return code from sd_send_scsi_cmd() 18649 * 18650 * Context: Can sleep. Does not return until command is completed. 18651 * 18652 */ 18653 static int 18654 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 18655 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 18656 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 18657 { 18658 char cdb[CDB_GROUP1]; 18659 int status; 18660 18661 ASSERT(un != NULL); 18662 ASSERT(!mutex_owned(SD_MUTEX(un))); 18663 ASSERT(bufaddr != NULL); 18664 ASSERT(ucmdbuf != NULL); 18665 ASSERT(rqbuf != NULL); 18666 18667 SD_TRACE(SD_LOG_IO, un, 18668 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 18669 18670 bzero(cdb, sizeof (cdb)); 18671 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18672 bzero(rqbuf, rqbuflen); 18673 bzero(bufaddr, buflen); 18674 18675 /* 18676 * Set up cdb field for the get configuration command. 18677 */ 18678 cdb[0] = SCMD_GET_CONFIGURATION; 18679 cdb[1] = 0x02; /* Requested Type */ 18680 cdb[3] = feature; 18681 cdb[8] = buflen; 18682 ucmdbuf->uscsi_cdb = cdb; 18683 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18684 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18685 ucmdbuf->uscsi_buflen = buflen; 18686 ucmdbuf->uscsi_timeout = sd_io_time; 18687 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18688 ucmdbuf->uscsi_rqlen = rqbuflen; 18689 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18690 18691 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18692 UIO_SYSSPACE, path_flag); 18693 18694 switch (status) { 18695 case 0: 18696 break; /* Success! 
*/ 18697 case EIO: 18698 switch (ucmdbuf->uscsi_status) { 18699 case STATUS_RESERVATION_CONFLICT: 18700 status = EACCES; 18701 break; 18702 default: 18703 break; 18704 } 18705 break; 18706 default: 18707 break; 18708 } 18709 18710 if (status == 0) { 18711 SD_DUMP_MEMORY(un, SD_LOG_IO, 18712 "sd_send_scsi_feature_GET_CONFIGURATION: data", 18713 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18714 } 18715 18716 SD_TRACE(SD_LOG_IO, un, 18717 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 18718 18719 return (status); 18720 } 18721 18722 18723 /* 18724 * Function: sd_send_scsi_MODE_SENSE 18725 * 18726 * Description: Utility function for issuing a scsi MODE SENSE command. 18727 * Note: This routine uses a consistent implementation for Group0, 18728 * Group1, and Group2 commands across all platforms. ATAPI devices 18729 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 18730 * 18731 * Arguments: un - pointer to the softstate struct for the target. 18732 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 18733 * CDB_GROUP[1|2] (10 byte). 18734 * bufaddr - buffer for page data retrieved from the target. 18735 * buflen - size of page to be retrieved. 18736 * page_code - page code of data to be retrieved from the target. 18737 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18738 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18739 * to use the USCSI "direct" chain and bypass the normal 18740 * command waitq. 18741 * 18742 * Return Code: 0 - Success 18743 * errno return code from sd_send_scsi_cmd() 18744 * 18745 * Context: Can sleep. Does not return until command is completed. 18746 */ 18747 18748 static int 18749 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 18750 size_t buflen, uchar_t page_code, int path_flag) 18751 { 18752 struct scsi_extended_sense sense_buf; 18753 union scsi_cdb cdb; 18754 struct uscsi_cmd ucmd_buf; 18755 int status; 18756 int headlen; 18757 18758 ASSERT(un != NULL); 18759 ASSERT(!mutex_owned(SD_MUTEX(un))); 18760 ASSERT(bufaddr != NULL); 18761 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 18762 (cdbsize == CDB_GROUP2)); 18763 18764 SD_TRACE(SD_LOG_IO, un, 18765 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 18766 18767 bzero(&cdb, sizeof (cdb)); 18768 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18769 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18770 bzero(bufaddr, buflen); 18771 18772 if (cdbsize == CDB_GROUP0) { 18773 cdb.scc_cmd = SCMD_MODE_SENSE; 18774 cdb.cdb_opaque[2] = page_code; 18775 FORMG0COUNT(&cdb, buflen); 18776 headlen = MODE_HEADER_LENGTH; 18777 } else { 18778 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 18779 cdb.cdb_opaque[2] = page_code; 18780 FORMG1COUNT(&cdb, buflen); 18781 headlen = MODE_HEADER_LENGTH_GRP2; 18782 } 18783 18784 ASSERT(headlen <= buflen); 18785 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 18786 18787 ucmd_buf.uscsi_cdb = (char *)&cdb; 18788 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 18789 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18790 ucmd_buf.uscsi_buflen = buflen; 18791 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18792 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18793 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18794 ucmd_buf.uscsi_timeout = 60; 18795 18796 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18797 UIO_SYSSPACE, path_flag); 18798 18799 switch (status) { 18800 case 0: 18801 /* 18802 * sr_check_wp() uses 0x3f page code and check the header of 18803 * mode page to determine if target device is 
write-protected.
		 * But some USB devices return 0 bytes for the 0x3f page
		 * code. For this case, make sure that at least the mode
		 * page header is returned.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen)
			status = EIO;
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
 *
 * Arguments: un - pointer to the softstate struct for the target.
 *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or
 *		CDB_GROUP[1|2] (10 byte)).
 *		bufaddr - buffer containing the page data to be sent to
 *		the target.
 *		buflen - size of the page data to be transferred.
 *		save_page - boolean to determine whether the SP bit
 *		should be set.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t save_page, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the savepage(SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments: un: Pointer to the sd_lun struct for the target.
 *		cmd: SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer containing the data to
 *		be written, or into which the data read is returned.
 *		buflen: Length of caller's buffer, in bytes.
 *		start_block: Block number for the start of the RDWR operation.
 *		(Assumes target-native block size.)
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
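 *
 * Example: an illustrative sketch of a direct-chain read (reading a
 * single block, and block 0, are assumptions made only for the
 * example):
 *
 *	char	*buf;
 *
 *	buf = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP);
 *	rval = sd_send_scsi_RDWR(un, SCMD_READ, buf,
 *	    un->un_tgt_blocksize, (daddr_t)0, SD_PATH_DIRECT);
 *	kmem_free(buf, un->un_tgt_blocksize);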
18959 */ 18960 18961 static int 18962 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 18963 size_t buflen, daddr_t start_block, int path_flag) 18964 { 18965 struct scsi_extended_sense sense_buf; 18966 union scsi_cdb cdb; 18967 struct uscsi_cmd ucmd_buf; 18968 uint32_t block_count; 18969 int status; 18970 int cdbsize; 18971 uchar_t flag; 18972 18973 ASSERT(un != NULL); 18974 ASSERT(!mutex_owned(SD_MUTEX(un))); 18975 ASSERT(bufaddr != NULL); 18976 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 18977 18978 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 18979 18980 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 18981 return (EINVAL); 18982 } 18983 18984 mutex_enter(SD_MUTEX(un)); 18985 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 18986 mutex_exit(SD_MUTEX(un)); 18987 18988 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 18989 18990 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 18991 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 18992 bufaddr, buflen, start_block, block_count); 18993 18994 bzero(&cdb, sizeof (cdb)); 18995 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18996 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18997 18998 /* Compute CDB size to use */ 18999 if (start_block > 0xffffffff) 19000 cdbsize = CDB_GROUP4; 19001 else if ((start_block & 0xFFE00000) || 19002 (un->un_f_cfg_is_atapi == TRUE)) 19003 cdbsize = CDB_GROUP1; 19004 else 19005 cdbsize = CDB_GROUP0; 19006 19007 switch (cdbsize) { 19008 case CDB_GROUP0: /* 6-byte CDBs */ 19009 cdb.scc_cmd = cmd; 19010 FORMG0ADDR(&cdb, start_block); 19011 FORMG0COUNT(&cdb, block_count); 19012 break; 19013 case CDB_GROUP1: /* 10-byte CDBs */ 19014 cdb.scc_cmd = cmd | SCMD_GROUP1; 19015 FORMG1ADDR(&cdb, start_block); 19016 FORMG1COUNT(&cdb, block_count); 19017 break; 19018 case CDB_GROUP4: /* 16-byte CDBs */ 19019 cdb.scc_cmd = cmd | SCMD_GROUP4; 19020 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19021 FORMG4COUNT(&cdb, block_count); 19022 break; 19023 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19024 default: 19025 /* All others reserved */ 19026 return (EINVAL); 19027 } 19028 19029 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19030 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19031 19032 ucmd_buf.uscsi_cdb = (char *)&cdb; 19033 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19034 ucmd_buf.uscsi_bufaddr = bufaddr; 19035 ucmd_buf.uscsi_buflen = buflen; 19036 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19037 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19038 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19039 ucmd_buf.uscsi_timeout = 60; 19040 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19041 UIO_SYSSPACE, path_flag); 19042 switch (status) { 19043 case 0: 19044 break; /* Success! */ 19045 case EIO: 19046 switch (ucmd_buf.uscsi_status) { 19047 case STATUS_RESERVATION_CONFLICT: 19048 status = EACCES; 19049 break; 19050 default: 19051 break; 19052 } 19053 break; 19054 default: 19055 break; 19056 } 19057 19058 if (status == 0) { 19059 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19060 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19061 } 19062 19063 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19064 19065 return (status); 19066 } 19067 19068 19069 /* 19070 * Function: sd_send_scsi_LOG_SENSE 19071 * 19072 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19073 * 19074 * Arguments: un: Pointer to the sd_lun struct for the target. 
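 *		bufaddr: Buffer to receive the log page data.
 *		buflen: Length of the caller's buffer, in bytes.
 *		page_code: The log page to retrieve.
 *		page_control: CDB page control field (e.g. current
 *		cumulative values).
 *		param_ptr: Parameter pointer; the first log parameter
 *		to be returned.
 *		path_flag: SD_PATH_DIRECT or SD_PATH_STANDARD.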
19075 * 19076 * Return Code: 0 - Success 19077 * errno return code from sd_send_scsi_cmd() 19078 * 19079 * Context: Can sleep. Does not return until command is completed. 19080 */ 19081 19082 static int 19083 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19084 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19085 int path_flag) 19086 19087 { 19088 struct scsi_extended_sense sense_buf; 19089 union scsi_cdb cdb; 19090 struct uscsi_cmd ucmd_buf; 19091 int status; 19092 19093 ASSERT(un != NULL); 19094 ASSERT(!mutex_owned(SD_MUTEX(un))); 19095 19096 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19097 19098 bzero(&cdb, sizeof (cdb)); 19099 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19100 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19101 19102 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19103 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19104 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19105 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19106 FORMG1COUNT(&cdb, buflen); 19107 19108 ucmd_buf.uscsi_cdb = (char *)&cdb; 19109 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19110 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19111 ucmd_buf.uscsi_buflen = buflen; 19112 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19113 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19114 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19115 ucmd_buf.uscsi_timeout = 60; 19116 19117 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19118 UIO_SYSSPACE, path_flag); 19119 19120 switch (status) { 19121 case 0: 19122 break; 19123 case EIO: 19124 switch (ucmd_buf.uscsi_status) { 19125 case STATUS_RESERVATION_CONFLICT: 19126 status = EACCES; 19127 break; 19128 case STATUS_CHECK: 19129 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19130 (scsi_sense_key((uint8_t *)&sense_buf) == 19131 KEY_ILLEGAL_REQUEST) && 19132 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19133 /* 19134 * ASC 0x24: INVALID FIELD IN CDB 19135 */ 19136 switch (page_code) { 19137 case START_STOP_CYCLE_PAGE: 19138 /* 19139 * The start stop cycle counter is 19140 * implemented as page 0x31 in earlier 19141 * generation disks. In new generation 19142 * disks the start stop cycle counter is 19143 * implemented as page 0xE. To properly 19144 * handle this case if an attempt for 19145 * log page 0xE is made and fails we 19146 * will try again using page 0x31. 19147 * 19148 * Network storage BU committed to 19149 * maintain the page 0x31 for this 19150 * purpose and will not have any other 19151 * page implemented with page code 0x31 19152 * until all disks transition to the 19153 * standard page. 
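					 * The retry immediately below
					 * reuses the same uscsi_cmd, with
					 * the page code byte of the CDB
					 * rewritten to the vendor-unique
					 * page number.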
19154 */ 19155 mutex_enter(SD_MUTEX(un)); 19156 un->un_start_stop_cycle_page = 19157 START_STOP_CYCLE_VU_PAGE; 19158 cdb.cdb_opaque[2] = 19159 (char)(page_control << 6) | 19160 un->un_start_stop_cycle_page; 19161 mutex_exit(SD_MUTEX(un)); 19162 status = sd_send_scsi_cmd( 19163 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19164 UIO_SYSSPACE, path_flag); 19165 19166 break; 19167 case TEMPERATURE_PAGE: 19168 status = ENOTTY; 19169 break; 19170 default: 19171 break; 19172 } 19173 } 19174 break; 19175 default: 19176 break; 19177 } 19178 break; 19179 default: 19180 break; 19181 } 19182 19183 if (status == 0) { 19184 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19185 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19186 } 19187 19188 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19189 19190 return (status); 19191 } 19192 19193 19194 /* 19195 * Function: sdioctl 19196 * 19197 * Description: Driver's ioctl(9e) entry point function. 19198 * 19199 * Arguments: dev - device number 19200 * cmd - ioctl operation to be performed 19201 * arg - user argument, contains data to be set or reference 19202 * parameter for get 19203 * flag - bit flag, indicating open settings, 32/64 bit type 19204 * cred_p - user credential pointer 19205 * rval_p - calling process return value (OPT) 19206 * 19207 * Return Code: EINVAL 19208 * ENOTTY 19209 * ENXIO 19210 * EIO 19211 * EFAULT 19212 * ENOTSUP 19213 * EPERM 19214 * 19215 * Context: Called from the device switch at normal priority. 19216 */ 19217 19218 static int 19219 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19220 { 19221 struct sd_lun *un = NULL; 19222 int err = 0; 19223 int i = 0; 19224 cred_t *cr; 19225 int tmprval = EINVAL; 19226 int is_valid; 19227 19228 /* 19229 * All device accesses go thru sdstrategy where we check on suspend 19230 * status 19231 */ 19232 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19233 return (ENXIO); 19234 } 19235 19236 ASSERT(!mutex_owned(SD_MUTEX(un))); 19237 19238 19239 is_valid = SD_IS_VALID_LABEL(un); 19240 19241 /* 19242 * Moved this wait from sd_uscsi_strategy to here for 19243 * reasons of deadlock prevention. Internal driver commands, 19244 * specifically those to change a devices power level, result 19245 * in a call to sd_uscsi_strategy. 19246 */ 19247 mutex_enter(SD_MUTEX(un)); 19248 while ((un->un_state == SD_STATE_SUSPENDED) || 19249 (un->un_state == SD_STATE_PM_CHANGING)) { 19250 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19251 } 19252 /* 19253 * Twiddling the counter here protects commands from now 19254 * through to the top of sd_uscsi_strategy. Without the 19255 * counter inc. a power down, for example, could get in 19256 * after the above check for state is made and before 19257 * execution gets to the top of sd_uscsi_strategy. 19258 * That would cause problems. 
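	 * The count taken here is dropped again at the bottom of this
	 * function, once the command-specific handling has completed.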
19259 */ 19260 un->un_ncmds_in_driver++; 19261 19262 if (!is_valid && 19263 (flag & (FNDELAY | FNONBLOCK))) { 19264 switch (cmd) { 19265 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19266 case DKIOCGVTOC: 19267 case DKIOCGAPART: 19268 case DKIOCPARTINFO: 19269 case DKIOCSGEOM: 19270 case DKIOCSAPART: 19271 case DKIOCGETEFI: 19272 case DKIOCPARTITION: 19273 case DKIOCSVTOC: 19274 case DKIOCSETEFI: 19275 case DKIOCGMBOOT: 19276 case DKIOCSMBOOT: 19277 case DKIOCG_PHYGEOM: 19278 case DKIOCG_VIRTGEOM: 19279 /* let cmlb handle it */ 19280 goto skip_ready_valid; 19281 19282 case CDROMPAUSE: 19283 case CDROMRESUME: 19284 case CDROMPLAYMSF: 19285 case CDROMPLAYTRKIND: 19286 case CDROMREADTOCHDR: 19287 case CDROMREADTOCENTRY: 19288 case CDROMSTOP: 19289 case CDROMSTART: 19290 case CDROMVOLCTRL: 19291 case CDROMSUBCHNL: 19292 case CDROMREADMODE2: 19293 case CDROMREADMODE1: 19294 case CDROMREADOFFSET: 19295 case CDROMSBLKMODE: 19296 case CDROMGBLKMODE: 19297 case CDROMGDRVSPEED: 19298 case CDROMSDRVSPEED: 19299 case CDROMCDDA: 19300 case CDROMCDXA: 19301 case CDROMSUBCODE: 19302 if (!ISCD(un)) { 19303 un->un_ncmds_in_driver--; 19304 ASSERT(un->un_ncmds_in_driver >= 0); 19305 mutex_exit(SD_MUTEX(un)); 19306 return (ENOTTY); 19307 } 19308 break; 19309 case FDEJECT: 19310 case DKIOCEJECT: 19311 case CDROMEJECT: 19312 if (!un->un_f_eject_media_supported) { 19313 un->un_ncmds_in_driver--; 19314 ASSERT(un->un_ncmds_in_driver >= 0); 19315 mutex_exit(SD_MUTEX(un)); 19316 return (ENOTTY); 19317 } 19318 break; 19319 case DKIOCFLUSHWRITECACHE: 19320 mutex_exit(SD_MUTEX(un)); 19321 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19322 if (err != 0) { 19323 mutex_enter(SD_MUTEX(un)); 19324 un->un_ncmds_in_driver--; 19325 ASSERT(un->un_ncmds_in_driver >= 0); 19326 mutex_exit(SD_MUTEX(un)); 19327 return (EIO); 19328 } 19329 mutex_enter(SD_MUTEX(un)); 19330 /* FALLTHROUGH */ 19331 case DKIOCREMOVABLE: 19332 case DKIOCHOTPLUGGABLE: 19333 case DKIOCINFO: 19334 case DKIOCGMEDIAINFO: 19335 case MHIOCENFAILFAST: 19336 case MHIOCSTATUS: 19337 case MHIOCTKOWN: 19338 case MHIOCRELEASE: 19339 case MHIOCGRP_INKEYS: 19340 case MHIOCGRP_INRESV: 19341 case MHIOCGRP_REGISTER: 19342 case MHIOCGRP_RESERVE: 19343 case MHIOCGRP_PREEMPTANDABORT: 19344 case MHIOCGRP_REGISTERANDIGNOREKEY: 19345 case CDROMCLOSETRAY: 19346 case USCSICMD: 19347 goto skip_ready_valid; 19348 default: 19349 break; 19350 } 19351 19352 mutex_exit(SD_MUTEX(un)); 19353 err = sd_ready_and_valid(un); 19354 mutex_enter(SD_MUTEX(un)); 19355 19356 if (err != SD_READY_VALID) { 19357 switch (cmd) { 19358 case DKIOCSTATE: 19359 case CDROMGDRVSPEED: 19360 case CDROMSDRVSPEED: 19361 case FDEJECT: /* for eject command */ 19362 case DKIOCEJECT: 19363 case CDROMEJECT: 19364 case DKIOCREMOVABLE: 19365 case DKIOCHOTPLUGGABLE: 19366 break; 19367 default: 19368 if (un->un_f_has_removable_media) { 19369 err = ENXIO; 19370 } else { 19371 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19372 if (err == SD_RESERVED_BY_OTHERS) { 19373 err = EACCES; 19374 } else { 19375 err = EIO; 19376 } 19377 } 19378 un->un_ncmds_in_driver--; 19379 ASSERT(un->un_ncmds_in_driver >= 0); 19380 mutex_exit(SD_MUTEX(un)); 19381 return (err); 19382 } 19383 } 19384 } 19385 19386 skip_ready_valid: 19387 mutex_exit(SD_MUTEX(un)); 19388 19389 switch (cmd) { 19390 case DKIOCINFO: 19391 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19392 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19393 break; 19394 19395 case DKIOCGMEDIAINFO: 19396 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19397 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19398 break; 19399 19400 case DKIOCGGEOM: 19401 case DKIOCGVTOC: 19402 case DKIOCGAPART: 19403 case DKIOCPARTINFO: 19404 case DKIOCSGEOM: 19405 case DKIOCSAPART: 19406 case DKIOCGETEFI: 19407 case DKIOCPARTITION: 19408 case DKIOCSVTOC: 19409 case DKIOCSETEFI: 19410 case DKIOCGMBOOT: 19411 case DKIOCSMBOOT: 19412 case DKIOCG_PHYGEOM: 19413 case DKIOCG_VIRTGEOM: 19414 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19415 19416 /* TUR should spin up */ 19417 19418 if (un->un_f_has_removable_media) 19419 err = sd_send_scsi_TEST_UNIT_READY(un, 19420 SD_CHECK_FOR_MEDIA); 19421 else 19422 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19423 19424 if (err != 0) 19425 break; 19426 19427 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19428 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19429 19430 if ((err == 0) && 19431 ((cmd == DKIOCSETEFI) || 19432 (un->un_f_pkstats_enabled) && 19433 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 19434 19435 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19436 (void *)SD_PATH_DIRECT); 19437 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19438 sd_set_pstats(un); 19439 SD_TRACE(SD_LOG_IO_PARTITION, un, 19440 "sd_ioctl: un:0x%p pstats created and " 19441 "set\n", un); 19442 } 19443 } 19444 19445 if ((cmd == DKIOCSVTOC) || 19446 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19447 19448 mutex_enter(SD_MUTEX(un)); 19449 if (un->un_f_devid_supported && 19450 (un->un_f_opt_fab_devid == TRUE)) { 19451 if (un->un_devid == NULL) { 19452 sd_register_devid(un, SD_DEVINFO(un), 19453 SD_TARGET_IS_UNRESERVED); 19454 } else { 19455 /* 19456 * The device id for this disk 19457 * has been fabricated. The 19458 * device id must be preserved 19459 * by writing it back out to 19460 * disk. 19461 */ 19462 if (sd_write_deviceid(un) != 0) { 19463 ddi_devid_free(un->un_devid); 19464 un->un_devid = NULL; 19465 } 19466 } 19467 } 19468 mutex_exit(SD_MUTEX(un)); 19469 } 19470 19471 break; 19472 19473 case DKIOCLOCK: 19474 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19475 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19476 SD_PATH_STANDARD); 19477 break; 19478 19479 case DKIOCUNLOCK: 19480 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19481 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19482 SD_PATH_STANDARD); 19483 break; 19484 19485 case DKIOCSTATE: { 19486 enum dkio_state state; 19487 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19488 19489 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19490 err = EFAULT; 19491 } else { 19492 err = sd_check_media(dev, state); 19493 if (err == 0) { 19494 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19495 sizeof (int), flag) != 0) 19496 err = EFAULT; 19497 } 19498 } 19499 break; 19500 } 19501 19502 case DKIOCREMOVABLE: 19503 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19504 i = un->un_f_has_removable_media ? 1 : 0; 19505 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19506 err = EFAULT; 19507 } else { 19508 err = 0; 19509 } 19510 break; 19511 19512 case DKIOCHOTPLUGGABLE: 19513 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19514 i = un->un_f_is_hotpluggable ? 
1 : 0; 19515 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19516 err = EFAULT; 19517 } else { 19518 err = 0; 19519 } 19520 break; 19521 19522 case DKIOCGTEMPERATURE: 19523 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19524 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19525 break; 19526 19527 case MHIOCENFAILFAST: 19528 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19529 if ((err = drv_priv(cred_p)) == 0) { 19530 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19531 } 19532 break; 19533 19534 case MHIOCTKOWN: 19535 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19536 if ((err = drv_priv(cred_p)) == 0) { 19537 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19538 } 19539 break; 19540 19541 case MHIOCRELEASE: 19542 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19543 if ((err = drv_priv(cred_p)) == 0) { 19544 err = sd_mhdioc_release(dev); 19545 } 19546 break; 19547 19548 case MHIOCSTATUS: 19549 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19550 if ((err = drv_priv(cred_p)) == 0) { 19551 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19552 case 0: 19553 err = 0; 19554 break; 19555 case EACCES: 19556 *rval_p = 1; 19557 err = 0; 19558 break; 19559 default: 19560 err = EIO; 19561 break; 19562 } 19563 } 19564 break; 19565 19566 case MHIOCQRESERVE: 19567 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 19568 if ((err = drv_priv(cred_p)) == 0) { 19569 err = sd_reserve_release(dev, SD_RESERVE); 19570 } 19571 break; 19572 19573 case MHIOCREREGISTERDEVID: 19574 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 19575 if (drv_priv(cred_p) == EPERM) { 19576 err = EPERM; 19577 } else if (!un->un_f_devid_supported) { 19578 err = ENOTTY; 19579 } else { 19580 err = sd_mhdioc_register_devid(dev); 19581 } 19582 break; 19583 19584 case MHIOCGRP_INKEYS: 19585 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 19586 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19587 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19588 err = ENOTSUP; 19589 } else { 19590 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 19591 flag); 19592 } 19593 } 19594 break; 19595 19596 case MHIOCGRP_INRESV: 19597 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 19598 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19599 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19600 err = ENOTSUP; 19601 } else { 19602 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 19603 } 19604 } 19605 break; 19606 19607 case MHIOCGRP_REGISTER: 19608 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 19609 if ((err = drv_priv(cred_p)) != EPERM) { 19610 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19611 err = ENOTSUP; 19612 } else if (arg != NULL) { 19613 mhioc_register_t reg; 19614 if (ddi_copyin((void *)arg, ®, 19615 sizeof (mhioc_register_t), flag) != 0) { 19616 err = EFAULT; 19617 } else { 19618 err = 19619 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19620 un, SD_SCSI3_REGISTER, 19621 (uchar_t *)®); 19622 } 19623 } 19624 } 19625 break; 19626 19627 case MHIOCGRP_RESERVE: 19628 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 19629 if ((err = drv_priv(cred_p)) != EPERM) { 19630 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19631 err = ENOTSUP; 19632 } else if (arg != NULL) { 19633 mhioc_resv_desc_t resv_desc; 19634 if (ddi_copyin((void *)arg, &resv_desc, 19635 sizeof (mhioc_resv_desc_t), flag) != 0) { 19636 err = EFAULT; 19637 } else { 19638 err = 19639 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19640 un, SD_SCSI3_RESERVE, 19641 (uchar_t *)&resv_desc); 19642 } 19643 } 19644 } 19645 break; 19646 19647 
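	/*
	 * For reference, a user-land application drives the MHIOCGRP_*
	 * ioctls above and below roughly as follows (an illustrative
	 * sketch only; the device path and key value are assumptions):
	 *
	 *	mhioc_resv_desc_t rd;
	 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);
	 *
	 *	bzero(&rd, sizeof (rd));
	 *	bcopy(mykey, rd.key.key, MHIOC_RESV_KEY_SIZE);
	 *	rd.type = 5;	(Write Exclusive, Registrants Only, per SPC)
	 *	if (ioctl(fd, MHIOCGRP_RESERVE, &rd) != 0)
	 *		(the key must first have been registered via
	 *		MHIOCGRP_REGISTER)
	 */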
case MHIOCGRP_PREEMPTANDABORT: 19648 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 19649 if ((err = drv_priv(cred_p)) != EPERM) { 19650 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19651 err = ENOTSUP; 19652 } else if (arg != NULL) { 19653 mhioc_preemptandabort_t preempt_abort; 19654 if (ddi_copyin((void *)arg, &preempt_abort, 19655 sizeof (mhioc_preemptandabort_t), 19656 flag) != 0) { 19657 err = EFAULT; 19658 } else { 19659 err = 19660 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19661 un, SD_SCSI3_PREEMPTANDABORT, 19662 (uchar_t *)&preempt_abort); 19663 } 19664 } 19665 } 19666 break; 19667 19668 case MHIOCGRP_REGISTERANDIGNOREKEY: 19669 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 19670 if ((err = drv_priv(cred_p)) != EPERM) { 19671 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19672 err = ENOTSUP; 19673 } else if (arg != NULL) { 19674 mhioc_registerandignorekey_t r_and_i; 19675 if (ddi_copyin((void *)arg, (void *)&r_and_i, 19676 sizeof (mhioc_registerandignorekey_t), 19677 flag) != 0) { 19678 err = EFAULT; 19679 } else { 19680 err = 19681 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19682 un, SD_SCSI3_REGISTERANDIGNOREKEY, 19683 (uchar_t *)&r_and_i); 19684 } 19685 } 19686 } 19687 break; 19688 19689 case USCSICMD: 19690 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 19691 cr = ddi_get_cred(); 19692 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 19693 err = EPERM; 19694 } else { 19695 enum uio_seg uioseg; 19696 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 19697 UIO_USERSPACE; 19698 if (un->un_f_format_in_progress == TRUE) { 19699 err = EAGAIN; 19700 break; 19701 } 19702 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 19703 flag, uioseg, SD_PATH_STANDARD); 19704 } 19705 break; 19706 19707 case CDROMPAUSE: 19708 case CDROMRESUME: 19709 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 19710 if (!ISCD(un)) { 19711 err = ENOTTY; 19712 } else { 19713 err = sr_pause_resume(dev, cmd); 19714 } 19715 break; 19716 19717 case CDROMPLAYMSF: 19718 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 19719 if (!ISCD(un)) { 19720 err = ENOTTY; 19721 } else { 19722 err = sr_play_msf(dev, (caddr_t)arg, flag); 19723 } 19724 break; 19725 19726 case CDROMPLAYTRKIND: 19727 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 19728 #if defined(__i386) || defined(__amd64) 19729 /* 19730 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 19731 */ 19732 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 19733 #else 19734 if (!ISCD(un)) { 19735 #endif 19736 err = ENOTTY; 19737 } else { 19738 err = sr_play_trkind(dev, (caddr_t)arg, flag); 19739 } 19740 break; 19741 19742 case CDROMREADTOCHDR: 19743 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 19744 if (!ISCD(un)) { 19745 err = ENOTTY; 19746 } else { 19747 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 19748 } 19749 break; 19750 19751 case CDROMREADTOCENTRY: 19752 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 19753 if (!ISCD(un)) { 19754 err = ENOTTY; 19755 } else { 19756 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 19757 } 19758 break; 19759 19760 case CDROMSTOP: 19761 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 19762 if (!ISCD(un)) { 19763 err = ENOTTY; 19764 } else { 19765 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 19766 SD_PATH_STANDARD); 19767 } 19768 break; 19769 19770 case CDROMSTART: 19771 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 19772 if (!ISCD(un)) { 19773 err = ENOTTY; 19774 } else { 19775 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 19776 SD_PATH_STANDARD); 19777 } 19778 break; 
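	/*
	 * CDROMSTOP and CDROMSTART above, and CDROMCLOSETRAY below, all
	 * map onto the SCSI START STOP UNIT command; the three requests
	 * differ only in the Start and LoEj bits of CDB byte 4
	 * (stop: both clear; start: Start set; close tray: both set).
	 */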
19779 19780 case CDROMCLOSETRAY: 19781 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 19782 if (!ISCD(un)) { 19783 err = ENOTTY; 19784 } else { 19785 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 19786 SD_PATH_STANDARD); 19787 } 19788 break; 19789 19790 case FDEJECT: /* for eject command */ 19791 case DKIOCEJECT: 19792 case CDROMEJECT: 19793 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 19794 if (!un->un_f_eject_media_supported) { 19795 err = ENOTTY; 19796 } else { 19797 err = sr_eject(dev); 19798 } 19799 break; 19800 19801 case CDROMVOLCTRL: 19802 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 19803 if (!ISCD(un)) { 19804 err = ENOTTY; 19805 } else { 19806 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 19807 } 19808 break; 19809 19810 case CDROMSUBCHNL: 19811 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 19812 if (!ISCD(un)) { 19813 err = ENOTTY; 19814 } else { 19815 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 19816 } 19817 break; 19818 19819 case CDROMREADMODE2: 19820 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 19821 if (!ISCD(un)) { 19822 err = ENOTTY; 19823 } else if (un->un_f_cfg_is_atapi == TRUE) { 19824 /* 19825 * If the drive supports READ CD, use that instead of 19826 * switching the LBA size via a MODE SELECT 19827 * Block Descriptor 19828 */ 19829 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 19830 } else { 19831 err = sr_read_mode2(dev, (caddr_t)arg, flag); 19832 } 19833 break; 19834 19835 case CDROMREADMODE1: 19836 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 19837 if (!ISCD(un)) { 19838 err = ENOTTY; 19839 } else { 19840 err = sr_read_mode1(dev, (caddr_t)arg, flag); 19841 } 19842 break; 19843 19844 case CDROMREADOFFSET: 19845 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 19846 if (!ISCD(un)) { 19847 err = ENOTTY; 19848 } else { 19849 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 19850 flag); 19851 } 19852 break; 19853 19854 case CDROMSBLKMODE: 19855 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 19856 /* 19857 * There is no means of changing block size in case of atapi 19858 * drives, thus return ENOTTY if drive type is atapi 19859 */ 19860 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 19861 err = ENOTTY; 19862 } else if (un->un_f_mmc_cap == TRUE) { 19863 19864 /* 19865 * MMC Devices do not support changing the 19866 * logical block size 19867 * 19868 * Note: EINVAL is being returned instead of ENOTTY to 19869 * maintain consistancy with the original mmc 19870 * driver update. 19871 */ 19872 err = EINVAL; 19873 } else { 19874 mutex_enter(SD_MUTEX(un)); 19875 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 19876 (un->un_ncmds_in_transport > 0)) { 19877 mutex_exit(SD_MUTEX(un)); 19878 err = EINVAL; 19879 } else { 19880 mutex_exit(SD_MUTEX(un)); 19881 err = sr_change_blkmode(dev, cmd, arg, flag); 19882 } 19883 } 19884 break; 19885 19886 case CDROMGBLKMODE: 19887 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 19888 if (!ISCD(un)) { 19889 err = ENOTTY; 19890 } else if ((un->un_f_cfg_is_atapi != FALSE) && 19891 (un->un_f_blockcount_is_valid != FALSE)) { 19892 /* 19893 * Drive is an ATAPI drive so return target block 19894 * size for ATAPI drives since we cannot change the 19895 * blocksize on ATAPI drives. Used primarily to detect 19896 * if an ATAPI cdrom is present. 19897 */ 19898 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 19899 sizeof (int), flag) != 0) { 19900 err = EFAULT; 19901 } else { 19902 err = 0; 19903 } 19904 19905 } else { 19906 /* 19907 * Drive supports changing block sizes via a Mode 19908 * Select. 
19909 */ 19910 err = sr_change_blkmode(dev, cmd, arg, flag); 19911 } 19912 break; 19913 19914 case CDROMGDRVSPEED: 19915 case CDROMSDRVSPEED: 19916 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 19917 if (!ISCD(un)) { 19918 err = ENOTTY; 19919 } else if (un->un_f_mmc_cap == TRUE) { 19920 /* 19921 * Note: In the future the driver implementation 19922 * for getting and 19923 * setting cd speed should entail: 19924 * 1) If non-mmc try the Toshiba mode page 19925 * (sr_change_speed) 19926 * 2) If mmc but no support for Real Time Streaming try 19927 * the SET CD SPEED (0xBB) command 19928 * (sr_atapi_change_speed) 19929 * 3) If mmc and support for Real Time Streaming 19930 * try the GET PERFORMANCE and SET STREAMING 19931 * commands (not yet implemented, 4380808) 19932 */ 19933 /* 19934 * As per recent MMC spec, CD-ROM speed is variable 19935 * and changes with LBA. Since there is no such 19936 * things as drive speed now, fail this ioctl. 19937 * 19938 * Note: EINVAL is returned for consistancy of original 19939 * implementation which included support for getting 19940 * the drive speed of mmc devices but not setting 19941 * the drive speed. Thus EINVAL would be returned 19942 * if a set request was made for an mmc device. 19943 * We no longer support get or set speed for 19944 * mmc but need to remain consistent with regard 19945 * to the error code returned. 19946 */ 19947 err = EINVAL; 19948 } else if (un->un_f_cfg_is_atapi == TRUE) { 19949 err = sr_atapi_change_speed(dev, cmd, arg, flag); 19950 } else { 19951 err = sr_change_speed(dev, cmd, arg, flag); 19952 } 19953 break; 19954 19955 case CDROMCDDA: 19956 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 19957 if (!ISCD(un)) { 19958 err = ENOTTY; 19959 } else { 19960 err = sr_read_cdda(dev, (void *)arg, flag); 19961 } 19962 break; 19963 19964 case CDROMCDXA: 19965 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 19966 if (!ISCD(un)) { 19967 err = ENOTTY; 19968 } else { 19969 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 19970 } 19971 break; 19972 19973 case CDROMSUBCODE: 19974 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 19975 if (!ISCD(un)) { 19976 err = ENOTTY; 19977 } else { 19978 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 19979 } 19980 break; 19981 19982 19983 #ifdef SDDEBUG 19984 /* RESET/ABORTS testing ioctls */ 19985 case DKIOCRESET: { 19986 int reset_level; 19987 19988 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 19989 err = EFAULT; 19990 } else { 19991 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 19992 "reset_level = 0x%lx\n", reset_level); 19993 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 19994 err = 0; 19995 } else { 19996 err = EIO; 19997 } 19998 } 19999 break; 20000 } 20001 20002 case DKIOCABORT: 20003 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20004 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20005 err = 0; 20006 } else { 20007 err = EIO; 20008 } 20009 break; 20010 #endif 20011 20012 #ifdef SD_FAULT_INJECTION 20013 /* SDIOC FaultInjection testing ioctls */ 20014 case SDIOCSTART: 20015 case SDIOCSTOP: 20016 case SDIOCINSERTPKT: 20017 case SDIOCINSERTXB: 20018 case SDIOCINSERTUN: 20019 case SDIOCINSERTARQ: 20020 case SDIOCPUSH: 20021 case SDIOCRETRIEVE: 20022 case SDIOCRUN: 20023 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 20024 "SDIOC detected cmd:0x%X:\n", cmd); 20025 /* call error generator */ 20026 sd_faultinjection_ioctl(cmd, arg, un); 20027 err = 0; 20028 break; 20029 20030 #endif /* SD_FAULT_INJECTION */ 20031 20032 case DKIOCFLUSHWRITECACHE: 20033 { 20034 struct dk_callback *dkc = (struct 
dk_callback *)arg; 20035 20036 mutex_enter(SD_MUTEX(un)); 20037 if (!un->un_f_sync_cache_supported || 20038 !un->un_f_write_cache_enabled) { 20039 err = un->un_f_sync_cache_supported ? 20040 0 : ENOTSUP; 20041 mutex_exit(SD_MUTEX(un)); 20042 if ((flag & FKIOCTL) && dkc != NULL && 20043 dkc->dkc_callback != NULL) { 20044 (*dkc->dkc_callback)(dkc->dkc_cookie, 20045 err); 20046 /* 20047 * Did callback and reported error. 20048 * Since we did a callback, ioctl 20049 * should return 0. 20050 */ 20051 err = 0; 20052 } 20053 break; 20054 } 20055 mutex_exit(SD_MUTEX(un)); 20056 20057 if ((flag & FKIOCTL) && dkc != NULL && 20058 dkc->dkc_callback != NULL) { 20059 /* async SYNC CACHE request */ 20060 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20061 } else { 20062 /* synchronous SYNC CACHE request */ 20063 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20064 } 20065 } 20066 break; 20067 20068 case DKIOCGETWCE: { 20069 20070 int wce; 20071 20072 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20073 break; 20074 } 20075 20076 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20077 err = EFAULT; 20078 } 20079 break; 20080 } 20081 20082 case DKIOCSETWCE: { 20083 20084 int wce, sync_supported; 20085 20086 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20087 err = EFAULT; 20088 break; 20089 } 20090 20091 /* 20092 * Synchronize multiple threads trying to enable 20093 * or disable the cache via the un_f_wcc_cv 20094 * condition variable. 20095 */ 20096 mutex_enter(SD_MUTEX(un)); 20097 20098 /* 20099 * Don't allow the cache to be enabled if the 20100 * config file has it disabled. 20101 */ 20102 if (un->un_f_opt_disable_cache && wce) { 20103 mutex_exit(SD_MUTEX(un)); 20104 err = EINVAL; 20105 break; 20106 } 20107 20108 /* 20109 * Wait for write cache change in progress 20110 * bit to be clear before proceeding. 20111 */ 20112 while (un->un_f_wcc_inprog) 20113 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20114 20115 un->un_f_wcc_inprog = 1; 20116 20117 if (un->un_f_write_cache_enabled && wce == 0) { 20118 /* 20119 * Disable the write cache. Don't clear 20120 * un_f_write_cache_enabled until after 20121 * the mode select and flush are complete. 20122 */ 20123 sync_supported = un->un_f_sync_cache_supported; 20124 mutex_exit(SD_MUTEX(un)); 20125 if ((err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20126 SD_CACHE_DISABLE)) == 0 && sync_supported) { 20127 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20128 } 20129 20130 mutex_enter(SD_MUTEX(un)); 20131 if (err == 0) { 20132 un->un_f_write_cache_enabled = 0; 20133 } 20134 20135 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20136 /* 20137 * Set un_f_write_cache_enabled first, so there is 20138 * no window where the cache is enabled, but the 20139 * bit says it isn't. 
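			 * (If the MODE SELECT issued below fails, the
			 * flag is simply cleared again under SD_MUTEX.)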
20140 */ 20141 un->un_f_write_cache_enabled = 1; 20142 mutex_exit(SD_MUTEX(un)); 20143 20144 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20145 SD_CACHE_ENABLE); 20146 20147 mutex_enter(SD_MUTEX(un)); 20148 20149 if (err) { 20150 un->un_f_write_cache_enabled = 0; 20151 } 20152 } 20153 20154 un->un_f_wcc_inprog = 0; 20155 cv_broadcast(&un->un_wcc_cv); 20156 mutex_exit(SD_MUTEX(un)); 20157 break; 20158 } 20159 20160 default: 20161 err = ENOTTY; 20162 break; 20163 } 20164 mutex_enter(SD_MUTEX(un)); 20165 un->un_ncmds_in_driver--; 20166 ASSERT(un->un_ncmds_in_driver >= 0); 20167 mutex_exit(SD_MUTEX(un)); 20168 20169 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20170 return (err); 20171 } 20172 20173 20174 /* 20175 * Function: sd_dkio_ctrl_info 20176 * 20177 * Description: This routine is the driver entry point for handling controller 20178 * information ioctl requests (DKIOCINFO). 20179 * 20180 * Arguments: dev - the device number 20181 * arg - pointer to user provided dk_cinfo structure 20182 * specifying the controller type and attributes. 20183 * flag - this argument is a pass through to ddi_copyxxx() 20184 * directly from the mode argument of ioctl(). 20185 * 20186 * Return Code: 0 20187 * EFAULT 20188 * ENXIO 20189 */ 20190 20191 static int 20192 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20193 { 20194 struct sd_lun *un = NULL; 20195 struct dk_cinfo *info; 20196 dev_info_t *pdip; 20197 int lun, tgt; 20198 20199 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20200 return (ENXIO); 20201 } 20202 20203 info = (struct dk_cinfo *) 20204 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20205 20206 switch (un->un_ctype) { 20207 case CTYPE_CDROM: 20208 info->dki_ctype = DKC_CDROM; 20209 break; 20210 default: 20211 info->dki_ctype = DKC_SCSI_CCS; 20212 break; 20213 } 20214 pdip = ddi_get_parent(SD_DEVINFO(un)); 20215 info->dki_cnum = ddi_get_instance(pdip); 20216 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20217 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20218 } else { 20219 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20220 DK_DEVLEN - 1); 20221 } 20222 20223 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20224 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20225 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20226 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20227 20228 /* Unit Information */ 20229 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20230 info->dki_slave = ((tgt << 3) | lun); 20231 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20232 DK_DEVLEN - 1); 20233 info->dki_flags = DKI_FMTVOL; 20234 info->dki_partition = SDPART(dev); 20235 20236 /* Max Transfer size of this device in blocks */ 20237 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20238 info->dki_addr = 0; 20239 info->dki_space = 0; 20240 info->dki_prio = 0; 20241 info->dki_vec = 0; 20242 20243 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20244 kmem_free(info, sizeof (struct dk_cinfo)); 20245 return (EFAULT); 20246 } else { 20247 kmem_free(info, sizeof (struct dk_cinfo)); 20248 return (0); 20249 } 20250 } 20251 20252 20253 /* 20254 * Function: sd_get_media_info 20255 * 20256 * Description: This routine is the driver entry point for handling ioctl 20257 * requests for the media type or command set profile used by the 20258 * drive to operate on the media (DKIOCGMEDIAINFO). 
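 *		(Summary of the flow implemented below: a TEST UNIT READY
 *		verifies that media is present, GET CONFIGURATION (MMC
 *		devices only) or the inquiry data yields the media type,
 *		and READ CAPACITY supplies the block size and capacity.)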
 *
 * Arguments: dev	- the device number
 *		arg	- pointer to user provided dk_minfo structure
 *			  specifying the media type, logical block size and
 *			  drive capacity.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	struct dk_minfo		media_info;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	/* Issue a TUR to determine if the drive is ready with media present */
	rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		media_info.dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);

			if (rtn) {
				/*
				 * Failed for other than an illegal request
				 * or command not supported
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto done;
					}
				}
			} else {
				/*
				 * The GET CONFIGURATION command succeeded
				 * so set the media type according to the
				 * returned data
				 */
				media_info.dki_media_type = out_data[6];
				media_info.dki_media_type <<= 8;
				media_info.dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * The profile list is not available, so we attempt to identify
		 * the media type based on the inquiry data
		 */
		sinq = un->un_sd->sd_inq;
		if ((sinq->inq_dtype == DTYPE_DIRECT) ||
		    (sinq->inq_dtype == DTYPE_OPTICAL)) {
			/* This is a direct access device or optical disk */
			media_info.dki_media_type = DK_FIXED_DISK;

			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					media_info.dki_media_type = DK_ZIP;
				} else if (
				    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					media_info.dki_media_type = DK_JAZ;
				}
			}
		} else {
			/*
			 * Not a CD, direct access or optical disk so return
			 * unknown media
			 */
			media_info.dki_media_type = DK_UNKNOWN;
		}
	}

	/* Now read the capacity so we can provide the lbasize and capacity */
	switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize,
	    SD_PATH_DIRECT)) {
	case 0:
		break;
	case EACCES:
		rval = EACCES;
		goto done;
	default:
		rval = EIO;
		goto done;
	}

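	/*
	 * Worked example of the conversion done below (values are
	 * illustrative only): a device reporting a capacity of 250000
	 * in 512-byte un_sys_blocksize units with a 2048-byte lbasize
	 * yields 250000 * 512 / 2048 = 62500 blocks of 2048 bytes.
	 */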
	media_info.dki_lbsize = lbasize;
	media_capacity = capacity;

	/*
	 * sd_send_scsi_READ_CAPACITY() reports capacity in
	 * un->un_sys_blocksize chunks. So we need to convert it into
	 * lbasize chunks.
	 */
	media_capacity *= un->un_sys_blocksize;
	media_capacity /= lbasize;
	media_info.dki_capacity = media_capacity;

	if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) {
		rval = EFAULT;
		/* Keep the goto: any code added below must still run the cleanup. */
		goto done;
	}
done:
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}


/*
 * Function: sd_check_media
 *
 * Description: This utility routine implements the functionality for the
 *		DKIOCSTATE ioctl. This ioctl blocks the user thread until the
 *		driver state changes from that specified by the user
 *		(inserted or ejected). For example, if the user specifies
 *		DKIO_EJECTED and the current media state is inserted this
 *		routine will immediately return DKIO_INSERTED. However, if the
 *		current media state is not inserted the user thread will be
 *		blocked until the drive state changes. If DKIO_NONE is specified
 *		the user thread will block until a drive state change occurs.
 *
 * Arguments: dev	- the device number
 *		state	- the media state (dkio_state) specified by the user;
 *			  this routine blocks until the drive state differs
 *			  from it.
 *
 * Return Code: 0
 *		ENXIO
 *		EIO
 *		EAGAIN
 *		EINTR
 */

static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun		*un = NULL;
	enum dkio_state		prev_state;
	opaque_t		token = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	prev_state = un->un_mediastate;

	/* is there anything to do? */
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * submit the request to the scsi_watch service;
		 * scsi_media_watch_cb() does the real work
		 */
		mutex_exit(SD_MUTEX(un));

		/*
		 * This change handles the case where a scsi watch request is
		 * added to a device that is powered down. To accomplish this
		 * we power up the device before adding the scsi watch request,
		 * since the scsi watch sends a TUR directly to the device
		 * which the device cannot handle if it is powered down.
		 */
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		token = scsi_watch_request_submit(SD_SCSI_DEVP(un),
		    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
		    (caddr_t)dev);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This is a special case IOCTL that doesn't return
		 * until the media state changes. Routine sdpower
		 * knows about and handles this so don't count it
		 * as an active cmd in the driver, which would
		 * keep the device busy to the pm framework.
20487 * If the count isn't decremented the device can't 20488 * be powered down. 20489 */ 20490 un->un_ncmds_in_driver--; 20491 ASSERT(un->un_ncmds_in_driver >= 0); 20492 20493 /* 20494 * if a prior request had been made, this will be the same 20495 * token, as scsi_watch was designed that way. 20496 */ 20497 un->un_swr_token = token; 20498 un->un_specified_mediastate = state; 20499 20500 /* 20501 * now wait for media change 20502 * we will not be signalled unless mediastate == state but it is 20503 * still better to test for this condition, since there is a 20504 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 20505 */ 20506 SD_TRACE(SD_LOG_COMMON, un, 20507 "sd_check_media: waiting for media state change\n"); 20508 while (un->un_mediastate == state) { 20509 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20510 SD_TRACE(SD_LOG_COMMON, un, 20511 "sd_check_media: waiting for media state " 20512 "was interrupted\n"); 20513 un->un_ncmds_in_driver++; 20514 rval = EINTR; 20515 goto done; 20516 } 20517 SD_TRACE(SD_LOG_COMMON, un, 20518 "sd_check_media: received signal, state=%x\n", 20519 un->un_mediastate); 20520 } 20521 /* 20522 * Inc the counter to indicate the device once again 20523 * has an active outstanding cmd. 20524 */ 20525 un->un_ncmds_in_driver++; 20526 } 20527 20528 /* invalidate geometry */ 20529 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20530 sr_ejected(un); 20531 } 20532 20533 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20534 uint64_t capacity; 20535 uint_t lbasize; 20536 20537 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 20538 mutex_exit(SD_MUTEX(un)); 20539 /* 20540 * Since the following routines use SD_PATH_DIRECT, we must 20541 * call PM directly before the upcoming disk accesses. This 20542 * may cause the disk to be power/spin up. 20543 */ 20544 20545 if (sd_pm_entry(un) == DDI_SUCCESS) { 20546 rval = sd_send_scsi_READ_CAPACITY(un, 20547 &capacity, 20548 &lbasize, SD_PATH_DIRECT); 20549 if (rval != 0) { 20550 sd_pm_exit(un); 20551 mutex_enter(SD_MUTEX(un)); 20552 goto done; 20553 } 20554 } else { 20555 rval = EIO; 20556 mutex_enter(SD_MUTEX(un)); 20557 goto done; 20558 } 20559 mutex_enter(SD_MUTEX(un)); 20560 20561 sd_update_block_info(un, lbasize, capacity); 20562 20563 /* 20564 * Check if the media in the device is writable or not 20565 */ 20566 if (ISCD(un)) 20567 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 20568 20569 mutex_exit(SD_MUTEX(un)); 20570 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 20571 if ((cmlb_validate(un->un_cmlbhandle, 0, 20572 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 20573 sd_set_pstats(un); 20574 SD_TRACE(SD_LOG_IO_PARTITION, un, 20575 "sd_check_media: un:0x%p pstats created and " 20576 "set\n", un); 20577 } 20578 20579 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20580 SD_PATH_DIRECT); 20581 sd_pm_exit(un); 20582 20583 mutex_enter(SD_MUTEX(un)); 20584 } 20585 done: 20586 un->un_f_watcht_stopped = FALSE; 20587 if (un->un_swr_token) { 20588 /* 20589 * Use of this local token and the mutex ensures that we avoid 20590 * some race conditions associated with terminating the 20591 * scsi watch. 
20592 */ 20593 token = un->un_swr_token; 20594 un->un_swr_token = (opaque_t)NULL; 20595 mutex_exit(SD_MUTEX(un)); 20596 (void) scsi_watch_request_terminate(token, 20597 SCSI_WATCH_TERMINATE_WAIT); 20598 mutex_enter(SD_MUTEX(un)); 20599 } 20600 20601 /* 20602 * Update the capacity kstat value, if no media previously 20603 * (capacity kstat is 0) and a media has been inserted 20604 * (un_f_blockcount_is_valid == TRUE) 20605 */ 20606 if (un->un_errstats) { 20607 struct sd_errstats *stp = NULL; 20608 20609 stp = (struct sd_errstats *)un->un_errstats->ks_data; 20610 if ((stp->sd_capacity.value.ui64 == 0) && 20611 (un->un_f_blockcount_is_valid == TRUE)) { 20612 stp->sd_capacity.value.ui64 = 20613 (uint64_t)((uint64_t)un->un_blockcount * 20614 un->un_sys_blocksize); 20615 } 20616 } 20617 mutex_exit(SD_MUTEX(un)); 20618 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 20619 return (rval); 20620 } 20621 20622 20623 /* 20624 * Function: sd_delayed_cv_broadcast 20625 * 20626 * Description: Delayed cv_broadcast to allow for target to recover from media 20627 * insertion. 20628 * 20629 * Arguments: arg - driver soft state (unit) structure 20630 */ 20631 20632 static void 20633 sd_delayed_cv_broadcast(void *arg) 20634 { 20635 struct sd_lun *un = arg; 20636 20637 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 20638 20639 mutex_enter(SD_MUTEX(un)); 20640 un->un_dcvb_timeid = NULL; 20641 cv_broadcast(&un->un_state_cv); 20642 mutex_exit(SD_MUTEX(un)); 20643 } 20644 20645 20646 /* 20647 * Function: sd_media_watch_cb 20648 * 20649 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 20650 * routine processes the TUR sense data and updates the driver 20651 * state if a transition has occurred. The user thread 20652 * (sd_check_media) is then signalled. 
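 *		A condensed view of the sense decoding done below (informal
 *		summary, not an exhaustive table; at least 13 bytes of sense
 *		data are required):
 *
 *		    KEY_UNIT_ATTENTION, ASC 0x28	-> DKIO_INSERTED
 *		    KEY_NOT_READY, ASC 0x3a		-> DKIO_EJECTED
 *		    KEY_NOT_READY, ASC 0x04,
 *			ASCQ 0x02/0x07/0x08		-> DKIO_INSERTED
 *		    KEY_NO_SENSE, ASC/ASCQ 00/00	-> ignored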
 *
 * Arguments: arg	- the device 'dev_t' is used for context to discriminate
 *			  among multiple watches that share this callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			  packet, status byte and sense data
 *
 * Return Code: 0 for success, -1 for failure
 */

static int
sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun		*un;
	struct scsi_status	*statusp = resultp->statusp;
	uint8_t			*sensep = (uint8_t *)resultp->sensep;
	enum dkio_state		state = DKIO_NONE;
	dev_t			dev = (dev_t)arg;
	uchar_t			actual_sense_length;
	uint8_t			skey, asc, ascq;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (-1);
	}
	actual_sense_length = resultp->actual_sense_length;

	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
	    *((char *)statusp), (void *)sensep, actual_sense_length);

	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
		un->un_mediastate = DKIO_DEV_GONE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));

		return (0);
	}

	/*
	 * If there was a check condition then sensep points to valid sense
	 * data. If the status was not a check condition but a reservation or
	 * busy status, then the new state is DKIO_NONE.
	 */
	if (sensep != NULL) {
		skey = scsi_sense_key(sensep);
		asc = scsi_sense_asc(sensep);
		ascq = scsi_sense_ascq(sensep);

		SD_INFO(SD_LOG_COMMON, un,
		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
		    skey, asc, ascq);
		/* This routine only uses up to 13 bytes of sense data. */
		if (actual_sense_length >= 13) {
			if (skey == KEY_UNIT_ATTENTION) {
				if (asc == 0x28) {
					state = DKIO_INSERTED;
				}
			} else if (skey == KEY_NOT_READY) {
				/*
				 * A sense code of 02/04/02 means that the
				 * host should send a start command.
				 * Explicitly leave the media state as is
				 * (inserted), since the media is present and
				 * the host has stopped the device for PM
				 * reasons. The next real read/write to the
				 * media will bring the device back to the
				 * proper state for media access.
				 */
				if (asc == 0x3a) {
					state = DKIO_EJECTED;
				} else {
					/*
					 * If the drive is busy with an
					 * operation or long write, keep the
					 * media in an inserted state.
					 */

					if ((asc == 0x04) &&
					    ((ascq == 0x02) ||
					    (ascq == 0x07) ||
					    (ascq == 0x08))) {
						state = DKIO_INSERTED;
					}
				}
			} else if (skey == KEY_NO_SENSE) {
				if ((asc == 0x00) && (ascq == 0x00)) {
					/*
					 * Sense Data 00/00/00 does not provide
					 * any information about the state of
					 * the media. Ignore it.
					 */
					mutex_exit(SD_MUTEX(un));
					return (0);
				}
			}
		}
	} else if ((*((char *)statusp) == STATUS_GOOD) &&
	    (resultp->pkt->pkt_reason == CMD_CMPLT)) {
		state = DKIO_INSERTED;
	}

	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: state=%x, specified=%x\n",
	    state, un->un_specified_mediastate);

	/*
	 * now signal the waiting thread if this is *not* the specified state;
	 * delay the signal if the state is DKIO_INSERTED to allow the target
	 * to recover
	 */
	if (state != un->un_specified_mediastate) {
		un->un_mediastate = state;
		if (state == DKIO_INSERTED) {
			/*
			 * delay the signal to give the drive a chance
			 * to do what it apparently needs to do
			 */
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: delayed cv_broadcast\n");
			if (un->un_dcvb_timeid == NULL) {
				un->un_dcvb_timeid =
				    timeout(sd_delayed_cv_broadcast, un,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			}
		} else {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: immediate cv_broadcast\n");
			cv_broadcast(&un->un_state_cv);
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_dkio_get_temp
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to get the disk temperature.
 *
 * Arguments: dev	- the device number
 *		arg	- pointer to user provided dk_temperature structure.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EAGAIN
 */

static int
sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct dk_temperature	*dktemp = NULL;
	uchar_t			*temperature_page;
	int			rval = 0;
	int			path_flag = SD_PATH_STANDARD;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);

	/* copyin the disk temp argument to get the user flags */
	if (ddi_copyin((void *)arg, dktemp,
	    sizeof (struct dk_temperature), flag) != 0) {
		rval = EFAULT;
		goto done;
	}

	/* Initialize the temperature to invalid. */
	dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
	dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;

	/*
	 * Note: Investigate removing the "bypass pm" semantic.
	 * Can we just bypass PM always?
	 */
	if (dktemp->dkt_flags & DKT_BYPASS_PM) {
		path_flag = SD_PATH_DIRECT;
		ASSERT(!mutex_owned(&un->un_pm_mutex));
		mutex_enter(&un->un_pm_mutex);
		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
			/*
			 * If DKT_BYPASS_PM is set, and the drive happens to be
			 * in low power mode, we cannot wake it up; we need to
			 * return EAGAIN.
			 */
			mutex_exit(&un->un_pm_mutex);
			rval = EAGAIN;
			goto done;
		} else {
			/*
			 * Indicate to PM the device is busy. This is required
			 * to avoid a race - i.e. the ioctl is issuing a
			 * command and the pm framework brings down the device
			 * to low power mode (possible power cut-off on some
			 * platforms).
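			 * (sd_pm_entry() below marks the device busy for
			 * PM; the matching sd_pm_exit() is issued at the
			 * done2 label once the LOG SENSE has completed.)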
			 */
			mutex_exit(&un->un_pm_mutex);
			if (sd_pm_entry(un) != DDI_SUCCESS) {
				rval = EAGAIN;
				goto done;
			}
		}
	}

	temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);

	if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page,
	    TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) {
		goto done2;
	}

	/*
	 * For the current temperature verify that the parameter length is 0x02
	 * and the parameter code is 0x00
	 */
	if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
	    (temperature_page[5] == 0x00)) {
		if (temperature_page[9] == 0xFF) {
			dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_cur_temp = (short)(temperature_page[9]);
		}
	}

	/*
	 * For the reference temperature verify that the parameter
	 * length is 0x02 and the parameter code is 0x01
	 */
	if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
	    (temperature_page[11] == 0x01)) {
		if (temperature_page[15] == 0xFF) {
			dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_ref_temp = (short)(temperature_page[15]);
		}
	}

	/* Do the copyout regardless of the temperature command's status. */
	if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
	    flag) != 0) {
		rval = EFAULT;
	}

done2:
	if (path_flag == SD_PATH_DIRECT) {
		sd_pm_exit(un);
	}

	kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
done:
	if (dktemp != NULL) {
		kmem_free(dktemp, sizeof (struct dk_temperature));
	}

	return (rval);
}


/*
 * Function: sd_log_page_supported
 *
 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
 *		supported log pages.
 *
 * Arguments: un	- driver soft state (unit) structure
 *		log_page - the log page to look for in the supported list
 *
 * Return Code: -1 - on error (log sense is optional and may not be supported).
 *		0  - log page not found.
 *		1  - log page found.
 */

static int
sd_log_page_supported(struct sd_lun *un, int log_page)
{
	uchar_t *log_page_data;
	int	i;
	int	match = 0;
	int	log_size;

	log_page_data = kmem_zalloc(0xFF, KM_SLEEP);

	if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0,
	    SD_PATH_DIRECT) != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_log_page_supported: failed log page retrieval\n");
		kmem_free(log_page_data, 0xFF);
		return (-1);
	}
	log_size = log_page_data[3];

	/*
	 * The list of supported log pages starts at the fourth byte. Check
	 * until we run out of log pages or a match is found.
	 */
	for (i = 4; (i < (log_size + 4)) && !match; i++) {
		if (log_page_data[i] == log_page) {
			match++;
		}
	}
	kmem_free(log_page_data, 0xFF);
	return (match);
}


/*
 * Function: sd_mhdioc_failfast
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to enable/disable the multihost failfast option.
 *		(MHIOCENFAILFAST)
 *
 * Arguments: dev	- the device number
 *		arg	- user specified probing interval.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
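 *			  (As handled below, an interval of INT_MAX arms
 *			  failfast for SCSI-3 PGR use without starting the
 *			  periodic probe thread.)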
20977 * 20978 * Return Code: 0 20979 * EFAULT 20980 * ENXIO 20981 */ 20982 20983 static int 20984 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 20985 { 20986 struct sd_lun *un = NULL; 20987 int mh_time; 20988 int rval = 0; 20989 20990 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20991 return (ENXIO); 20992 } 20993 20994 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 20995 return (EFAULT); 20996 20997 if (mh_time) { 20998 mutex_enter(SD_MUTEX(un)); 20999 un->un_resvd_status |= SD_FAILFAST; 21000 mutex_exit(SD_MUTEX(un)); 21001 /* 21002 * If mh_time is INT_MAX, then this ioctl is being used for 21003 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 21004 */ 21005 if (mh_time != INT_MAX) { 21006 rval = sd_check_mhd(dev, mh_time); 21007 } 21008 } else { 21009 (void) sd_check_mhd(dev, 0); 21010 mutex_enter(SD_MUTEX(un)); 21011 un->un_resvd_status &= ~SD_FAILFAST; 21012 mutex_exit(SD_MUTEX(un)); 21013 } 21014 return (rval); 21015 } 21016 21017 21018 /* 21019 * Function: sd_mhdioc_takeown 21020 * 21021 * Description: This routine is the driver entry point for handling ioctl 21022 * requests to forcefully acquire exclusive access rights to the 21023 * multihost disk (MHIOCTKOWN). 21024 * 21025 * Arguments: dev - the device number 21026 * arg - user provided structure specifying the delay 21027 * parameters in milliseconds 21028 * flag - this argument is a pass through to ddi_copyxxx() 21029 * directly from the mode argument of ioctl(). 21030 * 21031 * Return Code: 0 21032 * EFAULT 21033 * ENXIO 21034 */ 21035 21036 static int 21037 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21038 { 21039 struct sd_lun *un = NULL; 21040 struct mhioctkown *tkown = NULL; 21041 int rval = 0; 21042 21043 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21044 return (ENXIO); 21045 } 21046 21047 if (arg != NULL) { 21048 tkown = (struct mhioctkown *) 21049 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21050 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21051 if (rval != 0) { 21052 rval = EFAULT; 21053 goto error; 21054 } 21055 } 21056 21057 rval = sd_take_ownership(dev, tkown); 21058 mutex_enter(SD_MUTEX(un)); 21059 if (rval == 0) { 21060 un->un_resvd_status |= SD_RESERVE; 21061 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21062 sd_reinstate_resv_delay = 21063 tkown->reinstate_resv_delay * 1000; 21064 } else { 21065 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21066 } 21067 /* 21068 * Give the scsi_watch routine interval set by 21069 * the MHIOCENFAILFAST ioctl precedence here. 21070 */ 21071 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21072 mutex_exit(SD_MUTEX(un)); 21073 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21074 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21075 "sd_mhdioc_takeown : %d\n", 21076 sd_reinstate_resv_delay); 21077 } else { 21078 mutex_exit(SD_MUTEX(un)); 21079 } 21080 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21081 sd_mhd_reset_notify_cb, (caddr_t)un); 21082 } else { 21083 un->un_resvd_status &= ~SD_RESERVE; 21084 mutex_exit(SD_MUTEX(un)); 21085 } 21086 21087 error: 21088 if (tkown != NULL) { 21089 kmem_free(tkown, sizeof (struct mhioctkown)); 21090 } 21091 return (rval); 21092 } 21093 21094 21095 /* 21096 * Function: sd_mhdioc_release 21097 * 21098 * Description: This routine is the driver entry point for handling ioctl 21099 * requests to release exclusive access rights to the multihost 21100 * disk (MHIOCRELEASE). 
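 *		(Sketch of the sequence implemented below: clear the
 *		reservation status bits and any pending reclaim timeout,
 *		remove queued reclaim requests, then issue the SCSI
 *		RELEASE; on failure the saved reservation status is
 *		restored.)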
 *
 * Arguments: dev	- the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_release(dev_t dev)
{
	struct sd_lun		*un = NULL;
	timeout_id_t		resvd_timeid_save;
	int			resvd_status_save;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	mutex_enter(SD_MUTEX(un));
	resvd_status_save = un->un_resvd_status;
	un->un_resvd_status &=
	    ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
	if (un->un_resvd_timeid) {
		resvd_timeid_save = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(resvd_timeid_save);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * destroy any pending timeout thread that may be attempting to
	 * reinstate reservation on this device.
	 */
	sd_rmv_resv_reclaim_req(dev);

	if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
		mutex_enter(SD_MUTEX(un));
		if ((un->un_mhd_token) &&
		    ((un->un_resvd_status & SD_FAILFAST) == 0)) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, 0);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		/*
		 * sd_mhd_watch_cb will restart the resvd recover timeout thread
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status = resvd_status_save;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_register_devid
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to register the device id (MHIOCREREGISTERDEVID).
 *
 *		Note: The implementation for this ioctl has been updated to
 *		be consistent with the original PSARC case (1999/357)
 *		(4375899, 4241671, 4220005)
 *
 * Arguments: dev	- the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_register_devid(dev_t dev)
{
	struct sd_lun	*un = NULL;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* If a devid already exists, de-register it */
	if (un->un_devid != NULL) {
		ddi_devid_unregister(SD_DEVINFO(un));
		/*
		 * After unregistering the devid, we must also free
		 * the devid memory.
		 */
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/* Check for reservation conflict */
	mutex_exit(SD_MUTEX(un));
	rval = sd_send_scsi_TEST_UNIT_READY(un, 0);
	mutex_enter(SD_MUTEX(un));

	switch (rval) {
	case 0:
		sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
		break;
	case EACCES:
		break;
	default:
		rval = EIO;
	}

	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function: sd_mhdioc_inkeys
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent Reserve In (Read Keys)
 *		command to the device (MHIOCGRP_INKEYS).
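 *		(The handler itself is a thin copyin/copyout wrapper that
 *		normalizes the ILP32 and LP64 views of the user structure;
 *		the actual PERSISTENT RESERVE IN command is issued by
 *		sd_persistent_reservation_in_read_keys().)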
 *
 * Arguments: dev	- the device number
 *		arg	- user provided in_keys structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un;
	mhioc_inkeys_t		inkeys;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inkeys32	inkeys32;

		if (ddi_copyin(arg, &inkeys32,
		    sizeof (struct mhioc_inkeys32), flag) != 0) {
			return (EFAULT);
		}
		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		inkeys32.generation = inkeys.generation;
		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}

#endif /* _MULTI_DATAMODEL */

	return (rval);
}


/*
 * Function: sd_mhdioc_inresv
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent Reserve In (Read
 *		Reservations) command to the device (MHIOCGRP_INRESV).
 *
 * Arguments: dev	- the device number
 *		arg	- user provided in_resv structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
21317 * 21318 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21319 * ENXIO 21320 * EFAULT 21321 */ 21322 21323 static int 21324 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21325 { 21326 struct sd_lun *un; 21327 mhioc_inresvs_t inresvs; 21328 int rval = 0; 21329 21330 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21331 return (ENXIO); 21332 } 21333 21334 #ifdef _MULTI_DATAMODEL 21335 21336 switch (ddi_model_convert_from(flag & FMODELS)) { 21337 case DDI_MODEL_ILP32: { 21338 struct mhioc_inresvs32 inresvs32; 21339 21340 if (ddi_copyin(arg, &inresvs32, 21341 sizeof (struct mhioc_inresvs32), flag) != 0) { 21342 return (EFAULT); 21343 } 21344 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21345 if ((rval = sd_persistent_reservation_in_read_resv(un, 21346 &inresvs, flag)) != 0) { 21347 return (rval); 21348 } 21349 inresvs32.generation = inresvs.generation; 21350 if (ddi_copyout(&inresvs32, arg, 21351 sizeof (struct mhioc_inresvs32), flag) != 0) { 21352 return (EFAULT); 21353 } 21354 break; 21355 } 21356 case DDI_MODEL_NONE: 21357 if (ddi_copyin(arg, &inresvs, 21358 sizeof (mhioc_inresvs_t), flag) != 0) { 21359 return (EFAULT); 21360 } 21361 if ((rval = sd_persistent_reservation_in_read_resv(un, 21362 &inresvs, flag)) != 0) { 21363 return (rval); 21364 } 21365 if (ddi_copyout(&inresvs, arg, 21366 sizeof (mhioc_inresvs_t), flag) != 0) { 21367 return (EFAULT); 21368 } 21369 break; 21370 } 21371 21372 #else /* ! _MULTI_DATAMODEL */ 21373 21374 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21375 return (EFAULT); 21376 } 21377 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21378 if (rval != 0) { 21379 return (rval); 21380 } 21381 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 21382 return (EFAULT); 21383 } 21384 21385 #endif /* ! _MULTI_DATAMODEL */ 21386 21387 return (rval); 21388 } 21389 21390 21391 /* 21392 * The following routines support the clustering functionality described below 21393 * and implement lost reservation reclaim functionality. 21394 * 21395 * Clustering 21396 * ---------- 21397 * The clustering code uses two different, independent forms of SCSI 21398 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 21399 * Persistent Group Reservations. For any particular disk, it will use either 21400 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21401 * 21402 * SCSI-2 21403 * The cluster software takes ownership of a multi-hosted disk by issuing the 21404 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21405 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 21406 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 21407 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 21408 * driver. The meaning of failfast is that if the driver (on this host) ever 21409 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 21410 * it should immediately panic the host. The motivation for this ioctl is that 21411 * if this host does encounter reservation conflict, the underlying cause is 21412 * that some other host of the cluster has decided that this host is no longer 21413 * in the cluster and has seized control of the disks for itself. Since this 21414 * host is no longer in the cluster, it ought to panic itself. 
The
 * MHIOCENFAILFAST ioctl does two things:
 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
 *	error to panic the host
 *	(b) it sets up a periodic timer to test whether this host still has
 *	"access" (in that no other host has reserved the device): if the
 *	periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
 *	purpose of that periodic timer is to handle scenarios where the host is
 *	otherwise temporarily quiescent, temporarily doing no real i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
 * the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT)
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices that
 * have LOST RESERVATIONS when the scsi watch facility calls back
 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */

/*
 * Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 * Arguments: dev	- the device 'dev_t' is used for context to discriminate
 *			  among multiple watches that share the callback function
 *		interval - the number of milliseconds specifying the watch
 *			   interval for issuing TEST UNIT READY commands. If
 *			   set to 0 the watch should be terminated. If the
 *			   interval is set to 0 and if the device is required
 *			   to hold reservation while disabling failfast, the
 *			   watch is restarted with an interval of
 *			   reinstate_resv_delay.
 *
 * Return Code: 0	- Successful submit/terminate of scsi watch request
 *		ENXIO	- Indicates an invalid device was specified
 *		EAGAIN	- Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation but perhaps we should be checking
			 * the failfast case.
21493 */ 21494 return (0); 21495 } 21496 /* 21497 * If the device is required to hold reservation while 21498 * disabling failfast, we need to restart the scsi_watch 21499 * routine with an interval of reinstate_resv_delay. 21500 */ 21501 if (un->un_resvd_status & SD_RESERVE) { 21502 interval = sd_reinstate_resv_delay/1000; 21503 } else { 21504 /* no failfast so bail */ 21505 mutex_exit(SD_MUTEX(un)); 21506 return (0); 21507 } 21508 mutex_exit(SD_MUTEX(un)); 21509 } 21510 21511 /* 21512 * adjust minimum time interval to 1 second, 21513 * and convert from msecs to usecs 21514 */ 21515 if (interval > 0 && interval < 1000) { 21516 interval = 1000; 21517 } 21518 interval *= 1000; 21519 21520 /* 21521 * submit the request to the scsi_watch service 21522 */ 21523 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21524 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21525 if (token == NULL) { 21526 return (EAGAIN); 21527 } 21528 21529 /* 21530 * save token for termination later on 21531 */ 21532 mutex_enter(SD_MUTEX(un)); 21533 un->un_mhd_token = token; 21534 mutex_exit(SD_MUTEX(un)); 21535 return (0); 21536 } 21537 21538 21539 /* 21540 * Function: sd_mhd_watch_cb() 21541 * 21542 * Description: This function is the call back function used by the scsi watch 21543 * facility. The scsi watch facility sends the "Test Unit Ready" 21544 * and processes the status. If applicable (i.e. a "Unit Attention" 21545 * status and automatic "Request Sense" not used) the scsi watch 21546 * facility will send a "Request Sense" and retrieve the sense data 21547 * to be passed to this callback function. In either case the 21548 * automatic "Request Sense" or the facility submitting one, this 21549 * callback is passed the status and sense data. 21550 * 21551 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21552 * among multiple watches that share this callback function 21553 * resultp - scsi watch facility result packet containing scsi 21554 * packet, status byte and sense data 21555 * 21556 * Return Code: 0 - continue the watch task 21557 * non-zero - terminate the watch task 21558 */ 21559 21560 static int 21561 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21562 { 21563 struct sd_lun *un; 21564 struct scsi_status *statusp; 21565 uint8_t *sensep; 21566 struct scsi_pkt *pkt; 21567 uchar_t actual_sense_length; 21568 dev_t dev = (dev_t)arg; 21569 21570 ASSERT(resultp != NULL); 21571 statusp = resultp->statusp; 21572 sensep = (uint8_t *)resultp->sensep; 21573 pkt = resultp->pkt; 21574 actual_sense_length = resultp->actual_sense_length; 21575 21576 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21577 return (ENXIO); 21578 } 21579 21580 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21581 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 21582 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 21583 21584 /* Begin processing of the status and/or sense data */ 21585 if (pkt->pkt_reason != CMD_CMPLT) { 21586 /* Handle the incomplete packet */ 21587 sd_mhd_watch_incomplete(un, pkt); 21588 return (0); 21589 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 21590 if (*((unsigned char *)statusp) 21591 == STATUS_RESERVATION_CONFLICT) { 21592 /* 21593 * Handle a reservation conflict by panicking if 21594 * configured for failfast or by logging the conflict 21595 * and updating the reservation status 21596 */ 21597 mutex_enter(SD_MUTEX(un)); 21598 if ((un->un_resvd_status & SD_FAILFAST) && 21599 (sd_failfast_enable)) { 21600 
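				/*
				 * Failfast is armed and the target returned
				 * a reservation conflict: panic this host,
				 * which has presumably been fenced out of
				 * the cluster (see the clustering notes
				 * above).
				 */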
sd_panic_for_res_conflict(un); 21601 /*NOTREACHED*/ 21602 } 21603 SD_INFO(SD_LOG_IOCTL_MHD, un, 21604 "sd_mhd_watch_cb: Reservation Conflict\n"); 21605 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 21606 mutex_exit(SD_MUTEX(un)); 21607 } 21608 } 21609 21610 if (sensep != NULL) { 21611 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 21612 mutex_enter(SD_MUTEX(un)); 21613 if ((scsi_sense_asc(sensep) == 21614 SD_SCSI_RESET_SENSE_CODE) && 21615 (un->un_resvd_status & SD_RESERVE)) { 21616 /* 21617 * The additional sense code indicates a power 21618 * on or bus device reset has occurred; update 21619 * the reservation status. 21620 */ 21621 un->un_resvd_status |= 21622 (SD_LOST_RESERVE | SD_WANT_RESERVE); 21623 SD_INFO(SD_LOG_IOCTL_MHD, un, 21624 "sd_mhd_watch_cb: Lost Reservation\n"); 21625 } 21626 } else { 21627 return (0); 21628 } 21629 } else { 21630 mutex_enter(SD_MUTEX(un)); 21631 } 21632 21633 if ((un->un_resvd_status & SD_RESERVE) && 21634 (un->un_resvd_status & SD_LOST_RESERVE)) { 21635 if (un->un_resvd_status & SD_WANT_RESERVE) { 21636 /* 21637 * A reset occurred in between the last probe and this 21638 * one so if a timeout is pending cancel it. 21639 */ 21640 if (un->un_resvd_timeid) { 21641 timeout_id_t temp_id = un->un_resvd_timeid; 21642 un->un_resvd_timeid = NULL; 21643 mutex_exit(SD_MUTEX(un)); 21644 (void) untimeout(temp_id); 21645 mutex_enter(SD_MUTEX(un)); 21646 } 21647 un->un_resvd_status &= ~SD_WANT_RESERVE; 21648 } 21649 if (un->un_resvd_timeid == 0) { 21650 /* Schedule a timeout to handle the lost reservation */ 21651 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 21652 (void *)dev, 21653 drv_usectohz(sd_reinstate_resv_delay)); 21654 } 21655 } 21656 mutex_exit(SD_MUTEX(un)); 21657 return (0); 21658 } 21659 21660 21661 /* 21662 * Function: sd_mhd_watch_incomplete() 21663 * 21664 * Description: This function is used to find out why a scsi pkt sent by the 21665 * scsi watch facility was not completed. Under some scenarios this 21666 * routine will return. Otherwise it will send a bus reset to see 21667 * if the drive is still online. 21668 * 21669 * Arguments: un - driver soft state (unit) structure 21670 * pkt - incomplete scsi pkt 21671 */ 21672 21673 static void 21674 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 21675 { 21676 int be_chatty; 21677 int perr; 21678 21679 ASSERT(pkt != NULL); 21680 ASSERT(un != NULL); 21681 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 21682 perr = (pkt->pkt_statistics & STAT_PERR); 21683 21684 mutex_enter(SD_MUTEX(un)); 21685 if (un->un_state == SD_STATE_DUMPING) { 21686 mutex_exit(SD_MUTEX(un)); 21687 return; 21688 } 21689 21690 switch (pkt->pkt_reason) { 21691 case CMD_UNX_BUS_FREE: 21692 /* 21693 * If we had a parity error that caused the target to drop BSY*, 21694 * don't be chatty about it. 21695 */ 21696 if (perr && be_chatty) { 21697 be_chatty = 0; 21698 } 21699 break; 21700 case CMD_TAG_REJECT: 21701 /* 21702 * The SCSI-2 spec states that a tag reject will be sent by the 21703 * target if tagged queuing is not supported. A tag reject may 21704 * also be sent during certain initialization periods or to 21705 * control internal resources. For the latter case the target 21706 * may also return Queue Full. 21707 * 21708 * If this driver receives a tag reject from a target that is 21709 * going through an init period or controlling internal 21710 * resources tagged queuing will be disabled. 
This is a less
 * than optimal behavior but the driver is unable to determine the
 * target state and assumes tagged queueing is not supported.
 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state, fallthrough and
		 * reset the target and/or bus unless selection did not complete
		 * (indicated by STATE_GOT_BUS) in which case we don't want to
		 * go through a target/bus reset
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of the command status byte for use in
 *		logging.
 *
 * Arguments: status - the command status byte
 *
 * Return Code: char * - string containing status description.
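 *		e.g. sd_sname(STATUS_CHECK) returns "check condition".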
21819 */ 21820 21821 static char * 21822 sd_sname(uchar_t status) 21823 { 21824 switch (status & STATUS_MASK) { 21825 case STATUS_GOOD: 21826 return ("good status"); 21827 case STATUS_CHECK: 21828 return ("check condition"); 21829 case STATUS_MET: 21830 return ("condition met"); 21831 case STATUS_BUSY: 21832 return ("busy"); 21833 case STATUS_INTERMEDIATE: 21834 return ("intermediate"); 21835 case STATUS_INTERMEDIATE_MET: 21836 return ("intermediate - condition met"); 21837 case STATUS_RESERVATION_CONFLICT: 21838 return ("reservation_conflict"); 21839 case STATUS_TERMINATED: 21840 return ("command terminated"); 21841 case STATUS_QFULL: 21842 return ("queue full"); 21843 default: 21844 return ("<unknown status>"); 21845 } 21846 } 21847 21848 21849 /* 21850 * Function: sd_mhd_resvd_recover() 21851 * 21852 * Description: This function adds a reservation entry to the 21853 * sd_resv_reclaim_request list and signals the reservation 21854 * reclaim thread that there is work pending. If the reservation 21855 * reclaim thread has not been previously created this function 21856 * will kick it off. 21857 * 21858 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21859 * among multiple watches that share this callback function 21860 * 21861 * Context: This routine is called by timeout() and is run in interrupt 21862 * context. It must not sleep or call other functions which may 21863 * sleep. 21864 */ 21865 21866 static void 21867 sd_mhd_resvd_recover(void *arg) 21868 { 21869 dev_t dev = (dev_t)arg; 21870 struct sd_lun *un; 21871 struct sd_thr_request *sd_treq = NULL; 21872 struct sd_thr_request *sd_cur = NULL; 21873 struct sd_thr_request *sd_prev = NULL; 21874 int already_there = 0; 21875 21876 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21877 return; 21878 } 21879 21880 mutex_enter(SD_MUTEX(un)); 21881 un->un_resvd_timeid = NULL; 21882 if (un->un_resvd_status & SD_WANT_RESERVE) { 21883 /* 21884 * There was a reset so don't issue the reserve, allow the 21885 * sd_mhd_watch_cb callback function to notice this and 21886 * reschedule the timeout for reservation. 21887 */ 21888 mutex_exit(SD_MUTEX(un)); 21889 return; 21890 } 21891 mutex_exit(SD_MUTEX(un)); 21892 21893 /* 21894 * Add this device to the sd_resv_reclaim_request list and the 21895 * sd_resv_reclaim_thread should take care of the rest. 21896 * 21897 * Note: We can't sleep in this context so if the memory allocation 21898 * fails allow the sd_mhd_watch_cb callback function to notice this and 21899 * reschedule the timeout for reservation. 
(4378460)
 */
	sd_treq = (struct sd_thr_request *)
	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
	if (sd_treq == NULL) {
		return;
	}

	sd_treq->sd_thr_req_next = NULL;
	sd_treq->dev = dev;
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		sd_tr.srq_thr_req_head = sd_treq;
	} else {
		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
			if (sd_cur->dev == dev) {
				/*
				 * already in Queue so don't log
				 * another request for the device
				 */
				already_there = 1;
				break;
			}
			sd_prev = sd_cur;
		}
		if (!already_there) {
			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
			    "logging request for %lx\n", dev);
			sd_prev->sd_thr_req_next = sd_treq;
		} else {
			kmem_free(sd_treq, sizeof (struct sd_thr_request));
		}
	}

	/*
	 * Create a kernel thread to do the reservation reclaim and free up
	 * this thread. We cannot block this thread while we go away to do
	 * the reservation reclaim.
	 */
	if (sd_tr.srq_resv_reclaim_thread == NULL)
		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
		    sd_resv_reclaim_thread, NULL,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);

	/* Tell the reservation reclaim thread that it has work to do */
	cv_signal(&sd_tr.srq_resv_reclaim_cv);
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}

/*
 * Function: sd_resv_reclaim_thread()
 *
 * Description: This function implements the reservation reclaim operations.
 *
 * Arguments: none; the thread is created with a NULL argument and works
 *	      off the global sd_tr request queue.
 */

static void
sd_resv_reclaim_thread()
{
	struct sd_lun		*un;
	struct sd_thr_request	*sd_mhreq;

	/* Wait for work */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		cv_wait(&sd_tr.srq_resv_reclaim_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	}

	/* Loop while we have work */
	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
		un = ddi_get_soft_state(sd_state,
		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
		if (un == NULL) {
			/*
			 * softstate structure is NULL so just
			 * dequeue the request and continue
			 */
			sd_tr.srq_thr_req_head =
			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
			kmem_free(sd_tr.srq_thr_cur_req,
			    sizeof (struct sd_thr_request));
			continue;
		}

		/* dequeue the request */
		sd_mhreq = sd_tr.srq_thr_cur_req;
		sd_tr.srq_thr_req_head =
		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);

		/*
		 * Reclaim reservation only if SD_RESERVE is still set. There
		 * may have been a call to MHIOCRELEASE before we got here.
		 */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			/*
			 * Note: The SD_LOST_RESERVE flag is cleared before
			 * reclaiming the reservation.
If it were cleared after the 22002 * call to sd_reserve_release, a reservation loss in the 22003 * window between pkt completion of the reserve cmd and the 22004 * mutex_enter below might not be recognized. 22005 */ 22006 un->un_resvd_status &= ~SD_LOST_RESERVE; 22007 mutex_exit(SD_MUTEX(un)); 22008 22009 if (sd_reserve_release(sd_mhreq->dev, 22010 SD_RESERVE) == 0) { 22011 mutex_enter(SD_MUTEX(un)); 22012 un->un_resvd_status |= SD_RESERVE; 22013 mutex_exit(SD_MUTEX(un)); 22014 SD_INFO(SD_LOG_IOCTL_MHD, un, 22015 "sd_resv_reclaim_thread: " 22016 "Reservation Recovered\n"); 22017 } else { 22018 mutex_enter(SD_MUTEX(un)); 22019 un->un_resvd_status |= SD_LOST_RESERVE; 22020 mutex_exit(SD_MUTEX(un)); 22021 SD_INFO(SD_LOG_IOCTL_MHD, un, 22022 "sd_resv_reclaim_thread: Failed " 22023 "Reservation Recovery\n"); 22024 } 22025 } else { 22026 mutex_exit(SD_MUTEX(un)); 22027 } 22028 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22029 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22030 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22031 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22032 /* 22033 * wakeup the destroy thread if anyone is waiting on 22034 * us to complete. 22035 */ 22036 cv_signal(&sd_tr.srq_inprocess_cv); 22037 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22038 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22039 } 22040 22041 /* 22042 * cleanup the sd_tr structure now that this thread will not exist 22043 */ 22044 ASSERT(sd_tr.srq_thr_req_head == NULL); 22045 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22046 sd_tr.srq_resv_reclaim_thread = NULL; 22047 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22048 thread_exit(); 22049 } 22050 22051 22052 /* 22053 * Function: sd_rmv_resv_reclaim_req() 22054 * 22055 * Description: This function removes any pending reservation reclaim requests 22056 * for the specified device. 22057 * 22058 * Arguments: dev - the device 'dev_t' 22059 */ 22060 22061 static void 22062 sd_rmv_resv_reclaim_req(dev_t dev) 22063 { 22064 struct sd_thr_request *sd_mhreq; 22065 struct sd_thr_request *sd_prev; 22066 22067 /* Remove a reservation reclaim request from the list */ 22068 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22069 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22070 /* 22071 * We are attempting to reinstate the reservation for 22072 * this device. We wait for sd_reserve_release() 22073 * to return before we return. 22074 */ 22075 cv_wait(&sd_tr.srq_inprocess_cv, 22076 &sd_tr.srq_resv_reclaim_mutex); 22077 } else { 22078 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22079 if (sd_mhreq && sd_mhreq->dev == dev) { 22080 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22081 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22082 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22083 return; 22084 } 22085 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22086 if (sd_mhreq && sd_mhreq->dev == dev) { 22087 break; 22088 } 22089 sd_prev = sd_mhreq; 22090 } 22091 if (sd_mhreq != NULL) { 22092 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22093 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22094 } 22095 } 22096 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22097 } 22098 22099 22100 /* 22101 * Function: sd_mhd_reset_notify_cb() 22102 * 22103 * Description: This is a call back function for scsi_reset_notify. This 22104 * function updates the softstate reserved status and logs the 22105 * reset. The driver scsi watch facility callback function 22106 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22107 * will reclaim the reservation. 
22108 * 22109 * Arguments: arg - driver soft state (unit) structure 22110 */ 22111 22112 static void 22113 sd_mhd_reset_notify_cb(caddr_t arg) 22114 { 22115 struct sd_lun *un = (struct sd_lun *)arg; 22116 22117 mutex_enter(SD_MUTEX(un)); 22118 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22119 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22120 SD_INFO(SD_LOG_IOCTL_MHD, un, 22121 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22122 } 22123 mutex_exit(SD_MUTEX(un)); 22124 } 22125 22126 22127 /* 22128 * Function: sd_take_ownership() 22129 * 22130 * Description: This routine implements an algorithm to achieve a stable 22131 * reservation on disks which don't implement priority reserve, 22132 * and makes sure that other hosts lose their re-reservation attempts. 22133 * This algorithm consists of a loop that keeps issuing the RESERVE 22134 * for some period of time (min_ownership_delay, default 6 seconds). 22135 * During that loop, it looks to see if there has been a bus device 22136 * reset or bus reset (both of which cause an existing reservation 22137 * to be lost). If the reservation is lost, issue the RESERVE until a 22138 * period of min_ownership_delay with no resets has gone by, or 22139 * until max_ownership_delay has expired. This loop ensures that 22140 * the host really did manage to reserve the device, in spite of 22141 * resets. The looping for min_ownership_delay (default six 22142 * seconds) is important to early generation clustering products, 22143 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22144 * MHIOCENFAILFAST periodic timer of two seconds. By having 22145 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22146 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22147 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22148 * have already noticed, via the MHIOCENFAILFAST polling, that it 22149 * no longer "owns" the disk and will have panicked itself. Thus, 22150 * the host issuing the MHIOCTKOWN is assured (with timing 22151 * dependencies) that by the time it actually starts to use the 22152 * disk for real work, the old owner is no longer accessing it. 22153 * 22154 * min_ownership_delay is the minimum amount of time for which the 22155 * disk must be reserved continuously devoid of resets before the 22156 * MHIOCTKOWN ioctl will return success. 22157 * 22158 * max_ownership_delay indicates the amount of time by which the 22159 * take ownership should succeed or time out with an error. 22160 * 22161 * Arguments: dev - the device 'dev_t' 22162 * *p - struct containing timing info. 22163 * 22164 * Return Code: 0 for success or error code 22165 */ 22166 22167 static int 22168 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22169 { 22170 struct sd_lun *un; 22171 int rval; 22172 int err; 22173 int reservation_count = 0; 22174 int min_ownership_delay = 6000000; /* in usec */ 22175 int max_ownership_delay = 30000000; /* in usec */ 22176 clock_t start_time; /* starting time of this algorithm */ 22177 clock_t end_time; /* time limit for giving up */ 22178 clock_t ownership_time; /* time limit for stable ownership */ 22179 clock_t current_time; 22180 clock_t previous_current_time; 22181 22182 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22183 return (ENXIO); 22184 } 22185 22186 /* 22187 * Attempt a device reservation. A priority reservation is requested. 
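 * A priority reservation differs from a plain SD_RESERVE only in its
 * conflict handling: as sd_reserve_release() below shows, a reservation
 * conflict is answered with a LUN/target/bus reset to break the other
 * host's reservation, followed by a fresh RESERVE.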
22188 */ 22189 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22190 != SD_SUCCESS) { 22191 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22192 "sd_take_ownership: return(1)=%d\n", rval); 22193 return (rval); 22194 } 22195 22196 /* Update the softstate reserved status to indicate the reservation */ 22197 mutex_enter(SD_MUTEX(un)); 22198 un->un_resvd_status |= SD_RESERVE; 22199 un->un_resvd_status &= 22200 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22201 mutex_exit(SD_MUTEX(un)); 22202 22203 if (p != NULL) { 22204 if (p->min_ownership_delay != 0) { 22205 min_ownership_delay = p->min_ownership_delay * 1000; 22206 } 22207 if (p->max_ownership_delay != 0) { 22208 max_ownership_delay = p->max_ownership_delay * 1000; 22209 } 22210 } 22211 SD_INFO(SD_LOG_IOCTL_MHD, un, 22212 "sd_take_ownership: min, max delays: %d, %d\n", 22213 min_ownership_delay, max_ownership_delay); 22214 22215 start_time = ddi_get_lbolt(); 22216 current_time = start_time; 22217 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22218 end_time = start_time + drv_usectohz(max_ownership_delay); 22219 22220 while (current_time - end_time < 0) { 22221 delay(drv_usectohz(500000)); 22222 22223 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22224 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22225 mutex_enter(SD_MUTEX(un)); 22226 rval = (un->un_resvd_status & 22227 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22228 mutex_exit(SD_MUTEX(un)); 22229 break; 22230 } 22231 } 22232 previous_current_time = current_time; 22233 current_time = ddi_get_lbolt(); 22234 mutex_enter(SD_MUTEX(un)); 22235 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22236 ownership_time = ddi_get_lbolt() + 22237 drv_usectohz(min_ownership_delay); 22238 reservation_count = 0; 22239 } else { 22240 reservation_count++; 22241 } 22242 un->un_resvd_status |= SD_RESERVE; 22243 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22244 mutex_exit(SD_MUTEX(un)); 22245 22246 SD_INFO(SD_LOG_IOCTL_MHD, un, 22247 "sd_take_ownership: ticks for loop iteration=%ld, " 22248 "reservation=%s\n", (current_time - previous_current_time), 22249 reservation_count ? 
"ok" : "reclaimed"); 22250 22251 if (current_time - ownership_time >= 0 && 22252 reservation_count >= 4) { 22253 rval = 0; /* Achieved a stable ownership */ 22254 break; 22255 } 22256 if (current_time - end_time >= 0) { 22257 rval = EACCES; /* No ownership in max possible time */ 22258 break; 22259 } 22260 } 22261 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22262 "sd_take_ownership: return(2)=%d\n", rval); 22263 return (rval); 22264 } 22265 22266 22267 /* 22268 * Function: sd_reserve_release() 22269 * 22270 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22271 * PRIORITY RESERVE commands based on a user specified command type 22272 * 22273 * Arguments: dev - the device 'dev_t' 22274 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22275 * SD_RESERVE, SD_RELEASE 22276 * 22277 * Return Code: 0 or Error Code 22278 */ 22279 22280 static int 22281 sd_reserve_release(dev_t dev, int cmd) 22282 { 22283 struct uscsi_cmd *com = NULL; 22284 struct sd_lun *un = NULL; 22285 char cdb[CDB_GROUP0]; 22286 int rval; 22287 22288 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22289 (cmd == SD_PRIORITY_RESERVE)); 22290 22291 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22292 return (ENXIO); 22293 } 22294 22295 /* instantiate and initialize the command and cdb */ 22296 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22297 bzero(cdb, CDB_GROUP0); 22298 com->uscsi_flags = USCSI_SILENT; 22299 com->uscsi_timeout = un->un_reserve_release_time; 22300 com->uscsi_cdblen = CDB_GROUP0; 22301 com->uscsi_cdb = cdb; 22302 if (cmd == SD_RELEASE) { 22303 cdb[0] = SCMD_RELEASE; 22304 } else { 22305 cdb[0] = SCMD_RESERVE; 22306 } 22307 22308 /* Send the command. */ 22309 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22310 SD_PATH_STANDARD); 22311 22312 /* 22313 * "break" a reservation that is held by another host, by issuing a 22314 * reset if priority reserve is desired, and we could not get the 22315 * device. 22316 */ 22317 if ((cmd == SD_PRIORITY_RESERVE) && 22318 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22319 /* 22320 * First try to reset the LUN. If we cannot, then try a target 22321 * reset, followed by a bus reset if the target reset fails. 22322 */ 22323 int reset_retval = 0; 22324 if (un->un_f_lun_reset_enabled == TRUE) { 22325 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22326 } 22327 if (reset_retval == 0) { 22328 /* The LUN reset either failed or was not issued */ 22329 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22330 } 22331 if ((reset_retval == 0) && 22332 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22333 rval = EIO; 22334 kmem_free(com, sizeof (*com)); 22335 return (rval); 22336 } 22337 22338 bzero(com, sizeof (struct uscsi_cmd)); 22339 com->uscsi_flags = USCSI_SILENT; 22340 com->uscsi_cdb = cdb; 22341 com->uscsi_cdblen = CDB_GROUP0; 22342 com->uscsi_timeout = 5; 22343 22344 /* 22345 * Reissue the last reserve command, this time without request 22346 * sense. Assume that it is just a regular reserve command. 22347 */ 22348 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22349 SD_PATH_STANDARD); 22350 } 22351 22352 /* Return an error if still getting a reservation conflict. 
*/ 22353 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22354 rval = EACCES; 22355 } 22356 22357 kmem_free(com, sizeof (*com)); 22358 return (rval); 22359 } 22360 22361 22362 #define SD_NDUMP_RETRIES 12 22363 /* 22364 * System Crash Dump routine 22365 */ 22366 22367 static int 22368 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22369 { 22370 int instance; 22371 int partition; 22372 int i; 22373 int err; 22374 struct sd_lun *un; 22375 struct scsi_pkt *wr_pktp; 22376 struct buf *wr_bp; 22377 struct buf wr_buf; 22378 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22379 daddr_t tgt_blkno; /* rmw - blkno for target */ 22380 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22381 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22382 size_t io_start_offset; 22383 int doing_rmw = FALSE; 22384 int rval; 22385 #if defined(__i386) || defined(__amd64) 22386 ssize_t dma_resid; 22387 daddr_t oblkno; 22388 #endif 22389 diskaddr_t nblks = 0; 22390 diskaddr_t start_block; 22391 22392 instance = SDUNIT(dev); 22393 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22394 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22395 return (ENXIO); 22396 } 22397 22398 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22399 22400 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22401 22402 partition = SDPART(dev); 22403 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22404 22405 /* Validate the blocks to dump against the partition size. */ 22406 22407 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22408 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22409 22410 if ((blkno + nblk) > nblks) { 22411 SD_TRACE(SD_LOG_DUMP, un, 22412 "sddump: dump range larger than partition: " 22413 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22414 blkno, nblk, nblks); 22415 return (EINVAL); 22416 } 22417 22418 mutex_enter(&un->un_pm_mutex); 22419 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22420 struct scsi_pkt *start_pktp; 22421 22422 mutex_exit(&un->un_pm_mutex); 22423 22424 /* 22425 * use the pm framework to power on the HBA first 22426 */ 22427 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22428 22429 /* 22430 * Dump no longer uses sdpower to power on a device; it's 22431 * in-line here so it can be done in polled mode. 22432 */ 22433 22434 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22435 22436 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22437 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22438 22439 if (start_pktp == NULL) { 22440 /* We were not given a SCSI packet, fail. */ 22441 return (EIO); 22442 } 22443 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22444 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22445 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22446 start_pktp->pkt_flags = FLAG_NOINTR; 22447 22448 mutex_enter(SD_MUTEX(un)); 22449 SD_FILL_SCSI1_LUN(un, start_pktp); 22450 mutex_exit(SD_MUTEX(un)); 22451 /* 22452 * Scsi_poll returns 0 (success) if the command completes and 22453 * the status block is STATUS_GOOD. 22454 */ 22455 if (sd_scsi_poll(un, start_pktp) != 0) { 22456 scsi_destroy_pkt(start_pktp); 22457 return (EIO); 22458 } 22459 scsi_destroy_pkt(start_pktp); 22460 (void) sd_ddi_pm_resume(un); 22461 } else { 22462 mutex_exit(&un->un_pm_mutex); 22463 } 22464 22465 mutex_enter(SD_MUTEX(un)); 22466 un->un_throttle = 0; 22467 22468 /* 22469 * The first time through, reset the specific target device. 22470 * However, when cpr calls sddump we know that sd is in 22471 * a good state so no bus reset is required. 
22472 * Clear sense data via Request Sense cmd. 22473 * In sddump we don't care about allow_bus_device_reset anymore 22474 */ 22475 22476 if ((un->un_state != SD_STATE_SUSPENDED) && 22477 (un->un_state != SD_STATE_DUMPING)) { 22478 22479 New_state(un, SD_STATE_DUMPING); 22480 22481 if (un->un_f_is_fibre == FALSE) { 22482 mutex_exit(SD_MUTEX(un)); 22483 /* 22484 * Attempt a bus reset for parallel scsi. 22485 * 22486 * Note: A bus reset is required because on some host 22487 * systems (i.e. E420R) a bus device reset is 22488 * insufficient to reset the state of the target. 22489 * 22490 * Note: Don't issue the reset for fibre-channel, 22491 * because this tends to hang the bus (loop) for 22492 * too long while everyone is logging out and in 22493 * and the deadman timer for dumping will fire 22494 * before the dump is complete. 22495 */ 22496 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22497 mutex_enter(SD_MUTEX(un)); 22498 Restore_state(un); 22499 mutex_exit(SD_MUTEX(un)); 22500 return (EIO); 22501 } 22502 22503 /* Delay to give the device some recovery time. */ 22504 drv_usecwait(10000); 22505 22506 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22507 SD_INFO(SD_LOG_DUMP, un, 22508 "sddump: sd_send_polled_RQS failed\n"); 22509 } 22510 mutex_enter(SD_MUTEX(un)); 22511 } 22512 } 22513 22514 /* 22515 * Convert the partition-relative block number to a 22516 * disk physical block number. 22517 */ 22518 blkno += start_block; 22519 22520 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22521 22522 22523 /* 22524 * Check if the device has a non-512 block size. 22525 */ 22526 wr_bp = NULL; 22527 if (NOT_DEVBSIZE(un)) { 22528 tgt_byte_offset = blkno * un->un_sys_blocksize; 22529 tgt_byte_count = nblk * un->un_sys_blocksize; 22530 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22531 (tgt_byte_count % un->un_tgt_blocksize)) { 22532 doing_rmw = TRUE; 22533 /* 22534 * Calculate the block number and number of blocks 22535 * in terms of the media block size. 22536 */ 22537 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22538 tgt_nblk = 22539 ((tgt_byte_offset + tgt_byte_count + 22540 (un->un_tgt_blocksize - 1)) / 22541 un->un_tgt_blocksize) - tgt_blkno; 22542 22543 /* 22544 * Invoke the routine which is going to do the read part 22545 * of the read-modify-write. 22546 * Note that this routine returns a pointer to 22547 * a valid bp in wr_bp. 22548 */ 22549 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 22550 &wr_bp); 22551 if (err) { 22552 mutex_exit(SD_MUTEX(un)); 22553 return (err); 22554 } 22555 /* 22556 * Offset is being calculated as: 22557 * (original block # * system block size) - 22558 * (new block # * target block size) 22559 */ 22560 io_start_offset = 22561 ((uint64_t)(blkno * un->un_sys_blocksize)) - 22562 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 22563 22564 ASSERT((io_start_offset >= 0) && 22565 (io_start_offset < un->un_tgt_blocksize)); 22566 /* 22567 * Do the modify portion of read modify write. 
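 * For illustration, assuming a 512-byte system block size and a
 * 2048-byte target block size (the actual values vary per device):
 * dumping nblk == 1 at physical blkno == 7 gives
 * tgt_byte_offset == 3584, tgt_blkno == 1, tgt_nblk == 1, and
 * io_start_offset == (7 * 512) - (1 * 2048) == 1536, so the 512
 * bytes of dump data land 1536 bytes into the staging buffer that
 * sddump_do_read_of_rmw() read back from the media.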
22568 */ 22569 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 22570 (size_t)nblk * un->un_sys_blocksize); 22571 } else { 22572 doing_rmw = FALSE; 22573 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22574 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 22575 } 22576 22577 /* Convert blkno and nblk to target blocks */ 22578 blkno = tgt_blkno; 22579 nblk = tgt_nblk; 22580 } else { 22581 wr_bp = &wr_buf; 22582 bzero(wr_bp, sizeof (struct buf)); 22583 wr_bp->b_flags = B_BUSY; 22584 wr_bp->b_un.b_addr = addr; 22585 wr_bp->b_bcount = nblk << DEV_BSHIFT; 22586 wr_bp->b_resid = 0; 22587 } 22588 22589 mutex_exit(SD_MUTEX(un)); 22590 22591 /* 22592 * Obtain a SCSI packet for the write command. 22593 * It should be safe to call the allocator here without 22594 * worrying about being locked for DVMA mapping because 22595 * the address we're passed is already a DVMA mapping 22596 * 22597 * We are also not going to worry about semaphore ownership 22598 * in the dump buffer. Dumping is single threaded at present. 22599 */ 22600 22601 wr_pktp = NULL; 22602 22603 #if defined(__i386) || defined(__amd64) 22604 dma_resid = wr_bp->b_bcount; 22605 oblkno = blkno; 22606 while (dma_resid != 0) { 22607 #endif 22608 22609 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22610 wr_bp->b_flags &= ~B_ERROR; 22611 22612 #if defined(__i386) || defined(__amd64) 22613 blkno = oblkno + 22614 ((wr_bp->b_bcount - dma_resid) / 22615 un->un_tgt_blocksize); 22616 nblk = dma_resid / un->un_tgt_blocksize; 22617 22618 if (wr_pktp) { 22619 /* Partial DMA transfers after initial transfer */ 22620 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 22621 blkno, nblk); 22622 } else { 22623 /* Initial transfer */ 22624 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22625 un->un_pkt_flags, NULL_FUNC, NULL, 22626 blkno, nblk); 22627 } 22628 #else 22629 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22630 0, NULL_FUNC, NULL, blkno, nblk); 22631 #endif 22632 22633 if (rval == 0) { 22634 /* We were given a SCSI packet, continue. 
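 * On x86/x64, the DMA binding may have been only partial; after this
 * allocation loop, the leftover count is read from pkt_resid and the
 * enclosing while (dma_resid != 0) loop recomputes blkno/nblk and
 * continues the transfer via sd_setup_next_rw_pkt() on the same packet.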
*/ 22635 break; 22636 } 22637 22638 if (i == 0) { 22639 if (wr_bp->b_flags & B_ERROR) { 22640 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22641 "no resources for dumping; " 22642 "error code: 0x%x, retrying", 22643 geterror(wr_bp)); 22644 } else { 22645 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22646 "no resources for dumping; retrying"); 22647 } 22648 } else if (i != (SD_NDUMP_RETRIES - 1)) { 22649 if (wr_bp->b_flags & B_ERROR) { 22650 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22651 "no resources for dumping; error code: " 22652 "0x%x, retrying\n", geterror(wr_bp)); 22653 } 22654 } else { 22655 if (wr_bp->b_flags & B_ERROR) { 22656 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22657 "no resources for dumping; " 22658 "error code: 0x%x, retries failed, " 22659 "giving up.\n", geterror(wr_bp)); 22660 } else { 22661 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22662 "no resources for dumping; " 22663 "retries failed, giving up.\n"); 22664 } 22665 mutex_enter(SD_MUTEX(un)); 22666 Restore_state(un); 22667 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 22668 mutex_exit(SD_MUTEX(un)); 22669 scsi_free_consistent_buf(wr_bp); 22670 } else { 22671 mutex_exit(SD_MUTEX(un)); 22672 } 22673 return (EIO); 22674 } 22675 drv_usecwait(10000); 22676 } 22677 22678 #if defined(__i386) || defined(__amd64) 22679 /* 22680 * save the resid from PARTIAL_DMA 22681 */ 22682 dma_resid = wr_pktp->pkt_resid; 22683 if (dma_resid != 0) 22684 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 22685 wr_pktp->pkt_resid = 0; 22686 #endif 22687 22688 /* SunBug 1222170 */ 22689 wr_pktp->pkt_flags = FLAG_NOINTR; 22690 22691 err = EIO; 22692 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22693 22694 /* 22695 * Scsi_poll returns 0 (success) if the command completes and 22696 * the status block is STATUS_GOOD. We should only check 22697 * errors if this condition is not true. Even then we should 22698 * send our own request sense packet only if we have a check 22699 * condition and auto request sense has not been performed by 22700 * the hba. 22701 */ 22702 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 22703 22704 if ((sd_scsi_poll(un, wr_pktp) == 0) && 22705 (wr_pktp->pkt_resid == 0)) { 22706 err = SD_SUCCESS; 22707 break; 22708 } 22709 22710 /* 22711 * Check CMD_DEV_GONE 1st, give up if device is gone. 22712 */ 22713 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 22714 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22715 "Device is gone\n"); 22716 break; 22717 } 22718 22719 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 22720 SD_INFO(SD_LOG_DUMP, un, 22721 "sddump: write failed with CHECK, try # %d\n", i); 22722 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 22723 (void) sd_send_polled_RQS(un); 22724 } 22725 22726 continue; 22727 } 22728 22729 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 22730 int reset_retval = 0; 22731 22732 SD_INFO(SD_LOG_DUMP, un, 22733 "sddump: write failed with BUSY, try # %d\n", i); 22734 22735 if (un->un_f_lun_reset_enabled == TRUE) { 22736 reset_retval = scsi_reset(SD_ADDRESS(un), 22737 RESET_LUN); 22738 } 22739 if (reset_retval == 0) { 22740 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22741 } 22742 (void) sd_send_polled_RQS(un); 22743 22744 } else { 22745 SD_INFO(SD_LOG_DUMP, un, 22746 "sddump: write failed with 0x%x, try # %d\n", 22747 SD_GET_PKT_STATUS(wr_pktp), i); 22748 mutex_enter(SD_MUTEX(un)); 22749 sd_reset_target(un, wr_pktp); 22750 mutex_exit(SD_MUTEX(un)); 22751 } 22752 22753 /* 22754 * If we are not getting anywhere with lun/target resets, 22755 * let's reset the bus. 
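 * This mirrors the LUN-then-target escalation used for BUSY status
 * above; with SD_NDUMP_RETRIES at 12, the full bus reset is attempted
 * once, halfway through the retries (i == 6).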
22756 */ 22757 if (i == SD_NDUMP_RETRIES/2) { 22758 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22759 (void) sd_send_polled_RQS(un); 22760 } 22761 22762 } 22763 #if defined(__i386) || defined(__amd64) 22764 } /* dma_resid */ 22765 #endif 22766 22767 scsi_destroy_pkt(wr_pktp); 22768 mutex_enter(SD_MUTEX(un)); 22769 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 22770 mutex_exit(SD_MUTEX(un)); 22771 scsi_free_consistent_buf(wr_bp); 22772 } else { 22773 mutex_exit(SD_MUTEX(un)); 22774 } 22775 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 22776 return (err); 22777 } 22778 22779 /* 22780 * Function: sd_scsi_poll() 22781 * 22782 * Description: This is a wrapper for the scsi_poll call. 22783 * 22784 * Arguments: sd_lun - The unit structure 22785 * scsi_pkt - The scsi packet being sent to the device. 22786 * 22787 * Return Code: 0 - Command completed successfully with good status 22788 * -1 - Command failed. This could indicate a check condition 22789 * or other status value requiring recovery action. 22790 * 22791 */ 22792 22793 static int 22794 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 22795 { 22796 int status; 22797 22798 ASSERT(un != NULL); 22799 ASSERT(!mutex_owned(SD_MUTEX(un))); 22800 ASSERT(pktp != NULL); 22801 22802 status = SD_SUCCESS; 22803 22804 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 22805 pktp->pkt_flags |= un->un_tagflags; 22806 pktp->pkt_flags &= ~FLAG_NODISCON; 22807 } 22808 22809 status = sd_ddi_scsi_poll(pktp); 22810 /* 22811 * Scsi_poll returns 0 (success) if the command completes and the 22812 * status block is STATUS_GOOD. We should only check errors if this 22813 * condition is not true. Even then we should send our own request 22814 * sense packet only if we have a check condition and auto 22815 * request sense has not been performed by the hba. 22816 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 22817 */ 22818 if ((status != SD_SUCCESS) && 22819 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 22820 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 22821 (pktp->pkt_reason != CMD_DEV_GONE)) 22822 (void) sd_send_polled_RQS(un); 22823 22824 return (status); 22825 } 22826 22827 /* 22828 * Function: sd_send_polled_RQS() 22829 * 22830 * Description: This sends the request sense command to a device. 22831 * 22832 * Arguments: sd_lun - The unit structure 22833 * 22834 * Return Code: 0 - Command completed successfully with good status 22835 * -1 - Command failed. 
22836 * 22837 */ 22838 22839 static int 22840 sd_send_polled_RQS(struct sd_lun *un) 22841 { 22842 int ret_val; 22843 struct scsi_pkt *rqs_pktp; 22844 struct buf *rqs_bp; 22845 22846 ASSERT(un != NULL); 22847 ASSERT(!mutex_owned(SD_MUTEX(un))); 22848 22849 ret_val = SD_SUCCESS; 22850 22851 rqs_pktp = un->un_rqs_pktp; 22852 rqs_bp = un->un_rqs_bp; 22853 22854 mutex_enter(SD_MUTEX(un)); 22855 22856 if (un->un_sense_isbusy) { 22857 ret_val = SD_FAILURE; 22858 mutex_exit(SD_MUTEX(un)); 22859 return (ret_val); 22860 } 22861 22862 /* 22863 * If the request sense buffer (and packet) is not in use, 22864 * let's set un_sense_isbusy and send our packet 22865 */ 22866 un->un_sense_isbusy = 1; 22867 rqs_pktp->pkt_resid = 0; 22868 rqs_pktp->pkt_reason = 0; 22869 rqs_pktp->pkt_flags |= FLAG_NOINTR; 22870 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 22871 22872 mutex_exit(SD_MUTEX(un)); 22873 22874 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 22875 " 0x%p\n", rqs_bp->b_un.b_addr); 22876 22877 /* 22878 * Can't send this to sd_scsi_poll; we'd wrap ourselves around the 22879 * axle - it has a call into us! 22880 */ 22881 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 22882 SD_INFO(SD_LOG_COMMON, un, 22883 "sd_send_polled_RQS: RQS failed\n"); 22884 } 22885 22886 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 22887 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 22888 22889 mutex_enter(SD_MUTEX(un)); 22890 un->un_sense_isbusy = 0; 22891 mutex_exit(SD_MUTEX(un)); 22892 22893 return (ret_val); 22894 } 22895 22896 /* 22897 * Defines needed for localized version of the scsi_poll routine. 22898 */ 22899 #define SD_CSEC 10000 /* usecs */ 22900 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 22901 22902 22903 /* 22904 * Function: sd_ddi_scsi_poll() 22905 * 22906 * Description: Localized version of the scsi_poll routine. The purpose is to 22907 * send a scsi_pkt to a device as a polled command. This version 22908 * is to ensure more robust handling of transport errors. 22909 * Specifically, this routine cures the not-ready to ready 22910 * transition for power-up and reset of Sonoma devices. This can take 22911 * up to 45 seconds for power-on and 20 seconds for reset of a 22912 * Sonoma LUN. 22913 * 22914 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 22915 * 22916 * Return Code: 0 - Command completed successfully with good status 22917 * -1 - Command failed. 22918 * 22919 */ 22920 22921 static int 22922 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 22923 { 22924 int busy_count; 22925 int timeout; 22926 int rval = SD_FAILURE; 22927 int savef; 22928 uint8_t *sensep; 22929 long savet; 22930 void (*savec)(); 22931 /* 22932 * The following is defined in machdep.c and is used in determining if 22933 * the scsi transport system will do polled I/O instead of interrupt 22934 * I/O when called from xx_dump(). 22935 */ 22936 extern int do_polled_io; 22937 22938 /* 22939 * save old flags in pkt, to restore at end 22940 */ 22941 savef = pkt->pkt_flags; 22942 savec = pkt->pkt_comp; 22943 savet = pkt->pkt_time; 22944 22945 pkt->pkt_flags |= FLAG_NOINTR; 22946 22947 /* 22948 * XXX there is nothing in the SCSA spec that states that we should not 22949 * do a callback for polled cmds; however, removing this will break sd 22950 * and probably other target drivers 22951 */ 22952 pkt->pkt_comp = NULL; 22953 22954 /* 22955 * we don't like a polled command without a timeout. 22956 * 60 seconds seems long enough. 
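 * (With SD_CSEC at 10000 usec and SD_SEC_TO_CSEC at 100, a 60 second
 * pkt_time translates to timeout == 6000 trips through the 10 msec
 * polling loop below; the one-second BUSY and NOT READY retries keep
 * the accounting honest by adding SD_SEC_TO_CSEC - 1 extra ticks to
 * busy_count per retry.)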
22957 */ 22958 if (pkt->pkt_time == 0) { 22959 pkt->pkt_time = SCSI_POLL_TIMEOUT; 22960 } 22961 22962 /* 22963 * Send polled cmd. 22964 * 22965 * We do some error recovery for various errors. Tran_busy, 22966 * queue full, and non-dispatched commands are retried every 10 msec, 22967 * as they are typically transient failures. Busy status and Not 22968 * Ready are retried every second, as these statuses take a while to 22969 * change. Unit Attention is retried pkt_time (60) times 22970 * with no delay. 22971 */ 22972 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 22973 22974 for (busy_count = 0; busy_count < timeout; busy_count++) { 22975 int rc; 22976 int poll_delay; 22977 22978 /* 22979 * Initialize pkt status variables. 22980 */ 22981 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 22982 22983 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 22984 if (rc != TRAN_BUSY) { 22985 /* Transport failed - give up. */ 22986 break; 22987 } else { 22988 /* Transport busy - try again. */ 22989 poll_delay = 1 * SD_CSEC; /* 10 msec */ 22990 } 22991 } else { 22992 /* 22993 * Transport accepted - check pkt status. 22994 */ 22995 rc = (*pkt->pkt_scbp) & STATUS_MASK; 22996 if (pkt->pkt_reason == CMD_CMPLT && 22997 rc == STATUS_CHECK && 22998 pkt->pkt_state & STATE_ARQ_DONE) { 22999 struct scsi_arq_status *arqstat = 23000 (struct scsi_arq_status *)(pkt->pkt_scbp); 23001 23002 sensep = (uint8_t *)&arqstat->sts_sensedata; 23003 } else { 23004 sensep = NULL; 23005 } 23006 23007 if ((pkt->pkt_reason == CMD_CMPLT) && 23008 (rc == STATUS_GOOD)) { 23009 /* No error - we're done */ 23010 rval = SD_SUCCESS; 23011 break; 23012 23013 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23014 /* Lost connection - give up */ 23015 break; 23016 23017 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23018 (pkt->pkt_state == 0)) { 23019 /* Pkt not dispatched - try again. */ 23020 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23021 23022 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23023 (rc == STATUS_QFULL)) { 23024 /* Queue full - try again. */ 23025 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23026 23027 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23028 (rc == STATUS_BUSY)) { 23029 /* Busy - try again. */ 23030 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23031 busy_count += (SD_SEC_TO_CSEC - 1); 23032 23033 } else if ((sensep != NULL) && 23034 (scsi_sense_key(sensep) == 23035 KEY_UNIT_ATTENTION)) { 23036 /* Unit Attention - try again */ 23037 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 23038 continue; 23039 23040 } else if ((sensep != NULL) && 23041 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23042 (scsi_sense_asc(sensep) == 0x04) && 23043 (scsi_sense_ascq(sensep) == 0x01)) { 23044 /* Not ready -> ready - try again. */ 23045 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23046 busy_count += (SD_SEC_TO_CSEC - 1); 23047 23048 } else { 23049 /* BAD status - give up. 
*/ 23050 break; 23051 } 23052 } 23053 23054 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 23055 !do_polled_io) { 23056 delay(drv_usectohz(poll_delay)); 23057 } else { 23058 /* we busy wait during cpr_dump or interrupt threads */ 23059 drv_usecwait(poll_delay); 23060 } 23061 } 23062 23063 pkt->pkt_flags = savef; 23064 pkt->pkt_comp = savec; 23065 pkt->pkt_time = savet; 23066 return (rval); 23067 } 23068 23069 23070 /* 23071 * Function: sd_persistent_reservation_in_read_keys 23072 * 23073 * Description: This routine is the driver entry point for handling CD-ROM 23074 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23075 * by sending the SCSI-3 PRIN commands to the device. 23076 * Processes the read keys command response by copying the 23077 * reservation key information into the user provided buffer. 23078 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23079 * 23080 * Arguments: un - Pointer to soft state struct for the target. 23081 * usrp - user provided pointer to multihost Persistent In Read 23082 * Keys structure (mhioc_inkeys_t) 23083 * flag - this argument is a pass through to ddi_copyxxx() 23084 * directly from the mode argument of ioctl(). 23085 * 23086 * Return Code: 0 - Success 23087 * EACCES 23088 * ENOTSUP 23089 * errno return code from sd_send_scsi_cmd() 23090 * 23091 * Context: Can sleep. Does not return until command is completed. 23092 */ 23093 23094 static int 23095 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23096 mhioc_inkeys_t *usrp, int flag) 23097 { 23098 #ifdef _MULTI_DATAMODEL 23099 struct mhioc_key_list32 li32; 23100 #endif 23101 sd_prin_readkeys_t *in; 23102 mhioc_inkeys_t *ptr; 23103 mhioc_key_list_t li; 23104 uchar_t *data_bufp = NULL; 23105 int data_len = 0; 23106 int rval; 23107 size_t copysz; 23108 23109 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23110 return (EINVAL); 23111 } 23112 bzero(&li, sizeof (mhioc_key_list_t)); 23113 23114 /* 23115 * Get the listsize from user 23116 */ 23117 #ifdef _MULTI_DATAMODEL 23118 23119 switch (ddi_model_convert_from(flag & FMODELS)) { 23120 case DDI_MODEL_ILP32: 23121 copysz = sizeof (struct mhioc_key_list32); 23122 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23123 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23124 "sd_persistent_reservation_in_read_keys: " 23125 "failed ddi_copyin: mhioc_key_list32_t\n"); 23126 rval = EFAULT; 23127 goto done; 23128 } 23129 li.listsize = li32.listsize; 23130 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23131 break; 23132 23133 case DDI_MODEL_NONE: 23134 copysz = sizeof (mhioc_key_list_t); 23135 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23136 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23137 "sd_persistent_reservation_in_read_keys: " 23138 "failed ddi_copyin: mhioc_key_list_t\n"); 23139 rval = EFAULT; 23140 goto done; 23141 } 23142 break; 23143 } 23144 23145 #else /* ! 
_MULTI_DATAMODEL */ 23146 copysz = sizeof (mhioc_key_list_t); 23147 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23148 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23149 "sd_persistent_reservation_in_read_keys: " 23150 "failed ddi_copyin: mhioc_key_list_t\n"); 23151 rval = EFAULT; 23152 goto done; 23153 } 23154 #endif 23155 23156 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23157 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23158 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23159 23160 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23161 data_len, data_bufp)) != 0) { 23162 goto done; 23163 } 23164 in = (sd_prin_readkeys_t *)data_bufp; 23165 ptr->generation = BE_32(in->generation); 23166 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23167 23168 /* 23169 * Return the min(listsize, listlen) keys 23170 */ 23171 #ifdef _MULTI_DATAMODEL 23172 23173 switch (ddi_model_convert_from(flag & FMODELS)) { 23174 case DDI_MODEL_ILP32: 23175 li32.listlen = li.listlen; 23176 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23177 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23178 "sd_persistent_reservation_in_read_keys: " 23179 "failed ddi_copyout: mhioc_key_list32_t\n"); 23180 rval = EFAULT; 23181 goto done; 23182 } 23183 break; 23184 23185 case DDI_MODEL_NONE: 23186 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23187 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23188 "sd_persistent_reservation_in_read_keys: " 23189 "failed ddi_copyout: mhioc_key_list_t\n"); 23190 rval = EFAULT; 23191 goto done; 23192 } 23193 break; 23194 } 23195 23196 #else /* ! _MULTI_DATAMODEL */ 23197 23198 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23199 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23200 "sd_persistent_reservation_in_read_keys: " 23201 "failed ddi_copyout: mhioc_key_list_t\n"); 23202 rval = EFAULT; 23203 goto done; 23204 } 23205 23206 #endif /* _MULTI_DATAMODEL */ 23207 23208 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23209 li.listsize * MHIOC_RESV_KEY_SIZE); 23210 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23211 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23212 "sd_persistent_reservation_in_read_keys: " 23213 "failed ddi_copyout: keylist\n"); 23214 rval = EFAULT; 23215 } 23216 done: 23217 if (data_bufp != NULL) { kmem_free(data_bufp, data_len); } 23218 return (rval); 23219 } 23220 23221 23222 /* 23223 * Function: sd_persistent_reservation_in_read_resv 23224 * 23225 * Description: This routine is the driver entry point for handling CD-ROM 23226 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23227 * by sending the SCSI-3 PRIN commands to the device. 23228 * Process the read persistent reservations command response by 23229 * copying the reservation information into the user provided 23230 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23231 * 23232 * Arguments: un - Pointer to soft state struct for the target. 23233 * usrp - user provided pointer to multihost Persistent In Read 23234 * Reservations structure (mhioc_inresvs_t) 23235 * flag - this argument is a pass through to ddi_copyxxx() 23236 * directly from the mode argument of ioctl(). 23237 * 23238 * Return Code: 0 - Success 23239 * EACCES 23240 * ENOTSUP 23241 * errno return code from sd_send_scsi_cmd() 23242 * 23243 * Context: Can sleep. Does not return until command is completed. 
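 *
 * Note: as with the read keys ioctl above, the caller supplies
 * listsize (the capacity of its buffer) and receives listlen (the
 * count reported by the device); only min(listsize, listlen)
 * descriptors are copied out, so a caller that sees
 * listlen > listsize can reallocate and retry with a larger buffer.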
23244 */ 23245 23246 static int 23247 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23248 mhioc_inresvs_t *usrp, int flag) 23249 { 23250 #ifdef _MULTI_DATAMODEL 23251 struct mhioc_resv_desc_list32 resvlist32; 23252 #endif 23253 sd_prin_readresv_t *in; 23254 mhioc_inresvs_t *ptr; 23255 sd_readresv_desc_t *readresv_ptr; 23256 mhioc_resv_desc_list_t resvlist; 23257 mhioc_resv_desc_t resvdesc; 23258 uchar_t *data_bufp = NULL; 23259 int data_len = 0; 23260 int rval; 23261 int i; 23262 size_t copysz; 23263 mhioc_resv_desc_t *bufp; 23264 23265 if ((ptr = usrp) == NULL) { 23266 return (EINVAL); 23267 } 23268 23269 /* 23270 * Get the listsize from user 23271 */ 23272 #ifdef _MULTI_DATAMODEL 23273 switch (ddi_model_convert_from(flag & FMODELS)) { 23274 case DDI_MODEL_ILP32: 23275 copysz = sizeof (struct mhioc_resv_desc_list32); 23276 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23277 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23278 "sd_persistent_reservation_in_read_resv: " 23279 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23280 rval = EFAULT; 23281 goto done; 23282 } 23283 resvlist.listsize = resvlist32.listsize; 23284 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23285 break; 23286 23287 case DDI_MODEL_NONE: 23288 copysz = sizeof (mhioc_resv_desc_list_t); 23289 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23290 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23291 "sd_persistent_reservation_in_read_resv: " 23292 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23293 rval = EFAULT; 23294 goto done; 23295 } 23296 break; 23297 } 23298 #else /* ! _MULTI_DATAMODEL */ 23299 copysz = sizeof (mhioc_resv_desc_list_t); 23300 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23301 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23302 "sd_persistent_reservation_in_read_resv: " 23303 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23304 rval = EFAULT; 23305 goto done; 23306 } 23307 #endif /* ! _MULTI_DATAMODEL */ 23308 23309 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23310 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23311 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23312 23313 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23314 data_len, data_bufp)) != 0) { 23315 goto done; 23316 } 23317 in = (sd_prin_readresv_t *)data_bufp; 23318 ptr->generation = BE_32(in->generation); 23319 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23320 23321 /* 23322 * Return the min(listsize, listlen) keys 23323 */ 23324 #ifdef _MULTI_DATAMODEL 23325 23326 switch (ddi_model_convert_from(flag & FMODELS)) { 23327 case DDI_MODEL_ILP32: 23328 resvlist32.listlen = resvlist.listlen; 23329 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23330 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23331 "sd_persistent_reservation_in_read_resv: " 23332 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23333 rval = EFAULT; 23334 goto done; 23335 } 23336 break; 23337 23338 case DDI_MODEL_NONE: 23339 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23340 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23341 "sd_persistent_reservation_in_read_resv: " 23342 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23343 rval = EFAULT; 23344 goto done; 23345 } 23346 break; 23347 } 23348 23349 #else /* ! _MULTI_DATAMODEL */ 23350 23351 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23352 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23353 "sd_persistent_reservation_in_read_resv: " 23354 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23355 rval = EFAULT; 23356 goto done; 23357 } 23358 23359 #endif /* ! 
_MULTI_DATAMODEL */ 23360 23361 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23362 bufp = resvlist.list; 23363 copysz = sizeof (mhioc_resv_desc_t); 23364 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23365 i++, readresv_ptr++, bufp++) { 23366 23367 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23368 MHIOC_RESV_KEY_SIZE); 23369 resvdesc.type = readresv_ptr->type; 23370 resvdesc.scope = readresv_ptr->scope; 23371 resvdesc.scope_specific_addr = 23372 BE_32(readresv_ptr->scope_specific_addr); 23373 23374 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23375 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23376 "sd_persistent_reservation_in_read_resv: " 23377 "failed ddi_copyout: resvlist\n"); 23378 rval = EFAULT; 23379 goto done; 23380 } 23381 } 23382 done: 23383 if (data_bufp != NULL) { kmem_free(data_bufp, data_len); } 23384 return (rval); 23385 } 23386 23387 23388 /* 23389 * Function: sr_change_blkmode() 23390 * 23391 * Description: This routine is the driver entry point for handling CD-ROM 23392 * block mode ioctl requests. Support for returning and changing 23393 * the current block size in use by the device is implemented. The 23394 * LBA size is changed via a MODE SELECT Block Descriptor. 23395 * 23396 * This routine issues a mode sense with an allocation length of 23397 * 12 bytes for the mode page header and a single block descriptor. 23398 * 23399 * Arguments: dev - the device 'dev_t' 23400 * cmd - the request type; one of CDROMGBLKMODE (get) or 23401 * CDROMSBLKMODE (set) 23402 * data - current block size or requested block size 23403 * flag - this argument is a pass through to ddi_copyxxx() directly 23404 * from the mode argument of ioctl(). 23405 * 23406 * Return Code: the code returned by sd_send_scsi_cmd() 23407 * EINVAL if invalid arguments are provided 23408 * EFAULT if ddi_copyxxx() fails 23409 * ENXIO if ddi_get_soft_state fails 23410 * EIO if invalid mode sense block descriptor length 23411 * 23412 */ 23413 23414 static int 23415 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23416 { 23417 struct sd_lun *un = NULL; 23418 struct mode_header *sense_mhp, *select_mhp; 23419 struct block_descriptor *sense_desc, *select_desc; 23420 int current_bsize; 23421 int rval = EINVAL; 23422 uchar_t *sense = NULL; 23423 uchar_t *select = NULL; 23424 23425 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23426 23427 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23428 return (ENXIO); 23429 } 23430 23431 /* 23432 * The block length is changed via the Mode Select block descriptor; the 23433 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 23434 * required as part of this routine. Therefore the mode sense allocation 23435 * length is specified to be the length of a mode page header and a 23436 * block descriptor. 
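 * (BUFLEN_CHG_BLK_MODE, defined in a header not shown here, is
 * presumably MODE_HEADER_LENGTH plus MODE_BLK_DESC_LENGTH; the current
 * LBA size is then reassembled below from the descriptor's
 * blksize_hi/mid/lo bytes as (hi << 16) | (mid << 8) | lo.)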
23437 */ 23438 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23439 23440 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23441 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 23442 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23443 "sr_change_blkmode: Mode Sense Failed\n"); 23444 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23445 return (rval); 23446 } 23447 23448 /* Check the block descriptor len to handle only 1 block descriptor */ 23449 sense_mhp = (struct mode_header *)sense; 23450 if ((sense_mhp->bdesc_length == 0) || 23451 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 23452 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23453 "sr_change_blkmode: Mode Sense returned invalid block" 23454 " descriptor length\n"); 23455 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23456 return (EIO); 23457 } 23458 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 23459 current_bsize = ((sense_desc->blksize_hi << 16) | 23460 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 23461 23462 /* Process command */ 23463 switch (cmd) { 23464 case CDROMGBLKMODE: 23465 /* Return the block size obtained during the mode sense */ 23466 if (ddi_copyout(&current_bsize, (void *)data, 23467 sizeof (int), flag) != 0) 23468 rval = EFAULT; 23469 break; 23470 case CDROMSBLKMODE: 23471 /* Validate the requested block size */ 23472 switch (data) { 23473 case CDROM_BLK_512: 23474 case CDROM_BLK_1024: 23475 case CDROM_BLK_2048: 23476 case CDROM_BLK_2056: 23477 case CDROM_BLK_2336: 23478 case CDROM_BLK_2340: 23479 case CDROM_BLK_2352: 23480 case CDROM_BLK_2368: 23481 case CDROM_BLK_2448: 23482 case CDROM_BLK_2646: 23483 case CDROM_BLK_2647: 23484 break; 23485 default: 23486 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23487 "sr_change_blkmode: " 23488 "Block Size '%ld' Not Supported\n", data); 23489 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23490 return (EINVAL); 23491 } 23492 23493 /* 23494 * The current block size matches the requested block size so 23495 * there is no need to send the mode select to change the size 23496 */ 23497 if (current_bsize == data) { 23498 break; 23499 } 23500 23501 /* Build the select data for the requested block size */ 23502 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23503 select_mhp = (struct mode_header *)select; 23504 select_desc = 23505 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 23506 /* 23507 * The LBA size is changed via the block descriptor, so the 23508 * descriptor is built according to the user data 23509 */ 23510 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 23511 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 23512 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 23513 select_desc->blksize_lo = (char)((data) & 0x000000ff); 23514 23515 /* Send the mode select for the requested block size */ 23516 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23517 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23518 SD_PATH_STANDARD)) != 0) { 23519 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23520 "sr_change_blkmode: Mode Select Failed\n"); 23521 /* 23522 * The mode select failed for the requested block size, 23523 * so reset the data for the original block size and 23524 * send it to the target. The error is indicated by the 23525 * return value for the failed mode select. 
23526 */ 23527 select_desc->blksize_hi = sense_desc->blksize_hi; 23528 select_desc->blksize_mid = sense_desc->blksize_mid; 23529 select_desc->blksize_lo = sense_desc->blksize_lo; 23530 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23531 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23532 SD_PATH_STANDARD); 23533 } else { 23534 ASSERT(!mutex_owned(SD_MUTEX(un))); 23535 mutex_enter(SD_MUTEX(un)); 23536 sd_update_block_info(un, (uint32_t)data, 0); 23537 mutex_exit(SD_MUTEX(un)); 23538 } 23539 break; 23540 default: 23541 /* should not reach here, but check anyway */ 23542 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23543 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 23544 rval = EINVAL; 23545 break; 23546 } 23547 23548 if (select) { 23549 kmem_free(select, BUFLEN_CHG_BLK_MODE); 23550 } 23551 if (sense) { 23552 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23553 } 23554 return (rval); 23555 } 23556 23557 23558 /* 23559 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 23560 * implement driver support for getting and setting the CD speed. The command 23561 * set used will be based on the device type. If the device has not been 23562 * identified as MMC, the Toshiba vendor specific mode page will be used. If 23563 * the device is MMC but does not support the Real Time Streaming feature, 23564 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 23565 * be used to read the speed. 23566 */ 23567 23568 /* 23569 * Function: sr_change_speed() 23570 * 23571 * Description: This routine is the driver entry point for handling CD-ROM 23572 * drive speed ioctl requests for devices supporting the Toshiba 23573 * vendor specific drive speed mode page. Support for returning 23574 * and changing the current drive speed in use by the device is 23575 * implemented. 23576 * 23577 * Arguments: dev - the device 'dev_t' 23578 * cmd - the request type; one of CDROMGDRVSPEED (get) or 23579 * CDROMSDRVSPEED (set) 23580 * data - current drive speed or requested drive speed 23581 * flag - this argument is a pass through to ddi_copyxxx() directly 23582 * from the mode argument of ioctl(). 23583 * 23584 * Return Code: the code returned by sd_send_scsi_cmd() 23585 * EINVAL if invalid arguments are provided 23586 * EFAULT if ddi_copyxxx() fails 23587 * ENXIO if ddi_get_soft_state fails 23588 * EIO if invalid mode sense block descriptor length 23589 */ 23590 23591 static int 23592 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23593 { 23594 struct sd_lun *un = NULL; 23595 struct mode_header *sense_mhp, *select_mhp; 23596 struct mode_speed *sense_page, *select_page; 23597 int current_speed; 23598 int rval = EINVAL; 23599 int bd_len; 23600 uchar_t *sense = NULL; 23601 uchar_t *select = NULL; 23602 23603 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23604 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23605 return (ENXIO); 23606 } 23607 23608 /* 23609 * Note: The drive speed is being modified here according to a Toshiba 23610 * vendor specific mode page (0x31). 
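 * (CDROM_MODE_SPEED carries that page code; the speed byte in the
 * page is vendor specific as well, e.g. the get path below maps the
 * raw value 0x2 to CDROM_TWELVE_SPEED.)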
23611 */ 23612 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23613 23614 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23615 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 23616 SD_PATH_STANDARD)) != 0) { 23617 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23618 "sr_change_speed: Mode Sense Failed\n"); 23619 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23620 return (rval); 23621 } 23622 sense_mhp = (struct mode_header *)sense; 23623 23624 /* Check the block descriptor len to handle only 1 block descriptor */ 23625 bd_len = sense_mhp->bdesc_length; 23626 if (bd_len > MODE_BLK_DESC_LENGTH) { 23627 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23628 "sr_change_speed: Mode Sense returned invalid block " 23629 "descriptor length\n"); 23630 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23631 return (EIO); 23632 } 23633 23634 sense_page = (struct mode_speed *) 23635 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 23636 current_speed = sense_page->speed; 23637 23638 /* Process command */ 23639 switch (cmd) { 23640 case CDROMGDRVSPEED: 23641 /* Return the drive speed obtained during the mode sense */ 23642 if (current_speed == 0x2) { 23643 current_speed = CDROM_TWELVE_SPEED; 23644 } 23645 if (ddi_copyout(&current_speed, (void *)data, 23646 sizeof (int), flag) != 0) { 23647 rval = EFAULT; 23648 } 23649 break; 23650 case CDROMSDRVSPEED: 23651 /* Validate the requested drive speed */ 23652 switch ((uchar_t)data) { 23653 case CDROM_TWELVE_SPEED: 23654 data = 0x2; 23655 /*FALLTHROUGH*/ 23656 case CDROM_NORMAL_SPEED: 23657 case CDROM_DOUBLE_SPEED: 23658 case CDROM_QUAD_SPEED: 23659 case CDROM_MAXIMUM_SPEED: 23660 break; 23661 default: 23662 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23663 "sr_change_speed: " 23664 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 23665 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23666 return (EINVAL); 23667 } 23668 23669 /* 23670 * The current drive speed matches the requested drive speed so 23671 * there is no need to send the mode select to change the speed 23672 */ 23673 if (current_speed == data) { 23674 break; 23675 } 23676 23677 /* Build the select data for the requested drive speed */ 23678 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23679 select_mhp = (struct mode_header *)select; 23680 select_mhp->bdesc_length = 0; 23681 select_page = 23682 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 23685 select_page->mode_page.code = CDROM_MODE_SPEED; 23686 select_page->mode_page.length = 2; 23687 select_page->speed = (uchar_t)data; 23688 23689 /* Send the mode select for the requested drive speed */ 23690 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23691 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23692 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 23693 /* 23694 * The mode select failed for the requested drive speed, 23695 * so reset the data for the original drive speed and 23696 * send it to the target. The error is indicated by the 23697 * return value for the failed mode select. 
23698 */ 23699 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23700 "sr_change_speed: Mode Select Failed\n"); 23701 select_page->speed = sense_page->speed; 23702 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23703 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23704 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 23705 } 23706 break; 23707 default: 23708 /* should not reach here, but check anyway */ 23709 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23710 "sr_change_speed: Command '%x' Not Supported\n", cmd); 23711 rval = EINVAL; 23712 break; 23713 } 23714 23715 if (select) { 23716 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 23717 } 23718 if (sense) { 23719 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23720 } 23721 23722 return (rval); 23723 } 23724 23725 23726 /* 23727 * Function: sr_atapi_change_speed() 23728 * 23729 * Description: This routine is the driver entry point for handling CD-ROM 23730 * drive speed ioctl requests for MMC devices that do not support 23731 * the Real Time Streaming feature (0x107). 23732 * 23733 * Note: This routine will use the SET SPEED command which may not 23734 * be supported by all devices. 23735 * 23736 * Arguments: dev - the device 'dev_t' 23737 * cmd - the request type; one of CDROMGDRVSPEED (get) or 23738 * CDROMSDRVSPEED (set) 23739 * data - current drive speed or requested drive speed 23740 * flag - this argument is a pass through to ddi_copyxxx() directly 23741 * from the mode argument of ioctl(). 23742 * 23743 * Return Code: the code returned by sd_send_scsi_cmd() 23744 * EINVAL if invalid arguments are provided 23745 * EFAULT if ddi_copyxxx() fails 23746 * ENXIO if ddi_get_soft_state fails 23747 * EIO if invalid mode sense block descriptor length 23748 */ 23749 23750 static int 23751 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23752 { 23753 struct sd_lun *un; 23754 struct uscsi_cmd *com = NULL; 23755 struct mode_header_grp2 *sense_mhp; 23756 uchar_t *sense_page; 23757 uchar_t *sense = NULL; 23758 char cdb[CDB_GROUP5]; 23759 int bd_len; 23760 int current_speed = 0; 23761 int max_speed = 0; 23762 int rval; 23763 23764 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23765 23766 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23767 return (ENXIO); 23768 } 23769 23770 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 23771 23772 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 23773 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 23774 SD_PATH_STANDARD)) != 0) { 23775 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23776 "sr_atapi_change_speed: Mode Sense Failed\n"); 23777 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23778 return (rval); 23779 } 23780 23781 /* Check the block descriptor len to handle only 1 block descriptor */ 23782 sense_mhp = (struct mode_header_grp2 *)sense; 23783 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 23784 if (bd_len > MODE_BLK_DESC_LENGTH) { 23785 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23786 "sr_atapi_change_speed: Mode Sense returned invalid " 23787 "block descriptor length\n"); 23788 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23789 return (EIO); 23790 } 23791 23792 /* Calculate the current and maximum drive speeds */ 23793 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 23794 current_speed = (sense_page[14] << 8) | sense_page[15]; 23795 max_speed = (sense_page[8] << 8) | sense_page[9]; 23796 23797 /* Process the command */ 23798 switch (cmd) { 23799 case CDROMGDRVSPEED: 23800 current_speed /= SD_SPEED_1X; 23801 if 
23802 		    sizeof (int), flag) != 0)
23803 			rval = EFAULT;
23804 		break;
23805 	case CDROMSDRVSPEED:
23806 		/* Convert the speed code to KB/sec */
23807 		switch ((uchar_t)data) {
23808 		case CDROM_NORMAL_SPEED:
23809 			current_speed = SD_SPEED_1X;
23810 			break;
23811 		case CDROM_DOUBLE_SPEED:
23812 			current_speed = 2 * SD_SPEED_1X;
23813 			break;
23814 		case CDROM_QUAD_SPEED:
23815 			current_speed = 4 * SD_SPEED_1X;
23816 			break;
23817 		case CDROM_TWELVE_SPEED:
23818 			current_speed = 12 * SD_SPEED_1X;
23819 			break;
23820 		case CDROM_MAXIMUM_SPEED:
23821 			current_speed = 0xffff;
23822 			break;
23823 		default:
23824 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
23825 			    "sr_atapi_change_speed: invalid drive speed %d\n",
23826 			    (uchar_t)data);
23827 			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
23828 			return (EINVAL);
23829 		}
23830 
23831 		/* Check the request against the drive's max speed. */
23832 		if (current_speed != 0xffff) {
23833 			if (current_speed > max_speed) {
23834 				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
23835 				return (EINVAL);
23836 			}
23837 		}
23838 
23839 		/*
23840 		 * Build and send the SET SPEED command
23841 		 *
23842 		 * Note: The SET SPEED (0xBB) command used in this routine is
23843 		 * obsolete per the SCSI MMC spec but still supported in the
23844 		 * MT FUJI vendor spec. Most equipment adheres to MT FUJI,
23845 		 * so the command is still implemented in this routine.
23846 		 */
23847 		bzero(cdb, sizeof (cdb));
23848 		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
23849 		cdb[2] = (uchar_t)(current_speed >> 8);
23850 		cdb[3] = (uchar_t)current_speed;
23851 		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
23852 		com->uscsi_cdb = (caddr_t)cdb;
23853 		com->uscsi_cdblen = CDB_GROUP5;
23854 		com->uscsi_bufaddr = NULL;
23855 		com->uscsi_buflen = 0;
23856 		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
23857 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
23858 		break;
23859 	default:
23860 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
23861 		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
23862 		rval = EINVAL;
23863 	}
23864 
23865 	if (sense) {
23866 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
23867 	}
23868 	if (com) {
23869 		kmem_free(com, sizeof (*com));
23870 	}
23871 	return (rval);
23872 }
23873 
23874 
23875 /*
23876  * Function: sr_pause_resume()
23877  *
23878  * Description: This routine is the driver entry point for handling CD-ROM
23879  *		pause/resume ioctl requests. This only affects the audio play
23880  *		operation.
23881  *
23882  * Arguments: dev - the device 'dev_t'
23883  *	      cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
23884  *		    for setting the resume bit of the cdb.
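 *
 *	      For illustration only (not part of the driver): a user
 *	      application might issue these requests as follows; the device
 *	      path here is an assumed example.
 *
 *			int fd = open("/dev/rdsk/c0t6d0s2",
 *			    O_RDONLY | O_NDELAY);
 *			(void) ioctl(fd, CDROMPAUSE, 0);
 *			(void) ioctl(fd, CDROMRESUME, 0);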
23885  *
23886  * Return Code: the code returned by sd_send_scsi_cmd()
23887  *		EINVAL if invalid mode specified
23888  *
23889  */
23890 
23891 static int
23892 sr_pause_resume(dev_t dev, int cmd)
23893 {
23894 	struct sd_lun		*un;
23895 	struct uscsi_cmd	*com;
23896 	char			cdb[CDB_GROUP1];
23897 	int			rval;
23898 
23899 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23900 		return (ENXIO);
23901 	}
23902 
23903 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
23904 	bzero(cdb, CDB_GROUP1);
23905 	cdb[0] = SCMD_PAUSE_RESUME;
23906 	switch (cmd) {
23907 	case CDROMRESUME:
23908 		cdb[8] = 1;
23909 		break;
23910 	case CDROMPAUSE:
23911 		cdb[8] = 0;
23912 		break;
23913 	default:
23914 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
23915 		    " Command '%x' Not Supported\n", cmd);
23916 		rval = EINVAL;
23917 		goto done;
23918 	}
23919 
23920 	com->uscsi_cdb = cdb;
23921 	com->uscsi_cdblen = CDB_GROUP1;
23922 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
23923 
23924 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
23925 	    SD_PATH_STANDARD);
23926 
23927 done:
23928 	kmem_free(com, sizeof (*com));
23929 	return (rval);
23930 }
23931 
23932 
23933 /*
23934  * Function: sr_play_msf()
23935  *
23936  * Description: This routine is the driver entry point for handling CD-ROM
23937  *		ioctl requests to output the audio signals at the specified
23938  *		starting address and continue the audio play until the
23939  *		specified ending address (CDROMPLAYMSF). The address is in
23940  *		Minute Second Frame (MSF) format.
23941  *
23942  * Arguments: dev - the device 'dev_t'
23943  *	      data - pointer to user provided audio msf structure,
23944  *		     specifying start/end addresses.
23945  *	      flag - this argument is a pass through to ddi_copyxxx()
23946  *		     directly from the mode argument of ioctl().
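 *
 *	      Illustrative sketch (not part of the driver), assuming an open
 *	      descriptor fd on the raw CD-ROM device:
 *
 *			struct cdrom_msf msf;
 *			msf.cdmsf_min0 = 2;	(start at 02:00.00)
 *			msf.cdmsf_sec0 = 0;
 *			msf.cdmsf_frame0 = 0;
 *			msf.cdmsf_min1 = 5;	(stop at 05:30.00)
 *			msf.cdmsf_sec1 = 30;
 *			msf.cdmsf_frame1 = 0;
 *			(void) ioctl(fd, CDROMPLAYMSF, &msf);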
23947 * 23948 * Return Code: the code returned by sd_send_scsi_cmd() 23949 * EFAULT if ddi_copyxxx() fails 23950 * ENXIO if fail ddi_get_soft_state 23951 * EINVAL if data pointer is NULL 23952 */ 23953 23954 static int 23955 sr_play_msf(dev_t dev, caddr_t data, int flag) 23956 { 23957 struct sd_lun *un; 23958 struct uscsi_cmd *com; 23959 struct cdrom_msf msf_struct; 23960 struct cdrom_msf *msf = &msf_struct; 23961 char cdb[CDB_GROUP1]; 23962 int rval; 23963 23964 if (data == NULL) { 23965 return (EINVAL); 23966 } 23967 23968 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23969 return (ENXIO); 23970 } 23971 23972 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 23973 return (EFAULT); 23974 } 23975 23976 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23977 bzero(cdb, CDB_GROUP1); 23978 cdb[0] = SCMD_PLAYAUDIO_MSF; 23979 if (un->un_f_cfg_playmsf_bcd == TRUE) { 23980 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 23981 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 23982 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 23983 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 23984 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 23985 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 23986 } else { 23987 cdb[3] = msf->cdmsf_min0; 23988 cdb[4] = msf->cdmsf_sec0; 23989 cdb[5] = msf->cdmsf_frame0; 23990 cdb[6] = msf->cdmsf_min1; 23991 cdb[7] = msf->cdmsf_sec1; 23992 cdb[8] = msf->cdmsf_frame1; 23993 } 23994 com->uscsi_cdb = cdb; 23995 com->uscsi_cdblen = CDB_GROUP1; 23996 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23997 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23998 SD_PATH_STANDARD); 23999 kmem_free(com, sizeof (*com)); 24000 return (rval); 24001 } 24002 24003 24004 /* 24005 * Function: sr_play_trkind() 24006 * 24007 * Description: This routine is the driver entry point for handling CD-ROM 24008 * ioctl requests to output the audio signals at the specified 24009 * starting address and continue the audio play until the specified 24010 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24011 * format. 24012 * 24013 * Arguments: dev - the device 'dev_t' 24014 * data - pointer to user provided audio track/index structure, 24015 * specifying start/end addresses. 24016 * flag - this argument is a pass through to ddi_copyxxx() 24017 * directly from the mode argument of ioctl(). 
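 *
 *	      Illustrative sketch (not part of the driver), assuming an open
 *	      descriptor fd:
 *
 *			struct cdrom_ti ti;
 *			ti.cdti_trk0 = 1;	(starting track)
 *			ti.cdti_ind0 = 1;	(starting index)
 *			ti.cdti_trk1 = 2;	(ending track)
 *			ti.cdti_ind1 = 1;	(ending index)
 *			(void) ioctl(fd, CDROMPLAYTRKIND, &ti);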
24018 * 24019 * Return Code: the code returned by sd_send_scsi_cmd() 24020 * EFAULT if ddi_copyxxx() fails 24021 * ENXIO if fail ddi_get_soft_state 24022 * EINVAL if data pointer is NULL 24023 */ 24024 24025 static int 24026 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24027 { 24028 struct cdrom_ti ti_struct; 24029 struct cdrom_ti *ti = &ti_struct; 24030 struct uscsi_cmd *com = NULL; 24031 char cdb[CDB_GROUP1]; 24032 int rval; 24033 24034 if (data == NULL) { 24035 return (EINVAL); 24036 } 24037 24038 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24039 return (EFAULT); 24040 } 24041 24042 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24043 bzero(cdb, CDB_GROUP1); 24044 cdb[0] = SCMD_PLAYAUDIO_TI; 24045 cdb[4] = ti->cdti_trk0; 24046 cdb[5] = ti->cdti_ind0; 24047 cdb[7] = ti->cdti_trk1; 24048 cdb[8] = ti->cdti_ind1; 24049 com->uscsi_cdb = cdb; 24050 com->uscsi_cdblen = CDB_GROUP1; 24051 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24052 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24053 SD_PATH_STANDARD); 24054 kmem_free(com, sizeof (*com)); 24055 return (rval); 24056 } 24057 24058 24059 /* 24060 * Function: sr_read_all_subcodes() 24061 * 24062 * Description: This routine is the driver entry point for handling CD-ROM 24063 * ioctl requests to return raw subcode data while the target is 24064 * playing audio (CDROMSUBCODE). 24065 * 24066 * Arguments: dev - the device 'dev_t' 24067 * data - pointer to user provided cdrom subcode structure, 24068 * specifying the transfer length and address. 24069 * flag - this argument is a pass through to ddi_copyxxx() 24070 * directly from the mode argument of ioctl(). 24071 * 24072 * Return Code: the code returned by sd_send_scsi_cmd() 24073 * EFAULT if ddi_copyxxx() fails 24074 * ENXIO if fail ddi_get_soft_state 24075 * EINVAL if data pointer is NULL 24076 */ 24077 24078 static int 24079 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24080 { 24081 struct sd_lun *un = NULL; 24082 struct uscsi_cmd *com = NULL; 24083 struct cdrom_subcode *subcode = NULL; 24084 int rval; 24085 size_t buflen; 24086 char cdb[CDB_GROUP5]; 24087 24088 #ifdef _MULTI_DATAMODEL 24089 /* To support ILP32 applications in an LP64 world */ 24090 struct cdrom_subcode32 cdrom_subcode32; 24091 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24092 #endif 24093 if (data == NULL) { 24094 return (EINVAL); 24095 } 24096 24097 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24098 return (ENXIO); 24099 } 24100 24101 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24102 24103 #ifdef _MULTI_DATAMODEL 24104 switch (ddi_model_convert_from(flag & FMODELS)) { 24105 case DDI_MODEL_ILP32: 24106 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24107 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24108 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24109 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24110 return (EFAULT); 24111 } 24112 /* Convert the ILP32 uscsi data from the application to LP64 */ 24113 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24114 break; 24115 case DDI_MODEL_NONE: 24116 if (ddi_copyin(data, subcode, 24117 sizeof (struct cdrom_subcode), flag)) { 24118 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24119 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24120 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24121 return (EFAULT); 24122 } 24123 break; 24124 } 24125 #else /* ! 
_MULTI_DATAMODEL */
24126 	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
24127 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24128 		    "sr_read_all_subcodes: ddi_copyin Failed\n");
24129 		kmem_free(subcode, sizeof (struct cdrom_subcode));
24130 		return (EFAULT);
24131 	}
24132 #endif /* _MULTI_DATAMODEL */
24133 
24134 	/*
24135 	 * Since MMC-2 expects max 3 bytes for length, check if the
24136 	 * length input is greater than 3 bytes
24137 	 */
24138 	if ((subcode->cdsc_length & 0xFF000000) != 0) {
24139 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24140 		    "sr_read_all_subcodes: "
24141 		    "cdrom transfer length too large: %d (limit %d)\n",
24142 		    subcode->cdsc_length, 0xFFFFFF);
24143 		kmem_free(subcode, sizeof (struct cdrom_subcode));
24144 		return (EINVAL);
24145 	}
24146 
24147 	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
24148 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24149 	bzero(cdb, CDB_GROUP5);
24150 
24151 	if (un->un_f_mmc_cap == TRUE) {
24152 		cdb[0] = (char)SCMD_READ_CD;
24153 		cdb[2] = (char)0xff;
24154 		cdb[3] = (char)0xff;
24155 		cdb[4] = (char)0xff;
24156 		cdb[5] = (char)0xff;
24157 		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
24158 		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
24159 		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
24160 		cdb[10] = 1;
24161 	} else {
24162 		/*
24163 		 * Note: A vendor specific command (0xDF) is being used here to
24164 		 * request a read of all subcodes.
24165 		 */
24166 		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
24167 		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
24168 		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
24169 		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
24170 		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
24171 	}
24172 	com->uscsi_cdb = cdb;
24173 	com->uscsi_cdblen = CDB_GROUP5;
24174 	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
24175 	com->uscsi_buflen = buflen;
24176 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24177 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
24178 	    SD_PATH_STANDARD);
24179 	kmem_free(subcode, sizeof (struct cdrom_subcode));
24180 	kmem_free(com, sizeof (*com));
24181 	return (rval);
24182 }
24183 
24184 
24185 /*
24186  * Function: sr_read_subchannel()
24187  *
24188  * Description: This routine is the driver entry point for handling CD-ROM
24189  *		ioctl requests to return the Q sub-channel data of the CD
24190  *		current position block. (CDROMSUBCHNL) The data includes the
24191  *		track number, index number, absolute CD-ROM address (LBA or MSF
24192  *		format per the user), track relative CD-ROM address (LBA or MSF
24193  *		format per the user), control data and audio status.
24194  *
24195  * Arguments: dev - the device 'dev_t'
24196  *	      data - pointer to user provided cdrom sub-channel structure
24197  *	      flag - this argument is a pass through to ddi_copyxxx()
24198  *		     directly from the mode argument of ioctl().
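 *
 *	      Illustrative sketch (not part of the driver), assuming an open
 *	      descriptor fd and the audio status constants from <sys/cdio.h>:
 *
 *			struct cdrom_subchnl sc;
 *			sc.cdsc_format = CDROM_MSF;
 *			if (ioctl(fd, CDROMSUBCHNL, &sc) == 0 &&
 *			    sc.cdsc_audiostatus == CDROM_AUDIO_PLAY) {
 *				(playback is in progress; the current
 *				position is in sc.cdsc_absaddr.msf)
 *			}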
24199 * 24200 * Return Code: the code returned by sd_send_scsi_cmd() 24201 * EFAULT if ddi_copyxxx() fails 24202 * ENXIO if fail ddi_get_soft_state 24203 * EINVAL if data pointer is NULL 24204 */ 24205 24206 static int 24207 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24208 { 24209 struct sd_lun *un; 24210 struct uscsi_cmd *com; 24211 struct cdrom_subchnl subchanel; 24212 struct cdrom_subchnl *subchnl = &subchanel; 24213 char cdb[CDB_GROUP1]; 24214 caddr_t buffer; 24215 int rval; 24216 24217 if (data == NULL) { 24218 return (EINVAL); 24219 } 24220 24221 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24222 (un->un_state == SD_STATE_OFFLINE)) { 24223 return (ENXIO); 24224 } 24225 24226 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24227 return (EFAULT); 24228 } 24229 24230 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24231 bzero(cdb, CDB_GROUP1); 24232 cdb[0] = SCMD_READ_SUBCHANNEL; 24233 /* Set the MSF bit based on the user requested address format */ 24234 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24235 /* 24236 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24237 * returned 24238 */ 24239 cdb[2] = 0x40; 24240 /* 24241 * Set byte 3 to specify the return data format. A value of 0x01 24242 * indicates that the CD-ROM current position should be returned. 24243 */ 24244 cdb[3] = 0x01; 24245 cdb[8] = 0x10; 24246 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24247 com->uscsi_cdb = cdb; 24248 com->uscsi_cdblen = CDB_GROUP1; 24249 com->uscsi_bufaddr = buffer; 24250 com->uscsi_buflen = 16; 24251 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24252 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24253 SD_PATH_STANDARD); 24254 if (rval != 0) { 24255 kmem_free(buffer, 16); 24256 kmem_free(com, sizeof (*com)); 24257 return (rval); 24258 } 24259 24260 /* Process the returned Q sub-channel data */ 24261 subchnl->cdsc_audiostatus = buffer[1]; 24262 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24263 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24264 subchnl->cdsc_trk = buffer[6]; 24265 subchnl->cdsc_ind = buffer[7]; 24266 if (subchnl->cdsc_format & CDROM_LBA) { 24267 subchnl->cdsc_absaddr.lba = 24268 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24269 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24270 subchnl->cdsc_reladdr.lba = 24271 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24272 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24273 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24274 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24275 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24276 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24277 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24278 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24279 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24280 } else { 24281 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24282 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24283 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24284 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24285 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24286 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24287 } 24288 kmem_free(buffer, 16); 24289 kmem_free(com, sizeof (*com)); 24290 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24291 != 0) { 24292 return (EFAULT); 24293 } 24294 return (rval); 24295 } 24296 24297 24298 /* 24299 * Function: sr_read_tocentry() 24300 * 
24301  * Description: This routine is the driver entry point for handling CD-ROM
24302  *		ioctl requests to read from the Table of Contents (TOC)
24303  *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
24304  *		fields, the starting address (LBA or MSF format per the user)
24305  *		and the data mode if the user specified track is a data track.
24306  *
24307  *		Note: The READ HEADER (0x44) command used in this routine is
24308  *		obsolete per the SCSI MMC spec but still supported in the
24309  *		MT FUJI vendor spec. Most equipment adheres to MT FUJI,
24310  *		so the command is still implemented in this routine.
24311  *
24312  * Arguments: dev - the device 'dev_t'
24313  *	      data - pointer to user provided toc entry structure,
24314  *		     specifying the track # and the address format
24315  *		     (LBA or MSF).
24316  *	      flag - this argument is a pass through to ddi_copyxxx()
24317  *		     directly from the mode argument of ioctl().
24318  *
24319  * Return Code: the code returned by sd_send_scsi_cmd()
24320  *		EFAULT if ddi_copyxxx() fails
24321  *		ENXIO if fail ddi_get_soft_state
24322  *		EINVAL if data pointer is NULL
24323  */
24324 
24325 static int
24326 sr_read_tocentry(dev_t dev, caddr_t data, int flag)
24327 {
24328 	struct sd_lun		*un = NULL;
24329 	struct uscsi_cmd	*com;
24330 	struct cdrom_tocentry	toc_entry;
24331 	struct cdrom_tocentry	*entry = &toc_entry;
24332 	caddr_t			buffer;
24333 	int			rval;
24334 	char			cdb[CDB_GROUP1];
24335 
24336 	if (data == NULL) {
24337 		return (EINVAL);
24338 	}
24339 
24340 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
24341 	    (un->un_state == SD_STATE_OFFLINE)) {
24342 		return (ENXIO);
24343 	}
24344 
24345 	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
24346 		return (EFAULT);
24347 	}
24348 
24349 	/* Validate the requested track and address format */
24350 	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
24351 		return (EINVAL);
24352 	}
24353 
24354 	if (entry->cdte_track == 0) {
24355 		return (EINVAL);
24356 	}
24357 
24358 	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
24359 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24360 	bzero(cdb, CDB_GROUP1);
24361 
24362 	cdb[0] = SCMD_READ_TOC;
24363 	/* Set the MSF bit based on the user requested address format */
24364 	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
24365 	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
24366 		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
24367 	} else {
24368 		cdb[6] = entry->cdte_track;
24369 	}
24370 
24371 	/*
24372 	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
24373 	 * (4 byte TOC response header + 8 byte track descriptor)
24374 	 */
24375 	cdb[8] = 12;
24376 	com->uscsi_cdb = cdb;
24377 	com->uscsi_cdblen = CDB_GROUP1;
24378 	com->uscsi_bufaddr = buffer;
24379 	com->uscsi_buflen = 0x0C;
24380 	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
24381 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24382 	    SD_PATH_STANDARD);
24383 	if (rval != 0) {
24384 		kmem_free(buffer, 12);
24385 		kmem_free(com, sizeof (*com));
24386 		return (rval);
24387 	}
24388 
24389 	/* Process the toc entry */
24390 	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
24391 	entry->cdte_ctrl = (buffer[5] & 0x0F);
24392 	if (entry->cdte_format & CDROM_LBA) {
24393 		entry->cdte_addr.lba =
24394 		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
24395 		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
24396 	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
24397 		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
24398 		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
24399 		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
24400 		/*
24401 		 * Send a READ TOC command using the LBA address format to get
24402 		 * the LBA for the track requested so it can be used in the
24403 		 * READ HEADER request
24404 		 *
24405 		 * Note: The MSF bit of the READ HEADER command specifies the
24406 		 * output format. The block address specified in that command
24407 		 * must be in LBA format.
24408 		 */
24409 		cdb[1] = 0;
24410 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24411 		    SD_PATH_STANDARD);
24412 		if (rval != 0) {
24413 			kmem_free(buffer, 12);
24414 			kmem_free(com, sizeof (*com));
24415 			return (rval);
24416 		}
24417 	} else {
24418 		entry->cdte_addr.msf.minute = buffer[9];
24419 		entry->cdte_addr.msf.second = buffer[10];
24420 		entry->cdte_addr.msf.frame = buffer[11];
24421 		/*
24422 		 * Send a READ TOC command using the LBA address format to get
24423 		 * the LBA for the track requested so it can be used in the
24424 		 * READ HEADER request
24425 		 *
24426 		 * Note: The MSF bit of the READ HEADER command specifies the
24427 		 * output format. The block address specified in that command
24428 		 * must be in LBA format.
24429 		 */
24430 		cdb[1] = 0;
24431 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24432 		    SD_PATH_STANDARD);
24433 		if (rval != 0) {
24434 			kmem_free(buffer, 12);
24435 			kmem_free(com, sizeof (*com));
24436 			return (rval);
24437 		}
24438 	}
24439 
24440 	/*
24441 	 * Build and send the READ HEADER command to determine the data mode of
24442 	 * the user specified track.
24443 	 */
24444 	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
24445 	    (entry->cdte_track != CDROM_LEADOUT)) {
24446 		bzero(cdb, CDB_GROUP1);
24447 		cdb[0] = SCMD_READ_HEADER;
24448 		cdb[2] = buffer[8];
24449 		cdb[3] = buffer[9];
24450 		cdb[4] = buffer[10];
24451 		cdb[5] = buffer[11];
24452 		cdb[8] = 0x08;
24453 		com->uscsi_buflen = 0x08;
24454 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24455 		    SD_PATH_STANDARD);
24456 		if (rval == 0) {
24457 			entry->cdte_datamode = buffer[0];
24458 		} else {
24459 			/*
24460 			 * The READ HEADER command failed. Since the command
24461 			 * is obsolete in one spec, it is better to return
24462 			 * -1 for an invalid track so that we can still
24463 			 * receive the rest of the TOC data.
24464 			 */
24465 			entry->cdte_datamode = (uchar_t)-1;
24466 		}
24467 	} else {
24468 		entry->cdte_datamode = (uchar_t)-1;
24469 	}
24470 
24471 	kmem_free(buffer, 12);
24472 	kmem_free(com, sizeof (*com));
24473 	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
24474 		return (EFAULT);
24475 
24476 	return (rval);
24477 }
24478 
24479 
24480 /*
24481  * Function: sr_read_tochdr()
24482  *
24483  * Description: This routine is the driver entry point for handling CD-ROM
24484  *		ioctl requests to read the Table of Contents (TOC) header
24485  *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
24486  *		and ending track numbers.
24487  *
24488  * Arguments: dev - the device 'dev_t'
24489  *	      data - pointer to user provided toc header structure,
24490  *		     specifying the starting and ending track numbers.
24491  *	      flag - this argument is a pass through to ddi_copyxxx()
24492  *		     directly from the mode argument of ioctl().
24493  *
24494  * Return Code: the code returned by sd_send_scsi_cmd()
24495  *		EFAULT if ddi_copyxxx() fails
24496  *		ENXIO if fail ddi_get_soft_state
24497  *		EINVAL if data pointer is NULL
24498  */
24499 
24500 static int
24501 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
24502 {
24503 	struct sd_lun		*un;
24504 	struct uscsi_cmd	*com;
24505 	struct cdrom_tochdr	toc_header;
24506 	struct cdrom_tochdr	*hdr = &toc_header;
24507 	char			cdb[CDB_GROUP1];
24508 	int			rval;
24509 	caddr_t			buffer;
24510 
24511 	if (data == NULL) {
24512 		return (EINVAL);
24513 	}
24514 
24515 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
24516 	    (un->un_state == SD_STATE_OFFLINE)) {
24517 		return (ENXIO);
24518 	}
24519 
24520 	buffer = kmem_zalloc(4, KM_SLEEP);
24521 	bzero(cdb, CDB_GROUP1);
24522 	cdb[0] = SCMD_READ_TOC;
24523 	/*
24524 	 * Specifying a track number of 0x00 in the READ TOC command indicates
24525 	 * that the TOC header should be returned
24526 	 */
24527 	cdb[6] = 0x00;
24528 	/*
24529 	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
24530 	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
24531 	 */
24532 	cdb[8] = 0x04;
24533 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24534 	com->uscsi_cdb = cdb;
24535 	com->uscsi_cdblen = CDB_GROUP1;
24536 	com->uscsi_bufaddr = buffer;
24537 	com->uscsi_buflen = 0x04;
24538 	com->uscsi_timeout = 300;
24539 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24540 
24541 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24542 	    SD_PATH_STANDARD);
24543 	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
24544 		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
24545 		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
24546 	} else {
24547 		hdr->cdth_trk0 = buffer[2];
24548 		hdr->cdth_trk1 = buffer[3];
24549 	}
24550 	kmem_free(buffer, 4);
24551 	kmem_free(com, sizeof (*com));
24552 	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
24553 		return (EFAULT);
24554 	}
24555 	return (rval);
24556 }
24557 
24558 
24559 /*
24560  * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
24561  * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
24562  * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
24563  * digital audio and extended architecture digital audio. These modes are
24564  * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
24565  * MMC specs.
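 *
 * As a rough guide (see the individual routines below), the per-block
 * transfer sizes involved are 2048 bytes for mode 1 user data, 2336 bytes
 * for mode 2 user data, 2352 bytes for raw CD-DA (2368 with Q subcode,
 * 2448 with all subcodes, and 96 for subcode-only data), and 2048, 2352
 * or 2646 bytes for the CD-XA formats.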
24566 * 24567 * In addition to support for the various data formats these routines also 24568 * include support for devices that implement only the direct access READ 24569 * commands (0x08, 0x28), devices that implement the READ_CD commands 24570 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 24571 * READ CDXA commands (0xD8, 0xDB) 24572 */ 24573 24574 /* 24575 * Function: sr_read_mode1() 24576 * 24577 * Description: This routine is the driver entry point for handling CD-ROM 24578 * ioctl read mode1 requests (CDROMREADMODE1). 24579 * 24580 * Arguments: dev - the device 'dev_t' 24581 * data - pointer to user provided cd read structure specifying 24582 * the lba buffer address and length. 24583 * flag - this argument is a pass through to ddi_copyxxx() 24584 * directly from the mode argument of ioctl(). 24585 * 24586 * Return Code: the code returned by sd_send_scsi_cmd() 24587 * EFAULT if ddi_copyxxx() fails 24588 * ENXIO if fail ddi_get_soft_state 24589 * EINVAL if data pointer is NULL 24590 */ 24591 24592 static int 24593 sr_read_mode1(dev_t dev, caddr_t data, int flag) 24594 { 24595 struct sd_lun *un; 24596 struct cdrom_read mode1_struct; 24597 struct cdrom_read *mode1 = &mode1_struct; 24598 int rval; 24599 #ifdef _MULTI_DATAMODEL 24600 /* To support ILP32 applications in an LP64 world */ 24601 struct cdrom_read32 cdrom_read32; 24602 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24603 #endif /* _MULTI_DATAMODEL */ 24604 24605 if (data == NULL) { 24606 return (EINVAL); 24607 } 24608 24609 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24610 (un->un_state == SD_STATE_OFFLINE)) { 24611 return (ENXIO); 24612 } 24613 24614 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24615 "sd_read_mode1: entry: un:0x%p\n", un); 24616 24617 #ifdef _MULTI_DATAMODEL 24618 switch (ddi_model_convert_from(flag & FMODELS)) { 24619 case DDI_MODEL_ILP32: 24620 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24621 return (EFAULT); 24622 } 24623 /* Convert the ILP32 uscsi data from the application to LP64 */ 24624 cdrom_read32tocdrom_read(cdrd32, mode1); 24625 break; 24626 case DDI_MODEL_NONE: 24627 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24628 return (EFAULT); 24629 } 24630 } 24631 #else /* ! _MULTI_DATAMODEL */ 24632 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24633 return (EFAULT); 24634 } 24635 #endif /* _MULTI_DATAMODEL */ 24636 24637 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 24638 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 24639 24640 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24641 "sd_read_mode1: exit: un:0x%p\n", un); 24642 24643 return (rval); 24644 } 24645 24646 24647 /* 24648 * Function: sr_read_cd_mode2() 24649 * 24650 * Description: This routine is the driver entry point for handling CD-ROM 24651 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24652 * support the READ CD (0xBE) command or the 1st generation 24653 * READ CD (0xD4) command. 24654 * 24655 * Arguments: dev - the device 'dev_t' 24656 * data - pointer to user provided cd read structure specifying 24657 * the lba buffer address and length. 24658 * flag - this argument is a pass through to ddi_copyxxx() 24659 * directly from the mode argument of ioctl(). 
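 *
 *	      Illustrative sketch (not part of the driver), assuming an open
 *	      descriptor fd and a buffer buf of at least nblks * 2336 bytes:
 *
 *			struct cdrom_read cdr;
 *			cdr.cdread_lba = lba;
 *			cdr.cdread_bufaddr = buf;
 *			cdr.cdread_buflen = nblks * 2336;
 *			(void) ioctl(fd, CDROMREADMODE2, &cdr);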
24660  *
24661  * Return Code: the code returned by sd_send_scsi_cmd()
24662  *		EFAULT if ddi_copyxxx() fails
24663  *		ENXIO if fail ddi_get_soft_state
24664  *		EINVAL if data pointer is NULL
24665  */
24666 
24667 static int
24668 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
24669 {
24670 	struct sd_lun		*un;
24671 	struct uscsi_cmd	*com;
24672 	struct cdrom_read	mode2_struct;
24673 	struct cdrom_read	*mode2 = &mode2_struct;
24674 	uchar_t			cdb[CDB_GROUP5];
24675 	int			nblocks;
24676 	int			rval;
24677 #ifdef _MULTI_DATAMODEL
24678 	/* To support ILP32 applications in an LP64 world */
24679 	struct cdrom_read32	cdrom_read32;
24680 	struct cdrom_read32	*cdrd32 = &cdrom_read32;
24681 #endif /* _MULTI_DATAMODEL */
24682 
24683 	if (data == NULL) {
24684 		return (EINVAL);
24685 	}
24686 
24687 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
24688 	    (un->un_state == SD_STATE_OFFLINE)) {
24689 		return (ENXIO);
24690 	}
24691 
24692 #ifdef _MULTI_DATAMODEL
24693 	switch (ddi_model_convert_from(flag & FMODELS)) {
24694 	case DDI_MODEL_ILP32:
24695 		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
24696 			return (EFAULT);
24697 		}
24698 		/* Convert the ILP32 uscsi data from the application to LP64 */
24699 		cdrom_read32tocdrom_read(cdrd32, mode2);
24700 		break;
24701 	case DDI_MODEL_NONE:
24702 		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
24703 			return (EFAULT);
24704 		}
24705 		break;
24706 	}
24707 
24708 #else /* ! _MULTI_DATAMODEL */
24709 	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
24710 		return (EFAULT);
24711 	}
24712 #endif /* _MULTI_DATAMODEL */
24713 
24714 	bzero(cdb, sizeof (cdb));
24715 	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
24716 		/* Read command supported by 1st generation atapi drives */
24717 		cdb[0] = SCMD_READ_CDD4;
24718 	} else {
24719 		/* Universal CD Access Command */
24720 		cdb[0] = SCMD_READ_CD;
24721 	}
24722 
24723 	/*
24724 	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
24725 	 */
24726 	cdb[1] = CDROM_SECTOR_TYPE_MODE2;
24727 
24728 	/* set the start address */
24729 	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF);
24730 	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF);
24731 	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
24732 	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
24733 
24734 	/* set the transfer length */
24735 	nblocks = mode2->cdread_buflen / 2336;
24736 	cdb[6] = (uchar_t)(nblocks >> 16);
24737 	cdb[7] = (uchar_t)(nblocks >> 8);
24738 	cdb[8] = (uchar_t)nblocks;
24739 
24740 	/* set the filter bits */
24741 	cdb[9] = CDROM_READ_CD_USERDATA;
24742 
24743 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24744 	com->uscsi_cdb = (caddr_t)cdb;
24745 	com->uscsi_cdblen = sizeof (cdb);
24746 	com->uscsi_bufaddr = mode2->cdread_bufaddr;
24747 	com->uscsi_buflen = mode2->cdread_buflen;
24748 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24749 
24750 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
24751 	    SD_PATH_STANDARD);
24752 	kmem_free(com, sizeof (*com));
24753 	return (rval);
24754 }
24755 
24756 
24757 /*
24758  * Function: sr_read_mode2()
24759  *
24760  * Description: This routine is the driver entry point for handling CD-ROM
24761  *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
24762  *		do not support the READ CD (0xBE) command.
24763  *
24764  * Arguments: dev - the device 'dev_t'
24765  *	      data - pointer to user provided cd read structure specifying
24766  *		     the lba buffer address and length.
24767  *	      flag - this argument is a pass through to ddi_copyxxx()
24768  *		     directly from the mode argument of ioctl().
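 *
 *	      Worked example (illustrative): with 2336 byte mode 2 sectors,
 *	      cdread_buflen should be a whole multiple of 2336. Reading 16
 *	      sectors, for instance, needs a 16 * 2336 = 37376 byte buffer,
 *	      and the transfer length sent to the target below is
 *	      cdread_buflen / 2336 = 16 blocks.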
24769 * 24770 * Return Code: the code returned by sd_send_scsi_cmd() 24771 * EFAULT if ddi_copyxxx() fails 24772 * ENXIO if fail ddi_get_soft_state 24773 * EINVAL if data pointer is NULL 24774 * EIO if fail to reset block size 24775 * EAGAIN if commands are in progress in the driver 24776 */ 24777 24778 static int 24779 sr_read_mode2(dev_t dev, caddr_t data, int flag) 24780 { 24781 struct sd_lun *un; 24782 struct cdrom_read mode2_struct; 24783 struct cdrom_read *mode2 = &mode2_struct; 24784 int rval; 24785 uint32_t restore_blksize; 24786 struct uscsi_cmd *com; 24787 uchar_t cdb[CDB_GROUP0]; 24788 int nblocks; 24789 24790 #ifdef _MULTI_DATAMODEL 24791 /* To support ILP32 applications in an LP64 world */ 24792 struct cdrom_read32 cdrom_read32; 24793 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24794 #endif /* _MULTI_DATAMODEL */ 24795 24796 if (data == NULL) { 24797 return (EINVAL); 24798 } 24799 24800 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24801 (un->un_state == SD_STATE_OFFLINE)) { 24802 return (ENXIO); 24803 } 24804 24805 /* 24806 * Because this routine will update the device and driver block size 24807 * being used we want to make sure there are no commands in progress. 24808 * If commands are in progress the user will have to try again. 24809 * 24810 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 24811 * in sdioctl to protect commands from sdioctl through to the top of 24812 * sd_uscsi_strategy. See sdioctl for details. 24813 */ 24814 mutex_enter(SD_MUTEX(un)); 24815 if (un->un_ncmds_in_driver != 1) { 24816 mutex_exit(SD_MUTEX(un)); 24817 return (EAGAIN); 24818 } 24819 mutex_exit(SD_MUTEX(un)); 24820 24821 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24822 "sd_read_mode2: entry: un:0x%p\n", un); 24823 24824 #ifdef _MULTI_DATAMODEL 24825 switch (ddi_model_convert_from(flag & FMODELS)) { 24826 case DDI_MODEL_ILP32: 24827 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24828 return (EFAULT); 24829 } 24830 /* Convert the ILP32 uscsi data from the application to LP64 */ 24831 cdrom_read32tocdrom_read(cdrd32, mode2); 24832 break; 24833 case DDI_MODEL_NONE: 24834 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24835 return (EFAULT); 24836 } 24837 break; 24838 } 24839 #else /* ! 
_MULTI_DATAMODEL */
24840 	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
24841 		return (EFAULT);
24842 	}
24843 #endif /* _MULTI_DATAMODEL */
24844 
24845 	/* Store the current target block size for restoration later */
24846 	restore_blksize = un->un_tgt_blocksize;
24847 
24848 	/* Change the device and soft state target block size to 2336 */
24849 	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
24850 		rval = EIO;
24851 		goto done;
24852 	}
24853 
24854 
24855 	bzero(cdb, sizeof (cdb));
24856 
24857 	/* set READ operation */
24858 	cdb[0] = SCMD_READ;
24859 
24860 	/* adjust lba for 2kbyte blocks from 512 byte blocks */
24861 	mode2->cdread_lba >>= 2;
24862 
24863 	/* set the start address */
24864 	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
24865 	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
24866 	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
24867 
24868 	/* set the transfer length */
24869 	nblocks = mode2->cdread_buflen / 2336;
24870 	cdb[4] = (uchar_t)nblocks & 0xFF;
24871 
24872 	/* build command */
24873 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24874 	com->uscsi_cdb = (caddr_t)cdb;
24875 	com->uscsi_cdblen = sizeof (cdb);
24876 	com->uscsi_bufaddr = mode2->cdread_bufaddr;
24877 	com->uscsi_buflen = mode2->cdread_buflen;
24878 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24879 
24880 	/*
24881 	 * Issue SCSI command with user space address for read buffer.
24882 	 *
24883 	 * This sends the command through main channel in the driver.
24884 	 *
24885 	 * Since this is accessed via an IOCTL call, we go through the
24886 	 * standard path, so that if the device was powered down, then
24887 	 * it would be 'awakened' to handle the command.
24888 	 */
24889 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
24890 	    SD_PATH_STANDARD);
24891 
24892 	kmem_free(com, sizeof (*com));
24893 
24894 	/* Restore the device and soft state target block size */
24895 	if (sr_sector_mode(dev, restore_blksize) != 0) {
24896 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24897 		    "can't do switch back to mode 1\n");
24898 		/*
24899 		 * If sd_send_scsi_READ succeeded we still need to report
24900 		 * an error because we failed to reset the block size
24901 		 */
24902 		if (rval == 0) {
24903 			rval = EIO;
24904 		}
24905 	}
24906 
24907 done:
24908 	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
24909 	    "sd_read_mode2: exit: un:0x%p\n", un);
24910 
24911 	return (rval);
24912 }
24913 
24914 
24915 /*
24916  * Function: sr_sector_mode()
24917  *
24918  * Description: This utility function is used by sr_read_mode2 to set the
24919  *		target block size based on the user specified size. This is
24920  *		a legacy implementation based upon a vendor specific mode
24921  *		page.
24922  *
24923  * Arguments: dev - the device 'dev_t'
24924  *	      blksize - flag indicating if the block size is being set to
24925  *			2336 or 512.
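 *
 *	      Note (a summary of the code below): the 16-bit block size is
 *	      written into the last two bytes of the block descriptor
 *	      (select[10] and select[11]), a density bit in the first page
 *	      data byte (select[14]) is set for the 2336 byte mode, and the
 *	      remaining page bytes are copied back from the preceding mode
 *	      sense.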
24925 * 24926 * Return Code: the code returned by sd_send_scsi_cmd() 24927 * EFAULT if ddi_copyxxx() fails 24928 * ENXIO if fail ddi_get_soft_state 24929 * EINVAL if data pointer is NULL 24930 */ 24931 24932 static int 24933 sr_sector_mode(dev_t dev, uint32_t blksize) 24934 { 24935 struct sd_lun *un; 24936 uchar_t *sense; 24937 uchar_t *select; 24938 int rval; 24939 24940 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24941 (un->un_state == SD_STATE_OFFLINE)) { 24942 return (ENXIO); 24943 } 24944 24945 sense = kmem_zalloc(20, KM_SLEEP); 24946 24947 /* Note: This is a vendor specific mode page (0x81) */ 24948 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 24949 SD_PATH_STANDARD)) != 0) { 24950 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 24951 "sr_sector_mode: Mode Sense failed\n"); 24952 kmem_free(sense, 20); 24953 return (rval); 24954 } 24955 select = kmem_zalloc(20, KM_SLEEP); 24956 select[3] = 0x08; 24957 select[10] = ((blksize >> 8) & 0xff); 24958 select[11] = (blksize & 0xff); 24959 select[12] = 0x01; 24960 select[13] = 0x06; 24961 select[14] = sense[14]; 24962 select[15] = sense[15]; 24963 if (blksize == SD_MODE2_BLKSIZE) { 24964 select[14] |= 0x01; 24965 } 24966 24967 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 24968 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 24969 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 24970 "sr_sector_mode: Mode Select failed\n"); 24971 } else { 24972 /* 24973 * Only update the softstate block size if we successfully 24974 * changed the device block mode. 24975 */ 24976 mutex_enter(SD_MUTEX(un)); 24977 sd_update_block_info(un, blksize, 0); 24978 mutex_exit(SD_MUTEX(un)); 24979 } 24980 kmem_free(sense, 20); 24981 kmem_free(select, 20); 24982 return (rval); 24983 } 24984 24985 24986 /* 24987 * Function: sr_read_cdda() 24988 * 24989 * Description: This routine is the driver entry point for handling CD-ROM 24990 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 24991 * the target supports CDDA these requests are handled via a vendor 24992 * specific command (0xD8) If the target does not support CDDA 24993 * these requests are handled via the READ CD command (0xBE). 24994 * 24995 * Arguments: dev - the device 'dev_t' 24996 * data - pointer to user provided CD-DA structure specifying 24997 * the track starting address, transfer length, and 24998 * subcode options. 24999 * flag - this argument is a pass through to ddi_copyxxx() 25000 * directly from the mode argument of ioctl(). 
25001 * 25002 * Return Code: the code returned by sd_send_scsi_cmd() 25003 * EFAULT if ddi_copyxxx() fails 25004 * ENXIO if fail ddi_get_soft_state 25005 * EINVAL if invalid arguments are provided 25006 * ENOTTY 25007 */ 25008 25009 static int 25010 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25011 { 25012 struct sd_lun *un; 25013 struct uscsi_cmd *com; 25014 struct cdrom_cdda *cdda; 25015 int rval; 25016 size_t buflen; 25017 char cdb[CDB_GROUP5]; 25018 25019 #ifdef _MULTI_DATAMODEL 25020 /* To support ILP32 applications in an LP64 world */ 25021 struct cdrom_cdda32 cdrom_cdda32; 25022 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25023 #endif /* _MULTI_DATAMODEL */ 25024 25025 if (data == NULL) { 25026 return (EINVAL); 25027 } 25028 25029 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25030 return (ENXIO); 25031 } 25032 25033 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25034 25035 #ifdef _MULTI_DATAMODEL 25036 switch (ddi_model_convert_from(flag & FMODELS)) { 25037 case DDI_MODEL_ILP32: 25038 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25039 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25040 "sr_read_cdda: ddi_copyin Failed\n"); 25041 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25042 return (EFAULT); 25043 } 25044 /* Convert the ILP32 uscsi data from the application to LP64 */ 25045 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25046 break; 25047 case DDI_MODEL_NONE: 25048 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25049 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25050 "sr_read_cdda: ddi_copyin Failed\n"); 25051 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25052 return (EFAULT); 25053 } 25054 break; 25055 } 25056 #else /* ! _MULTI_DATAMODEL */ 25057 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25058 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25059 "sr_read_cdda: ddi_copyin Failed\n"); 25060 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25061 return (EFAULT); 25062 } 25063 #endif /* _MULTI_DATAMODEL */ 25064 25065 /* 25066 * Since MMC-2 expects max 3 bytes for length, check if the 25067 * length input is greater than 3 bytes 25068 */ 25069 if ((cdda->cdda_length & 0xFF000000) != 0) { 25070 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25071 "cdrom transfer length too large: %d (limit %d)\n", 25072 cdda->cdda_length, 0xFFFFFF); 25073 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25074 return (EINVAL); 25075 } 25076 25077 switch (cdda->cdda_subcode) { 25078 case CDROM_DA_NO_SUBCODE: 25079 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25080 break; 25081 case CDROM_DA_SUBQ: 25082 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25083 break; 25084 case CDROM_DA_ALL_SUBCODE: 25085 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25086 break; 25087 case CDROM_DA_SUBCODE_ONLY: 25088 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25089 break; 25090 default: 25091 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25092 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25093 cdda->cdda_subcode); 25094 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25095 return (EINVAL); 25096 } 25097 25098 /* Build and send the command */ 25099 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25100 bzero(cdb, CDB_GROUP5); 25101 25102 if (un->un_f_cfg_cdda == TRUE) { 25103 cdb[0] = (char)SCMD_READ_CD; 25104 cdb[1] = 0x04; 25105 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25106 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25107 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25108 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25109 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25110 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25111 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25112 cdb[9] = 0x10; 25113 switch (cdda->cdda_subcode) { 25114 case CDROM_DA_NO_SUBCODE : 25115 cdb[10] = 0x0; 25116 break; 25117 case CDROM_DA_SUBQ : 25118 cdb[10] = 0x2; 25119 break; 25120 case CDROM_DA_ALL_SUBCODE : 25121 cdb[10] = 0x1; 25122 break; 25123 case CDROM_DA_SUBCODE_ONLY : 25124 /* FALLTHROUGH */ 25125 default : 25126 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25127 kmem_free(com, sizeof (*com)); 25128 return (ENOTTY); 25129 } 25130 } else { 25131 cdb[0] = (char)SCMD_READ_CDDA; 25132 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25133 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25134 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25135 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25136 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25137 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25138 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25139 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25140 cdb[10] = cdda->cdda_subcode; 25141 } 25142 25143 com->uscsi_cdb = cdb; 25144 com->uscsi_cdblen = CDB_GROUP5; 25145 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25146 com->uscsi_buflen = buflen; 25147 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25148 25149 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25150 SD_PATH_STANDARD); 25151 25152 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25153 kmem_free(com, sizeof (*com)); 25154 return (rval); 25155 } 25156 25157 25158 /* 25159 * Function: sr_read_cdxa() 25160 * 25161 * Description: This routine is the driver entry point for handling CD-ROM 25162 * ioctl requests to return CD-XA (Extended Architecture) data. 25163 * (CDROMCDXA). 25164 * 25165 * Arguments: dev - the device 'dev_t' 25166 * data - pointer to user provided CD-XA structure specifying 25167 * the data starting address, transfer length, and format 25168 * flag - this argument is a pass through to ddi_copyxxx() 25169 * directly from the mode argument of ioctl(). 25170 * 25171 * Return Code: the code returned by sd_send_scsi_cmd() 25172 * EFAULT if ddi_copyxxx() fails 25173 * ENXIO if fail ddi_get_soft_state 25174 * EINVAL if data pointer is NULL 25175 */ 25176 25177 static int 25178 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25179 { 25180 struct sd_lun *un; 25181 struct uscsi_cmd *com; 25182 struct cdrom_cdxa *cdxa; 25183 int rval; 25184 size_t buflen; 25185 char cdb[CDB_GROUP5]; 25186 uchar_t read_flags; 25187 25188 #ifdef _MULTI_DATAMODEL 25189 /* To support ILP32 applications in an LP64 world */ 25190 struct cdrom_cdxa32 cdrom_cdxa32; 25191 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25192 #endif /* _MULTI_DATAMODEL */ 25193 25194 if (data == NULL) { 25195 return (EINVAL); 25196 } 25197 25198 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25199 return (ENXIO); 25200 } 25201 25202 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25203 25204 #ifdef _MULTI_DATAMODEL 25205 switch (ddi_model_convert_from(flag & FMODELS)) { 25206 case DDI_MODEL_ILP32: 25207 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25208 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25209 return (EFAULT); 25210 } 25211 /* 25212 * Convert the ILP32 uscsi data from the 25213 * application to LP64 for internal use. 
25214 	 */
25215 		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
25216 		break;
25217 	case DDI_MODEL_NONE:
25218 		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
25219 			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25220 			return (EFAULT);
25221 		}
25222 		break;
25223 	}
25224 #else /* ! _MULTI_DATAMODEL */
25225 	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
25226 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25227 		return (EFAULT);
25228 	}
25229 #endif /* _MULTI_DATAMODEL */
25230 
25231 	/*
25232 	 * Since MMC-2 expects max 3 bytes for length, check if the
25233 	 * length input is greater than 3 bytes
25234 	 */
25235 	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
25236 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
25237 		    "cdrom transfer length too large: %d (limit %d)\n",
25238 		    cdxa->cdxa_length, 0xFFFFFF);
25239 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25240 		return (EINVAL);
25241 	}
25242 
25243 	switch (cdxa->cdxa_format) {
25244 	case CDROM_XA_DATA:
25245 		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
25246 		read_flags = 0x10;
25247 		break;
25248 	case CDROM_XA_SECTOR_DATA:
25249 		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
25250 		read_flags = 0xf8;
25251 		break;
25252 	case CDROM_XA_DATA_W_ERROR:
25253 		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
25254 		read_flags = 0xfc;
25255 		break;
25256 	default:
25257 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25258 		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
25259 		    cdxa->cdxa_format);
25260 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25261 		return (EINVAL);
25262 	}
25263 
25264 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25265 	bzero(cdb, CDB_GROUP5);
25266 	if (un->un_f_mmc_cap == TRUE) {
25267 		cdb[0] = (char)SCMD_READ_CD;
25268 		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
25269 		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
25270 		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
25271 		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
25272 		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
25273 		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
25274 		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
25275 		cdb[9] = (char)read_flags;
25276 	} else {
25277 		/*
25278 		 * Note: A vendor specific command (0xDB) is being used here to
25279 		 * request CD-XA data.
25280 */ 25281 cdb[0] = (char)SCMD_READ_CDXA; 25282 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25283 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25284 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25285 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25286 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25287 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25288 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25289 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25290 cdb[10] = cdxa->cdxa_format; 25291 } 25292 com->uscsi_cdb = cdb; 25293 com->uscsi_cdblen = CDB_GROUP5; 25294 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25295 com->uscsi_buflen = buflen; 25296 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25297 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25298 SD_PATH_STANDARD); 25299 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25300 kmem_free(com, sizeof (*com)); 25301 return (rval); 25302 } 25303 25304 25305 /* 25306 * Function: sr_eject() 25307 * 25308 * Description: This routine is the driver entry point for handling CD-ROM 25309 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25310 * 25311 * Arguments: dev - the device 'dev_t' 25312 * 25313 * Return Code: the code returned by sd_send_scsi_cmd() 25314 */ 25315 25316 static int 25317 sr_eject(dev_t dev) 25318 { 25319 struct sd_lun *un; 25320 int rval; 25321 25322 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25323 (un->un_state == SD_STATE_OFFLINE)) { 25324 return (ENXIO); 25325 } 25326 25327 /* 25328 * To prevent race conditions with the eject 25329 * command, keep track of an eject command as 25330 * it progresses. If we are already handling 25331 * an eject command in the driver for the given 25332 * unit and another request to eject is received 25333 * immediately return EAGAIN so we don't lose 25334 * the command if the current eject command fails. 25335 */ 25336 mutex_enter(SD_MUTEX(un)); 25337 if (un->un_f_ejecting == TRUE) { 25338 mutex_exit(SD_MUTEX(un)); 25339 return (EAGAIN); 25340 } 25341 un->un_f_ejecting = TRUE; 25342 mutex_exit(SD_MUTEX(un)); 25343 25344 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25345 SD_PATH_STANDARD)) != 0) { 25346 mutex_enter(SD_MUTEX(un)); 25347 un->un_f_ejecting = FALSE; 25348 mutex_exit(SD_MUTEX(un)); 25349 return (rval); 25350 } 25351 25352 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25353 SD_PATH_STANDARD); 25354 25355 if (rval == 0) { 25356 mutex_enter(SD_MUTEX(un)); 25357 sr_ejected(un); 25358 un->un_mediastate = DKIO_EJECTED; 25359 un->un_f_ejecting = FALSE; 25360 cv_broadcast(&un->un_state_cv); 25361 mutex_exit(SD_MUTEX(un)); 25362 } else { 25363 mutex_enter(SD_MUTEX(un)); 25364 un->un_f_ejecting = FALSE; 25365 mutex_exit(SD_MUTEX(un)); 25366 } 25367 return (rval); 25368 } 25369 25370 25371 /* 25372 * Function: sr_ejected() 25373 * 25374 * Description: This routine updates the soft state structure to invalidate the 25375 * geometry information after the media has been ejected or a 25376 * media eject has been detected. 
25377  *
25378  * Arguments: un - driver soft state (unit) structure
25379  */
25380 
25381 static void
25382 sr_ejected(struct sd_lun *un)
25383 {
25384 	struct sd_errstats *stp;
25385 
25386 	ASSERT(un != NULL);
25387 	ASSERT(mutex_owned(SD_MUTEX(un)));
25388 
25389 	un->un_f_blockcount_is_valid = FALSE;
25390 	un->un_f_tgt_blocksize_is_valid = FALSE;
25391 	mutex_exit(SD_MUTEX(un));
25392 	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
25393 	mutex_enter(SD_MUTEX(un));
25394 
25395 	if (un->un_errstats != NULL) {
25396 		stp = (struct sd_errstats *)un->un_errstats->ks_data;
25397 		stp->sd_capacity.value.ui64 = 0;
25398 	}
25399 }
25400 
25401 
25402 /*
25403  * Function: sr_check_wp()
25404  *
25405  * Description: This routine checks the write protection of removable media
25406  *		disks and hotpluggable devices via the write protect bit of
25407  *		the Mode Page Header device specific field. Some devices
25408  *		choke on an unsupported mode page. To work around this issue,
25409  *		this routine has been implemented to use the 0x3f mode page
25410  *		(request for all pages) for all device types.
25411  *
25412  * Arguments: dev - the device 'dev_t'
25413  *
25414  * Return Code: int indicating if the device is write protected (1) or not (0)
25415  *
25416  * Context: Kernel thread.
25417  *
25418  */
25419 
25420 static int
25421 sr_check_wp(dev_t dev)
25422 {
25423 	struct sd_lun	*un;
25424 	uchar_t		device_specific;
25425 	uchar_t		*sense;
25426 	int		hdrlen;
25427 	int		rval = FALSE;
25428 
25429 	/*
25430 	 * Note: The return codes for this routine should be reworked to
25431 	 * properly handle the case of a NULL softstate.
25432 	 */
25433 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25434 		return (FALSE);
25435 	}
25436 
25437 	if (un->un_f_cfg_is_atapi == TRUE) {
25438 		/*
25439 		 * The mode page contents are not required; set the allocation
25440 		 * length for the mode page header only
25441 		 */
25442 		hdrlen = MODE_HEADER_LENGTH_GRP2;
25443 		sense = kmem_zalloc(hdrlen, KM_SLEEP);
25444 		if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen,
25445 		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0)
25446 			goto err_exit;
25447 		device_specific =
25448 		    ((struct mode_header_grp2 *)sense)->device_specific;
25449 	} else {
25450 		hdrlen = MODE_HEADER_LENGTH;
25451 		sense = kmem_zalloc(hdrlen, KM_SLEEP);
25452 		if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen,
25453 		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0)
25454 			goto err_exit;
25455 		device_specific =
25456 		    ((struct mode_header *)sense)->device_specific;
25457 	}
25458 
25459 	/*
25460 	 * The mode sense succeeded; report write protection from the device
25461 	 * specific byte. (If the mode sense failed, we jumped to err_exit
25462 	 * with rval still FALSE: not all disks understand this query, and
25463 	 * such devices are assumed not to be writable.)
25464 	 */
25465 	if (device_specific & WRITE_PROTECT) {
25466 		rval = TRUE;
25467 	}
25468 
25469 err_exit:
25470 	kmem_free(sense, hdrlen);
25471 	return (rval);
25472 }
25473 
25474 /*
25475  * Function: sr_volume_ctrl()
25476  *
25477  * Description: This routine is the driver entry point for handling CD-ROM
25478  *		audio output volume ioctl requests. (CDROMVOLCTRL)
25479  *
25480  * Arguments: dev - the device 'dev_t'
25481  *	      data - pointer to user audio volume control structure
25482  *	      flag - this argument is a pass through to ddi_copyxxx()
25483  *		     directly from the mode argument of ioctl().
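 *
 *	      Illustrative sketch (not part of the driver), assuming an open
 *	      descriptor fd; 0xff requests maximum volume for a channel:
 *
 *			struct cdrom_volctrl vol;
 *			vol.channel0 = 0xff;
 *			vol.channel1 = 0xff;
 *			(void) ioctl(fd, CDROMVOLCTRL, &vol);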
25483 * 25484 * Return Code: the code returned by sd_send_scsi_cmd() 25485 * EFAULT if ddi_copyxxx() fails 25486 * ENXIO if fail ddi_get_soft_state 25487 * EINVAL if data pointer is NULL 25488 * 25489 */ 25490 25491 static int 25492 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25493 { 25494 struct sd_lun *un; 25495 struct cdrom_volctrl volume; 25496 struct cdrom_volctrl *vol = &volume; 25497 uchar_t *sense_page; 25498 uchar_t *select_page; 25499 uchar_t *sense; 25500 uchar_t *select; 25501 int sense_buflen; 25502 int select_buflen; 25503 int rval; 25504 25505 if (data == NULL) { 25506 return (EINVAL); 25507 } 25508 25509 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25510 (un->un_state == SD_STATE_OFFLINE)) { 25511 return (ENXIO); 25512 } 25513 25514 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25515 return (EFAULT); 25516 } 25517 25518 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25519 struct mode_header_grp2 *sense_mhp; 25520 struct mode_header_grp2 *select_mhp; 25521 int bd_len; 25522 25523 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 25524 select_buflen = MODE_HEADER_LENGTH_GRP2 + 25525 MODEPAGE_AUDIO_CTRL_LEN; 25526 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25527 select = kmem_zalloc(select_buflen, KM_SLEEP); 25528 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 25529 sense_buflen, MODEPAGE_AUDIO_CTRL, 25530 SD_PATH_STANDARD)) != 0) { 25531 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25532 "sr_volume_ctrl: Mode Sense Failed\n"); 25533 kmem_free(sense, sense_buflen); 25534 kmem_free(select, select_buflen); 25535 return (rval); 25536 } 25537 sense_mhp = (struct mode_header_grp2 *)sense; 25538 select_mhp = (struct mode_header_grp2 *)select; 25539 bd_len = (sense_mhp->bdesc_length_hi << 8) | 25540 sense_mhp->bdesc_length_lo; 25541 if (bd_len > MODE_BLK_DESC_LENGTH) { 25542 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25543 "sr_volume_ctrl: Mode Sense returned invalid " 25544 "block descriptor length\n"); 25545 kmem_free(sense, sense_buflen); 25546 kmem_free(select, select_buflen); 25547 return (EIO); 25548 } 25549 sense_page = (uchar_t *) 25550 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 25551 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 25552 select_mhp->length_msb = 0; 25553 select_mhp->length_lsb = 0; 25554 select_mhp->bdesc_length_hi = 0; 25555 select_mhp->bdesc_length_lo = 0; 25556 } else { 25557 struct mode_header *sense_mhp, *select_mhp; 25558 25559 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25560 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25561 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25562 select = kmem_zalloc(select_buflen, KM_SLEEP); 25563 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 25564 sense_buflen, MODEPAGE_AUDIO_CTRL, 25565 SD_PATH_STANDARD)) != 0) { 25566 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25567 "sr_volume_ctrl: Mode Sense Failed\n"); 25568 kmem_free(sense, sense_buflen); 25569 kmem_free(select, select_buflen); 25570 return (rval); 25571 } 25572 sense_mhp = (struct mode_header *)sense; 25573 select_mhp = (struct mode_header *)select; 25574 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 25575 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25576 "sr_volume_ctrl: Mode Sense returned invalid " 25577 "block descriptor length\n"); 25578 kmem_free(sense, sense_buflen); 25579 kmem_free(select, select_buflen); 25580 return (EIO); 25581 } 25582 sense_page = (uchar_t *) 25583 (sense + MODE_HEADER_LENGTH + 
		    sense_mhp->bdesc_length);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
		select_mhp->length = 0;
		select_mhp->bdesc_length = 0;
	}
	/*
	 * Note: An audio control data structure could be created and overlaid
	 * on the following in place of the array indexing method implemented.
	 */

	/* Build the select data for the user volume data */
	select_page[0] = MODEPAGE_AUDIO_CTRL;
	select_page[1] = 0xE;
	/* Set the immediate bit */
	select_page[2] = 0x04;
	/* Zero out reserved fields */
	select_page[3] = 0x00;
	select_page[4] = 0x00;
	/* Return sense data for fields not to be modified */
	select_page[5] = sense_page[5];
	select_page[6] = sense_page[6];
	select_page[7] = sense_page[7];
	/* Set the user specified volume levels for channel 0 and 1 */
	select_page[8] = 0x01;
	select_page[9] = vol->channel0;
	select_page[10] = 0x02;
	select_page[11] = vol->channel1;
	/* Channel 2 and 3 are currently unsupported so return the sense data */
	select_page[12] = sense_page[12];
	select_page[13] = sense_page[13];
	select_page[14] = sense_page[14];
	select_page[15] = sense_page[15];

	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	}

	kmem_free(sense, sense_buflen);
	kmem_free(select, select_buflen);
	return (rval);
}


/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information. (CDROMREADOFFSET)
 *		The address of the first track in the last session of a
 *		multi-session CD-ROM is returned.
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check
 *		here or in the ioctl routine.
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to an int to hold the requested address
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if the data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	caddr_t			buffer;
	char			cdb[CDB_GROUP1];
	int			session_offset = 0;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Bytes 7 & 8 form the 12-byte allocation length for a single entry
	 * (4-byte TOC response header + 8 bytes of response data).
	 */
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	/* Byte 9 is the control byte. A vendor specific value is used. */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The device returns the offset in units of the current
		 * lbasize blocks. Convert it to 2K blocks before returning
		 * it to the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be initialized.
 *		un - sd_lun structure for the device.
 *		flag - the km flags passed to the constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
 *		un - sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the specified range of blocks to ensure that a
 *		read-modify-write is atomic and that no other I/O writes
 *		to the same location. The range is specified in terms
 *		of start and end blocks. Block numbers are the actual
 *		media block numbers, not system block numbers.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		startb - The starting block number
 *		endb - The end block number
 *		typ - type of i/o - simple/read_modify_write
 *
 * Return Code: wm  - pointer to the wmap structure.
 *
 * Context: This routine can sleep.
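 *
 *		For illustration only, a typical caller (modeled on
 *		sd_mapblocksize_iostart()) brackets a read-modify-write
 *		with this lock/unlock pair:
 *
 *			wm = sd_range_lock(un, startb, endb, SD_WTYPE_RMW);
 *			(read the target blocks, merge in the caller's
 *			data, then issue the write)
 *			sd_range_unlock(un, wm);
 *
 *		Concurrent I/O to an overlapping range blocks inside
 *		sd_range_lock() until the range is released.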
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending, then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Don't keep an onlist wmp
						 * while waiting; this macro
						 * also sets wmp = NULL.
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap we will wait
					 * on. Since tmp_wmp points to the
					 * in-use wmap, set sl_wmp to tmp_wmp
					 * and change the state to wait.
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked; try to get a wmap.
			 * First attempt the allocation with KM_NOSLEEP; we
			 * want to avoid sleeping if possible, since sleeping
			 * requires releasing the sd mutex.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * We released the mutex, so recheck by going
				 * back to the check-list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We have the wmap, so we can exit the state
				 * machine. Do the housekeeping first: place
				 * the wmap on the wmap list if it is not
				 * already there, then set the state to done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list, link it in.
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one is
			 * waiting for it.
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability
			 * of the range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Determine whether there is any I/O overlapping the given
 *		range. Returns the write map of the first such I/O, NULL
 *		otherwise.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		startb - The starting block number
 *		endb - The end block number
 *
 * Return Code: wm - pointer to the first overlapping wmap, or NULL.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
		if ((startb <= wmp->wm_start) && (endb >= wmp->wm_end)) {
			/* the new range fully contains this busy range */
			break;
		}
	}

	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments: un - sd_lun structure for the device.
 *		wmp - sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free the write map if nobody else is waiting on it.
 *
 * Arguments: un - sd_lun structure for the device.
 *		wm - sd_w_map whose range is to be unlocked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be freed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request. This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
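 *
 *		For illustration (assuming un_sys_blocksize = 512 and
 *		un_tgt_blocksize = 2048): a 512-byte write to system block
 *		10 touches bytes 5120..5631, which lie inside target block 2
 *		(bytes 4096..6143). Target block 2 must first be read, the
 *		512 bytes at offset 1024 within it modified, and the full
 *		2048-byte block written back; this function initiates that
 *		final write.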
 *
 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the read
	 * portion of the rmw request completing under interrupt context.
	 * The write request must be sent from here down the iostart chain
	 * as if it were being sent from sd_mapblocksize_iostart(), so we
	 * use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine is called from sddump. If sddump is called
 *		with an I/O that is not aligned on a device blocksize
 *		boundary, then the write has to be converted to a
 *		read-modify-write. Do the read part here in order to keep
 *		sddump simple. Note that the sd_mutex is held across the
 *		call to this routine.
 *
 * Arguments: un	- sd_lun
 *		blkno - block number in terms of media block size.
 *		nblk - number of blocks.
 *		bpp - pointer to pointer to the buf structure. On return
 *			from this function, *bpp points to the valid buffer
 *			to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * sd_scsi_poll() returns 0 (success) if the command completes
		 * and the status block is STATUS_GOOD.
		 * We should only check errors if this condition is not
		 * true. Even then we should send our own request sense
		 * packet only if we have a check condition and auto
		 * request sense has not been performed by the hba.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE first; give up if the device is gone,
		 * as there is no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if ((pkt->pkt_state & STATE_ARQ_DONE) == 0) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}


/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
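		 *
		 * For illustration (hypothetical queue contents), this is a
		 * constant-time splice rather than a per-buf move: with
		 *
		 *	failfast: F1->F2	waitq: W1->W2->W3
		 *
		 * linking the failfast tail to the waitq head yields
		 *
		 *	failfast: F1->F2->W1->W2->W3	waitq: (empty)
		 *
		 * after which only the head/tail pointers are updated below.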
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go through the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * update kstat since the bp is moved out of
			 * the waitq
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare the next I/O operation using DMA_PARTIAL
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate the next block number and the amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the
	 * HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible failure return from
	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}
#endif

/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
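 *
 *		For illustration, the resulting panic message has the form
 *		(with a hypothetical device path):
 *
 *			Reservation Conflict
 *			Disk: /pci@1f,4000/scsi@3/sd@1,0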
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN + MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for fault injection, used for error analysis by
 * injecting faults into multiple layers of the driver.
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		fault injection ioctls used to inject errors into the
 *		layer model.
 *
 * Arguments: cmd - the ioctl cmd received
 *		arg - the arguments from the user, also used to return data
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from the fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR entries allowed in the queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			/* No count was supplied, so push a single set */
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return the log buffer from the injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len + 1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine adds buf to the existing injection log
 *		for retrieval via sd_faultinjection_ioctl() for use in
 *		fault detection and recovery.
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add the logged value to the injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
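 *
 *		For illustration only: a hypothetical user-level test could
 *		arm an injection through the fifo ioctls above, e.g.
 *
 *			struct sd_fi_pkt fp;
 *			uint_t cnt = 1;
 *			memset(&fp, 0xff, sizeof (fp));
 *			fp.pkt_reason = CMD_TRAN_ERR;
 *			(void) ioctl(fd, SDIOCSTART, NULL);
 *			(void) ioctl(fd, SDIOCINSERTPKT, &fp);
 *			(void) ioctl(fd, SDIOCPUSH, &cnt);
 *			(void) ioctl(fd, SDIOCRUN, NULL);
 *
 *		This stages one pkt fault (unused fields are preset to all
 *		ones, the "not set" sentinel used by this code), pushes it
 *		onto the fifo, and arms injection; this routine then applies
 *		it when the next command completes.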
 *
 * Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb, and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off, return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}


	/* take the next set off the fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the pointers are set to
	 * NULL. Since we use these pointers to determine whether values
	 * were set, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in the conf file should already have been processed,
 * including the "hotpluggable" property.
 *
 * The sd driver distinguishes 3 different types of devices: removable media,
 * non-removable media, and hotpluggable. The differences are defined below:
 *
 * 1. Device ID
 *
 * The device ID of a device is used to identify this device. Refer to
 * ddi_devid_register(9F).
 *
 * For a non-removable media disk device which can provide the 0x80 or 0x83
 * VPD page (refer to the INQUIRY command of the SCSI SPC specification), a
 * unique device ID is created to identify this device. For other
 * non-removable media devices, a default device ID is created only if this
 * device has at least 2 alternate cylinders. Otherwise, this device has no
 * devid.
 *
 * -------------------------------------------------------
 * removable media   hotpluggable  | Can Have Device ID
 * -------------------------------------------------------
 *   false             false       |     Yes
 *   false             true        |     Yes
 *   true              x           |     No
 * -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 * In the SCSI specs, only some commands in the group 4 command set can use
 * 8-byte addresses, which are needed to access storage beyond 2TB. Other
 * commands have no such capability. Without group 4 support, it is
 * impossible to make full use of a disk with a capacity larger than 2TB.
 *
 * -----------------------------------------------
 * removable media  hotpluggable   LP64  |  Group
 * -----------------------------------------------
 *   false            false       false  |   1
 *   false            false       true   |   4
 *   false            true        false  |   1
 *   false            true        true   |   4
 *   true             x           x      |   5
 * -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 * If a direct-access disk has no EFI label, sd will check if it has a
 * valid VTOC label. Now, sd also does that check for removable media
 * and hotpluggable devices.
 *
 * --------------------------------------------------------------
 * Direct-Access   removable media    hotpluggable |  Check Label
 * --------------------------------------------------------------
 *     false          false           false        |   No
 *     false          false           true         |   No
 *     false          true            false        |   Yes
 *     false          true            true         |   Yes
 *     true            x               x           |   Yes
 * --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 * As section 3 says, sd checks whether some kinds of devices have a VTOC
 * label. If those devices have no valid VTOC label, sd(7d) will attempt
 * to create a default VTOC for them. Currently sd creates a default VTOC
 * label for all devices on the x86 platform (VTOC_16), but only for
 * removable media devices on SPARC (VTOC_8).
 *
 * -----------------------------------------------------------
 *       removable media hotpluggable platform   |   Default Label
 * -----------------------------------------------------------
 *             false          false     x86      |     Yes
 *             false          false    sparc     |     No
 *             false          true      x86      |     Yes
 *             false          true     sparc     |     Yes
 *             true             x        x       |     Yes
 * -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 * Sd supports non-512-byte blocksizes for removable media devices only.
 * For other devices, only a 512-byte blocksize is supported. This may
 * change in the near future because some RAID devices require a
 * non-512-byte blocksize.
 *
 * -----------------------------------------------------------
 *   removable media    hotpluggable    | non-512-byte blocksize
 * -----------------------------------------------------------
 *           false          false       |   No
 *           false          true        |   No
 *           true            x          |   Yes
 * -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl. This ioctl is used
 * to query whether a device is a removable media device. It returns 1 for
 * removable media devices, and 0 for others.
 *
 * The automatic mounting subsystem should distinguish between the types
 * of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 * Fdisk is the traditional partitioning method on the x86 platform. The
 * sd(7d) driver supports fdisk partitions only on x86. On SPARC, sd
 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
 * fdisk partitions on both the x86 and SPARC platforms.
 *
 * -----------------------------------------------------------
 *       platform   removable media  USB/1394   |  fdisk supported
 * -----------------------------------------------------------
 *        x86         X               X         |       true
 * -----------------------------------------------------------
 *        sparc       X               X         |       false
 * -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 * Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 * support reading/writing the mboot for removable media devices on SPARC.
 *
 * -----------------------------------------------------------
 *       platform   removable media  USB/1394   |  mboot supported
 * -----------------------------------------------------------
 *        x86         X               X         |       true
 * -----------------------------------------------------------
 *        sparc       false           false     |       false
 *        sparc       false           true      |       true
 *        sparc       true            false     |       true
 *        sparc       true            true      |       true
 * -----------------------------------------------------------
 *
 *
 * 9. Error handling during device open
 *
 * If a disk device fails to open, an errno is returned. For some kinds
 * of errors, the errno returned differs depending on whether the device
 * is a removable media device. This brings USB/1394 hard disks in line
 * with expected hard disk behavior. It is not expected that this breaks
 * any application.
 *
 * ------------------------------------------------------
 *   removable media    hotpluggable   |  errno
 * ------------------------------------------------------
 *   false              false          |  EIO
 *   false              true           |  EIO
 *   true               x              |  ENXIO
 * ------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 * These IOCTLs are applicable only to removable media devices.
 *
 * -----------------------------------------------------------
 *   removable media    hotpluggable   | DKIOCEJECT, CDROMEJECT
 * -----------------------------------------------------------
 *   false              false          |  No
 *   false              true           |  No
 *   true               x              |  Yes
 * -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 * sd creates partition kstats for non-removable media devices. USB and
 * Firewire hard disks now have partition kstats.
 *
 * ------------------------------------------------------
 *   removable media    hotpluggable   |  kstat
 * ------------------------------------------------------
 *   false              false          |  Yes
 *   false              true           |  Yes
 *   true               x              |  No
 * ------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 * The sd driver creates a "removable-media" property for removable media
 * devices. Parent nexus drivers create a "hotpluggable" property if they
 * support hotplugging.
 *
 * ---------------------------------------------------------------------
 * removable media   hotpluggable |  "removable-media"   "hotpluggable"
 * ---------------------------------------------------------------------
 *   false             false      |      No                  No
 *   false             true       |      No                  Yes
 *   true              false      |      Yes                 No
 *   true              true       |      Yes                 Yes
 * ---------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 * sd only power manages removable media devices or devices that support
 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 * A parent nexus that supports hotplugging can also set "pm-capable"
 * if the disk can be power managed.
 *
 * ------------------------------------------------------------
 *  removable media hotpluggable pm-capable  |   power manage
 * ------------------------------------------------------------
 *     false          false        false     |     No
 *     false          false        true      |     Yes
 *     false          true         false     |     No
 *     false          true         true      |     Yes
 *     true           x            x         |     Yes
 * ------------------------------------------------------------
 *
 * USB and Firewire hard disks can now be power managed independently
 * of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 * Currently, sd doesn't permit a fixed disk device with a capacity
 * larger than 1TB to be used in a 32-bit operating system environment.
 * However, sd does not enforce this restriction for removable media
 * devices. Instead, it assumes that removable media devices cannot have
 * a capacity larger than 1TB. Therefore, using those devices on a 32-bit
 * system is partially supported, which can cause some unexpected results.
 *
 * ---------------------------------------------------------------------
 *  removable media    USB/1394 | Capacity > 1TB |   Used in 32-bit env
 * ---------------------------------------------------------------------
 *  false              false    |   true         |     no
 *  false              true     |   true         |     no
 *  true               false    |   true         |     Yes
 *  true               true     |   true         |     Yes
 * ---------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 * When a removable media device is opened for writing without the NDELAY
 * flag, sd checks whether the device is write-protected. An attempt to
 * open a write-protected device without the NDELAY flag will fail.
 *
 * ------------------------------------------------------------
 *  removable media    USB/1394   |   WP Check
 * ------------------------------------------------------------
 *  false              false      |     No
 *  false              true       |     No
 *  true               false      |     Yes
 *  true               true       |     Yes
 * ------------------------------------------------------------
 *
 *
 * 16. syslog when a corrupted VTOC is encountered
 *
 * Currently, if an invalid VTOC is encountered, sd prints a syslog
 * message only for fixed SCSI disks.
 *
 * ------------------------------------------------------------
 *  removable media    USB/1394   |   print syslog
 * ------------------------------------------------------------
 *  false              false      |     Yes
 *  false              true       |     No
 *  true               false      |     No
 *  true               true       |     No
 * ------------------------------------------------------------
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after the
		 * device has been opened. Thus we must support this
		 * operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte blocksizes of removable media devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive; in that case we
		 * don't care whether the device is writable. Without the
		 * NDELAY flag, however, we need to check whether the media
		 * is write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor the media
		 * state; when media is inserted or ejected, notify syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command.
		 * Therefore, we should check whether a device supports it
		 * before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 *	FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we cannot use this command to check whether
		 * a removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero length (Boolean) property
		 * removable-media for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked, since if the property cannot be created we do
		 * not want the attach to fail altogether. This is consistent
		 * with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * kinds of format: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats. The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero, then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist, then pm_capable_prop will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1).
/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. The start_block is in
 *	units of the system block size; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t buffer_size = reqlength;
	int rval;
	diskaddr_t cap;
	uint32_t lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block number
		 * and remember the byte offset to the beginning of dk_label.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to a multiple of the target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be inaccurate by a
	 * bounded amount (in the interest of response latency). As a
	 * result, failed READs are commonplace (due to the reading of
	 * metadata rather than data). Depending on the per-vendor/drive
	 * sense data, a failed READ can cause many (unnecessary) retries.
	 */
	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		/* for an unaligned read, copy the data out only on success */
		if ((dkl != NULL) && (rval == 0))
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}
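/*
 * Illustrative worked example for the block-size conversion in sd_tg_rdwr()
 * above, not part of the driver. With a 512-byte system block size and a
 * 2048-byte target block size, a read of reqlength = 1024 bytes at
 * start_block = 3 gives:
 *
 *	first_byte  = 3 * 512                      = 1536
 *	real_addr   = 1536 / 2048                  = 0
 *	end_block   = (1536 + 1024 + 2047) / 2048  = 2
 *	buffer_size = (2 - 0) * 2048               = 4096
 *
 * Since 1536 % 2048 != 0, the request is unaligned: a 4096-byte bounce
 * buffer (dkl) covering target blocks 0-1 is read, and the 1024 requested
 * bytes are copied out starting at byte offset 1536 within it.
 */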
static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t cap;
	uint32_t lbasize;
	int path_flag = (int)(uintptr_t)tg_cookie;
	int ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
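/*
 * Illustrative usage sketch only, not part of the driver: cmlb reaches
 * sd_tg_rdwr() and sd_tg_getinfo() through the tg_ops vector (sd_tgops)
 * that sd registers at attach time, passing the path flag as tg_cookie.
 * A direct (hypothetical) call sequence would look like the following;
 * the wrapper function name is invented for this example.
 */
#if 0
static void
sd_example_tg_usage(dev_info_t *devi)
{
	diskaddr_t	cap;
	uint32_t	lbasize;

	/* Fetch capacity and block size through the TG_* info commands. */
	if (sd_tg_getinfo(devi, TG_GETCAPACITY, &cap,
	    (void *)SD_PATH_DIRECT) == 0 &&
	    sd_tg_getinfo(devi, TG_GETBLOCKSIZE, &lbasize,
	    (void *)SD_PATH_DIRECT) == 0) {
		/* cap is the target block count; lbasize is bytes/block */
	}
}
#endif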