/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides, things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
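/*
 * Illustrative sketch only (not part of this driver): one way the
 * "interconnect-type" property described above could be read with the
 * standard DDI property interface, falling back to the compile-time
 * default chosen below when the HBA does not define the property.
 * The helper name here is hypothetical.
 *
 *	static int
 *	guess_interconnect_type(dev_info_t *devi)
 *	{
 *		switch (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
 *		    "interconnect-type", -1)) {
 *		case INTERCONNECT_FIBRE:
 *		case INTERCONNECT_SSA:
 *		case INTERCONNECT_FABRIC:
 *			return (SD_INTERCONNECT_FIBRE);
 *		default:
 *			return (SD_DEFAULT_INTERCONNECT_TYPE);
 *		}
 *	}
 */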
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static char *sd_max_xfer_size		= "ssd_max_xfer_size";
static char *sd_config_list		= "ssd-config-list";
#else
static char *sd_max_xfer_size		= "sd_max_xfer_size";
static char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc
#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void *sd_state				= NULL;
int sd_io_time				= SD_IO_TIME;
int sd_failfast_enable			= 1;
int sd_ua_retry_count			= SD_UA_RETRY_COUNT;
int sd_report_pfa			= 1;
int sd_max_throttle			= SD_MAX_THROTTLE;
int sd_min_throttle			= SD_MIN_THROTTLE;
int sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable		= TRUE;

int sd_retry_on_reservation_conflict	= 1;
int sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain; each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * to the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
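/*
 * Illustrative sketch only (not part of this driver): the probe cache above
 * keys one array of per-target scsi_probe() results off each HBA devinfo
 * node, so that a target already known to be unresponsive need not be probed
 * again.  In outline (the helper name is hypothetical, allocation of missing
 * nodes and error handling omitted):
 *
 *	static struct sd_scsi_probe_cache *
 *	probe_cache_find(dev_info_t *pdip)
 *	{
 *		struct sd_scsi_probe_cache *cp;
 *
 *		ASSERT(MUTEX_HELD(&sd_scsi_probe_cache_mutex));
 *		for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
 *			if (cp->pdip == pdip)
 *				break;
 *		}
 *		return (cp);
 *	}
 *
 * A caller could then consult cache[target] (e.g. for a cached
 * SCSIPROBE_NORESP result) before deciding whether to issue a real
 * scsi_probe().
 */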
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE, 0, 0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0, 0, 0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE, 0, 0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0, 0, 0, 0, 0, 0, 0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE, 0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))

static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE, 0,
	LSI_NOTREADY_RETRIES,
	0, 0, 0, 0, 0, 0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE, 0,
	SYMBIOS_NOTREADY_RETRIES,
	0, 0, 0, 0, 0, 0
};

static sd_tunables lsi_properties = {
	0, 0,
	LSI_NOTREADY_RETRIES,
	0, 0, 0, 0, 0, 0
};

static sd_tunables lsi_oem_properties = {
	0, 0,
	LSI_OEM_NOTREADY_RETRIES,
	0, 0, 0, 0, 0, 0
};


#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0, 0, 0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps,
 * something else as defined by the flags at a future time).  device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure.  Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure.  Device type is searched as far as the device_id string is
 * defined.  Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device.
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
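/*
 * Illustrative sketch only (not part of this driver): the blank-collapsing,
 * case-insensitive comparison described above could look roughly like the
 * helper below (the name is hypothetical; this simplified form also
 * tolerates leading and trailing blanks, while the driver's actual
 * sd_blank_cmp() takes the unit and an id length and handles more cases,
 * including the "*"-wildcard entries):
 *
 *	static int
 *	blank_cmp(const char *table_id, const char *inq_id)
 *	{
 *		for (;;) {
 *			while (*table_id == ' ')
 *				table_id++;
 *			while (*inq_id == ' ')
 *				inq_id++;
 *			if (*table_id == '\0' || *inq_id == '\0')
 *				return (*table_id == *inq_id);
 *			if (SD_TOUPPER(*table_id) != SD_TOUPPER(*inq_id))
 *				return (0);
 *			table_id++;
 *			inq_id++;
 *		}
 *	}
 */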
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN T3", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT |
		SD_CONF_BSET_RST_RETRIES |
		SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT |
		SD_CONF_BSET_RST_RETRIES |
		SD_CONF_BSET_RSV_REL_TIME |
		SD_CONF_BSET_MIN_THROTTLE |
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN T4", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT |
		SD_CONF_BSET_RST_RETRIES |
		SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT |
		SD_CONF_BSET_BSY_RETRY_COUNT |
		SD_CONF_BSET_RST_RETRIES |
		SD_CONF_BSET_MIN_THROTTLE |
		SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT |
		SD_CONF_BSET_BSY_RETRY_COUNT |
		SD_CONF_BSET_RST_RETRIES |
		SD_CONF_BSET_MIN_THROTTLE |
		SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT |
		SD_CONF_BSET_BSY_RETRY_COUNT |
		SD_CONF_BSET_RST_RETRIES |
		SD_CONF_BSET_MIN_THROTTLE |
		SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT |
		SD_CONF_BSET_BSY_RETRY_COUNT |
		SD_CONF_BSET_RST_RETRIES |
		SD_CONF_BSET_MIN_THROTTLE |
		SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT |
		SD_CONF_BSET_BSY_RETRY_COUNT |
		SD_CONF_BSET_RST_RETRIES |
		SD_CONF_BSET_MIN_THROTTLE |
		SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT |
		SD_CONF_BSET_BSY_RETRY_COUNT |
		SD_CONF_BSET_RST_RETRIES |
		SD_CONF_BSET_MIN_THROTTLE |
		SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
		&symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
		&lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD |
		SD_CONF_BSET_READSUB_BCD |
		SD_CONF_BSET_READ_TOC_ADDR_BCD |
		SD_CONF_BSET_NO_READ_HEADER |
		SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD |
		SD_CONF_BSET_READSUB_BCD |
		SD_CONF_BSET_READ_TOC_ADDR_BCD |
		SD_CONF_BSET_NO_READ_HEADER |
		SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_CTYPE |
		SD_CONF_BSET_NRR_COUNT |
		SD_CONF_BSET_FAB_DEVID |
		SD_CONF_BSET_NOCACHE |
		SD_CONF_BSET_BSY_RETRY_COUNT |
		SD_CONF_BSET_PLAYMSF_BCD |
		SD_CONF_BSET_READSUB_BCD |
		SD_CONF_BSET_READ_TOC_TRK_BCD |
		SD_CONF_BSET_READ_TOC_ADDR_BCD |
		SD_CONF_BSET_NO_READ_HEADER |
		SD_CONF_BSET_READ_CD_XD4 |
		SD_CONF_BSET_RST_RETRIES |
		SD_CONF_BSET_RSV_REL_TIME |
		SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF, },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
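/*
 * Illustrative sketch only (not part of this driver): each sd_cdbtab[] row
 * pairs a CDB group with the largest LBA and transfer length that group can
 * encode (e.g. 6-byte group 0 CDBs carry a 21-bit LBA and 8-bit count,
 * 10-byte group 1 CDBs a 32-bit LBA and 16-bit count, and so on).  Command
 * construction can then select the smallest usable group, roughly:
 *
 *	for (i = 0; i < sizeof (sd_cdbtab) / sizeof (sd_cdbtab[0]); i++) {
 *		if (lba <= max_lba_of(sd_cdbtab[i]) &&
 *		    blockcount <= max_count_of(sd_cdbtab[i]))
 *			break;			(use this CDB group)
 *	}
 *
 * Here max_lba_of()/max_count_of() merely stand in for the third and fourth
 * fields of struct sd_cdbinfo, whose exact member names live in sddef.h.
 */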
/*
 * Specifies the number of seconds that must have elapsed since the last
 * command has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel SCSI
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel SCSI
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
int flag); 1466 static int sd_mhdioc_release(dev_t dev); 1467 static int sd_mhdioc_register_devid(dev_t dev); 1468 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1469 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1470 1471 /* 1472 * SCSI removable prototypes 1473 */ 1474 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1475 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1476 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1477 static int sr_pause_resume(dev_t dev, int mode); 1478 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1479 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1480 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1481 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1482 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1483 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1484 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1485 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1486 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1487 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1488 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1489 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1490 static int sr_eject(dev_t dev); 1491 static void sr_ejected(register struct sd_lun *un); 1492 static int sr_check_wp(dev_t dev); 1493 static int sd_check_media(dev_t dev, enum dkio_state state); 1494 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1495 static void sd_delayed_cv_broadcast(void *arg); 1496 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1497 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1498 1499 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1500 1501 /* 1502 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1503 */ 1504 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1505 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1506 static void sd_wm_cache_destructor(void *wm, void *un); 1507 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1508 daddr_t endb, ushort_t typ); 1509 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1510 daddr_t endb); 1511 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1512 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1513 static void sd_read_modify_write_task(void * arg); 1514 static int 1515 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1516 struct buf **bpp); 1517 1518 1519 /* 1520 * Function prototypes for failfast support. 
1521 */ 1522 static void sd_failfast_flushq(struct sd_lun *un); 1523 static int sd_failfast_flushq_callback(struct buf *bp); 1524 1525 /* 1526 * Function prototypes to check for lsi devices 1527 */ 1528 static void sd_is_lsi(struct sd_lun *un); 1529 1530 /* 1531 * Function prototypes for x86 support 1532 */ 1533 #if defined(__i386) || defined(__amd64) 1534 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1535 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1536 #endif 1537 1538 1539 /* Function prototypes for cmlb */ 1540 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1541 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1542 1543 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1544 1545 /* 1546 * Constants for failfast support: 1547 * 1548 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1549 * failfast processing being performed. 1550 * 1551 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1552 * failfast processing on all bufs with B_FAILFAST set. 1553 */ 1554 1555 #define SD_FAILFAST_INACTIVE 0 1556 #define SD_FAILFAST_ACTIVE 1 1557 1558 /* 1559 * Bitmask to control behavior of buf(9S) flushes when a transition to 1560 * the failfast state occurs. Optional bits include: 1561 * 1562 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1563 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1564 * be flushed. 1565 * 1566 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1567 * driver, in addition to the regular wait queue. This includes the xbuf 1568 * queues. When clear, only the driver's wait queue will be flushed. 1569 */ 1570 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1571 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1572 1573 /* 1574 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1575 * to flush all queues within the driver. 1576 */ 1577 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1578 1579 1580 /* 1581 * SD Testing Fault Injection 1582 */ 1583 #ifdef SD_FAULT_INJECTION 1584 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1585 static void sd_faultinjection(struct scsi_pkt *pktp); 1586 static void sd_injection_log(char *buf, struct sd_lun *un); 1587 #endif 1588 1589 /* 1590 * Device driver ops vector 1591 */ 1592 static struct cb_ops sd_cb_ops = { 1593 sdopen, /* open */ 1594 sdclose, /* close */ 1595 sdstrategy, /* strategy */ 1596 nodev, /* print */ 1597 sddump, /* dump */ 1598 sdread, /* read */ 1599 sdwrite, /* write */ 1600 sdioctl, /* ioctl */ 1601 nodev, /* devmap */ 1602 nodev, /* mmap */ 1603 nodev, /* segmap */ 1604 nochpoll, /* poll */ 1605 sd_prop_op, /* cb_prop_op */ 1606 0, /* streamtab */ 1607 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1608 CB_REV, /* cb_rev */ 1609 sdaread, /* async I/O read entry point */ 1610 sdawrite /* async I/O write entry point */ 1611 }; 1612 1613 static struct dev_ops sd_ops = { 1614 DEVO_REV, /* devo_rev, */ 1615 0, /* refcnt */ 1616 sdinfo, /* info */ 1617 nulldev, /* identify */ 1618 sdprobe, /* probe */ 1619 sdattach, /* attach */ 1620 sddetach, /* detach */ 1621 nodev, /* reset */ 1622 &sd_cb_ops, /* driver operations */ 1623 NULL, /* bus operations */ 1624 sdpower /* power */ 1625 }; 1626 1627 1628 /* 1629 * This is the loadable module wrapper. 
1630 */ 1631 #include <sys/modctl.h> 1632 1633 static struct modldrv modldrv = { 1634 &mod_driverops, /* Type of module. This one is a driver */ 1635 SD_MODULE_NAME, /* Module name. */ 1636 &sd_ops /* driver ops */ 1637 }; 1638 1639 1640 static struct modlinkage modlinkage = { 1641 MODREV_1, 1642 &modldrv, 1643 NULL 1644 }; 1645 1646 static cmlb_tg_ops_t sd_tgops = { 1647 TG_DK_OPS_VERSION_1, 1648 sd_tg_rdwr, 1649 sd_tg_getinfo 1650 }; 1651 1652 static struct scsi_asq_key_strings sd_additional_codes[] = { 1653 0x81, 0, "Logical Unit is Reserved", 1654 0x85, 0, "Audio Address Not Valid", 1655 0xb6, 0, "Media Load Mechanism Failed", 1656 0xB9, 0, "Audio Play Operation Aborted", 1657 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1658 0x53, 2, "Medium removal prevented", 1659 0x6f, 0, "Authentication failed during key exchange", 1660 0x6f, 1, "Key not present", 1661 0x6f, 2, "Key not established", 1662 0x6f, 3, "Read without proper authentication", 1663 0x6f, 4, "Mismatched region to this logical unit", 1664 0x6f, 5, "Region reset count error", 1665 0xffff, 0x0, NULL 1666 }; 1667 1668 1669 /* 1670 * Struct for passing printing information for sense data messages 1671 */ 1672 struct sd_sense_info { 1673 int ssi_severity; 1674 int ssi_pfa_flag; 1675 }; 1676 1677 /* 1678 * Table of function pointers for iostart-side routines. Separate "chains" 1679 * of layered function calls are formed by placing the function pointers 1680 * sequentially in the desired order. Functions are called according to an 1681 * incrementing table index ordering. The last function in each chain must 1682 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1683 * in the sd_iodone_chain[] array. 1684 * 1685 * Note: It may seem more natural to organize both the iostart and iodone 1686 * functions together, into an array of structures (or some similar 1687 * organization) with a common index, rather than two separate arrays which 1688 * must be maintained in synchronization. The purpose of this division is 1689 * to achieve improved performance: individual arrays allows for more 1690 * effective cache line utilization on certain platforms. 
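 *
 * For illustration only: a hypothetical intermediate iostart layer (not an
 * actual routine in this driver) does its per-layer processing on the buf
 * and then hands it to the next function in its chain, typically via the
 * SD_NEXT_IOSTART() macro defined further below:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		ASSERT(un != NULL);
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}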
1691 */ 1692 1693 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1694 1695 1696 static sd_chain_t sd_iostart_chain[] = { 1697 1698 /* Chain for buf IO for disk drive targets (PM enabled) */ 1699 sd_mapblockaddr_iostart, /* Index: 0 */ 1700 sd_pm_iostart, /* Index: 1 */ 1701 sd_core_iostart, /* Index: 2 */ 1702 1703 /* Chain for buf IO for disk drive targets (PM disabled) */ 1704 sd_mapblockaddr_iostart, /* Index: 3 */ 1705 sd_core_iostart, /* Index: 4 */ 1706 1707 /* Chain for buf IO for removable-media targets (PM enabled) */ 1708 sd_mapblockaddr_iostart, /* Index: 5 */ 1709 sd_mapblocksize_iostart, /* Index: 6 */ 1710 sd_pm_iostart, /* Index: 7 */ 1711 sd_core_iostart, /* Index: 8 */ 1712 1713 /* Chain for buf IO for removable-media targets (PM disabled) */ 1714 sd_mapblockaddr_iostart, /* Index: 9 */ 1715 sd_mapblocksize_iostart, /* Index: 10 */ 1716 sd_core_iostart, /* Index: 11 */ 1717 1718 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1719 sd_mapblockaddr_iostart, /* Index: 12 */ 1720 sd_checksum_iostart, /* Index: 13 */ 1721 sd_pm_iostart, /* Index: 14 */ 1722 sd_core_iostart, /* Index: 15 */ 1723 1724 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1725 sd_mapblockaddr_iostart, /* Index: 16 */ 1726 sd_checksum_iostart, /* Index: 17 */ 1727 sd_core_iostart, /* Index: 18 */ 1728 1729 /* Chain for USCSI commands (all targets) */ 1730 sd_pm_iostart, /* Index: 19 */ 1731 sd_core_iostart, /* Index: 20 */ 1732 1733 /* Chain for checksumming USCSI commands (all targets) */ 1734 sd_checksum_uscsi_iostart, /* Index: 21 */ 1735 sd_pm_iostart, /* Index: 22 */ 1736 sd_core_iostart, /* Index: 23 */ 1737 1738 /* Chain for "direct" USCSI commands (all targets) */ 1739 sd_core_iostart, /* Index: 24 */ 1740 1741 /* Chain for "direct priority" USCSI commands (all targets) */ 1742 sd_core_iostart, /* Index: 25 */ 1743 }; 1744 1745 /* 1746 * Macros to locate the first function of each iostart chain in the 1747 * sd_iostart_chain[] array. These are located by the index in the array. 1748 */ 1749 #define SD_CHAIN_DISK_IOSTART 0 1750 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1751 #define SD_CHAIN_RMMEDIA_IOSTART 5 1752 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1753 #define SD_CHAIN_CHKSUM_IOSTART 12 1754 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1755 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1756 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1757 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1758 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1759 1760 1761 /* 1762 * Table of function pointers for the iodone-side routines for the driver- 1763 * internal layering mechanism. The calling sequence for iodone routines 1764 * uses a decrementing table index, so the last routine called in a chain 1765 * must be at the lowest array index location for that chain. The last 1766 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1767 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1768 * of the functions in an iodone side chain must correspond to the ordering 1769 * of the iostart routines for that chain. Note that there is no iodone 1770 * side routine that corresponds to sd_core_iostart(), so there is no 1771 * entry in the table for this. 
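 *
 * For illustration only (a hedged sketch, not a verbatim excerpt of this
 * driver's completion path): at IO completion time the core begins the
 * unwind at the highest-index routine recorded for the IO, where
 * SD_GET_XBUF() retrieves the xbuf attached to the buf:
 *
 *	struct sd_xbuf *xp = SD_GET_XBUF(bp);
 *	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
 *
 * Each iodone routine then passes the buf along via SD_NEXT_IODONE(),
 * which decrements the index (both macros are defined further below).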
1772  */
1773
1774 static sd_chain_t sd_iodone_chain[] = {
1775
1776 	/* Chain for buf IO for disk drive targets (PM enabled) */
1777 	sd_buf_iodone,			/* Index: 0 */
1778 	sd_mapblockaddr_iodone,		/* Index: 1 */
1779 	sd_pm_iodone,			/* Index: 2 */
1780
1781 	/* Chain for buf IO for disk drive targets (PM disabled) */
1782 	sd_buf_iodone,			/* Index: 3 */
1783 	sd_mapblockaddr_iodone,		/* Index: 4 */
1784
1785 	/* Chain for buf IO for removable-media targets (PM enabled) */
1786 	sd_buf_iodone,			/* Index: 5 */
1787 	sd_mapblockaddr_iodone,		/* Index: 6 */
1788 	sd_mapblocksize_iodone,		/* Index: 7 */
1789 	sd_pm_iodone,			/* Index: 8 */
1790
1791 	/* Chain for buf IO for removable-media targets (PM disabled) */
1792 	sd_buf_iodone,			/* Index: 9 */
1793 	sd_mapblockaddr_iodone,		/* Index: 10 */
1794 	sd_mapblocksize_iodone,		/* Index: 11 */
1795
1796 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1797 	sd_buf_iodone,			/* Index: 12 */
1798 	sd_mapblockaddr_iodone,		/* Index: 13 */
1799 	sd_checksum_iodone,		/* Index: 14 */
1800 	sd_pm_iodone,			/* Index: 15 */
1801
1802 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1803 	sd_buf_iodone,			/* Index: 16 */
1804 	sd_mapblockaddr_iodone,		/* Index: 17 */
1805 	sd_checksum_iodone,		/* Index: 18 */
1806
1807 	/* Chain for USCSI commands (non-checksum targets) */
1808 	sd_uscsi_iodone,		/* Index: 19 */
1809 	sd_pm_iodone,			/* Index: 20 */
1810
1811 	/* Chain for USCSI commands (checksum targets) */
1812 	sd_uscsi_iodone,		/* Index: 21 */
1813 	sd_checksum_uscsi_iodone,	/* Index: 22 */
1814 	sd_pm_iodone,			/* Index: 23 */
1815
1816 	/* Chain for "direct" USCSI commands (all targets) */
1817 	sd_uscsi_iodone,		/* Index: 24 */
1818
1819 	/* Chain for "direct priority" USCSI commands (all targets) */
1820 	sd_uscsi_iodone,		/* Index: 25 */
1821 };
1822
1823
1824 /*
1825  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1826  * each iodone-side chain. These are located by the array index, but as the
1827  * iodone side functions are called in a decrementing-index order, the
1828  * highest index number in each chain must be specified (as these correspond
1829  * to the first function in the iodone chain that will be called by the core
1830  * at IO completion time).
1831  */
1832
1833 #define	SD_CHAIN_DISK_IODONE		2
1834 #define	SD_CHAIN_DISK_IODONE_NO_PM	4
1835 #define	SD_CHAIN_RMMEDIA_IODONE		8
1836 #define	SD_CHAIN_RMMEDIA_IODONE_NO_PM	11
1837 #define	SD_CHAIN_CHKSUM_IODONE		15
1838 #define	SD_CHAIN_CHKSUM_IODONE_NO_PM	18
1839 #define	SD_CHAIN_USCSI_CMD_IODONE	20
1840 #define	SD_CHAIN_USCSI_CHKSUM_IODONE	22
1841 #define	SD_CHAIN_DIRECT_CMD_IODONE	24
1842 #define	SD_CHAIN_PRIORITY_CMD_IODONE	25
1843
1844
1845
1846
1847 /*
1848  * Array to map a layering chain index to the appropriate initpkt routine.
1849  * The redundant entries are present so that the index used for accessing
1850  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1851  * with this table as well.
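 *
 * For illustration only (a hedged sketch, not a verbatim excerpt): the chain
 * index carried in the xbuf also selects the packet constructor for an IO,
 * along the lines of:
 *
 *	struct sd_xbuf *xp = SD_GET_XBUF(bp);
 *	struct scsi_pkt *pktp = NULL;
 *	int rval = sd_initpkt_map[xp->xb_chain_iostart](bp, &pktp);
 *
 * The matching entry in sd_destroypkt_map[] below is used to release
 * whatever the initpkt routine allocated once the command completes.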
1852  */
1853 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1854
1855 static sd_initpkt_t	sd_initpkt_map[] = {
1856
1857 	/* Chain for buf IO for disk drive targets (PM enabled) */
1858 	sd_initpkt_for_buf,		/* Index: 0 */
1859 	sd_initpkt_for_buf,		/* Index: 1 */
1860 	sd_initpkt_for_buf,		/* Index: 2 */
1861
1862 	/* Chain for buf IO for disk drive targets (PM disabled) */
1863 	sd_initpkt_for_buf,		/* Index: 3 */
1864 	sd_initpkt_for_buf,		/* Index: 4 */
1865
1866 	/* Chain for buf IO for removable-media targets (PM enabled) */
1867 	sd_initpkt_for_buf,		/* Index: 5 */
1868 	sd_initpkt_for_buf,		/* Index: 6 */
1869 	sd_initpkt_for_buf,		/* Index: 7 */
1870 	sd_initpkt_for_buf,		/* Index: 8 */
1871
1872 	/* Chain for buf IO for removable-media targets (PM disabled) */
1873 	sd_initpkt_for_buf,		/* Index: 9 */
1874 	sd_initpkt_for_buf,		/* Index: 10 */
1875 	sd_initpkt_for_buf,		/* Index: 11 */
1876
1877 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1878 	sd_initpkt_for_buf,		/* Index: 12 */
1879 	sd_initpkt_for_buf,		/* Index: 13 */
1880 	sd_initpkt_for_buf,		/* Index: 14 */
1881 	sd_initpkt_for_buf,		/* Index: 15 */
1882
1883 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1884 	sd_initpkt_for_buf,		/* Index: 16 */
1885 	sd_initpkt_for_buf,		/* Index: 17 */
1886 	sd_initpkt_for_buf,		/* Index: 18 */
1887
1888 	/* Chain for USCSI commands (non-checksum targets) */
1889 	sd_initpkt_for_uscsi,		/* Index: 19 */
1890 	sd_initpkt_for_uscsi,		/* Index: 20 */
1891
1892 	/* Chain for USCSI commands (checksum targets) */
1893 	sd_initpkt_for_uscsi,		/* Index: 21 */
1894 	sd_initpkt_for_uscsi,		/* Index: 22 */
1895 	sd_initpkt_for_uscsi,		/* Index: 23 */
1896
1897 	/* Chain for "direct" USCSI commands (all targets) */
1898 	sd_initpkt_for_uscsi,		/* Index: 24 */
1899
1900 	/* Chain for "direct priority" USCSI commands (all targets) */
1901 	sd_initpkt_for_uscsi,		/* Index: 25 */
1902
1903 };
1904
1905
1906 /*
1907  * Array to map a layering chain index to the appropriate destroypkt routine.
1908  * The redundant entries are present so that the index used for accessing
1909  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1910  * with this table as well.
1911  */
1912 typedef void (*sd_destroypkt_t)(struct buf *);
1913
1914 static sd_destroypkt_t	sd_destroypkt_map[] = {
1915
1916 	/* Chain for buf IO for disk drive targets (PM enabled) */
1917 	sd_destroypkt_for_buf,		/* Index: 0 */
1918 	sd_destroypkt_for_buf,		/* Index: 1 */
1919 	sd_destroypkt_for_buf,		/* Index: 2 */
1920
1921 	/* Chain for buf IO for disk drive targets (PM disabled) */
1922 	sd_destroypkt_for_buf,		/* Index: 3 */
1923 	sd_destroypkt_for_buf,		/* Index: 4 */
1924
1925 	/* Chain for buf IO for removable-media targets (PM enabled) */
1926 	sd_destroypkt_for_buf,		/* Index: 5 */
1927 	sd_destroypkt_for_buf,		/* Index: 6 */
1928 	sd_destroypkt_for_buf,		/* Index: 7 */
1929 	sd_destroypkt_for_buf,		/* Index: 8 */
1930
1931 	/* Chain for buf IO for removable-media targets (PM disabled) */
1932 	sd_destroypkt_for_buf,		/* Index: 9 */
1933 	sd_destroypkt_for_buf,		/* Index: 10 */
1934 	sd_destroypkt_for_buf,		/* Index: 11 */
1935
1936 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1937 	sd_destroypkt_for_buf,		/* Index: 12 */
1938 	sd_destroypkt_for_buf,		/* Index: 13 */
1939 	sd_destroypkt_for_buf,		/* Index: 14 */
1940 	sd_destroypkt_for_buf,		/* Index: 15 */
1941
1942 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1943 	sd_destroypkt_for_buf,		/* Index: 16 */
1944 	sd_destroypkt_for_buf,		/* Index: 17 */
1945 	sd_destroypkt_for_buf,		/* Index: 18 */
1946
1947 	/* Chain for USCSI commands (non-checksum targets) */
1948 	sd_destroypkt_for_uscsi,	/* Index: 19 */
1949 	sd_destroypkt_for_uscsi,	/* Index: 20 */
1950
1951 	/* Chain for USCSI commands (checksum targets) */
1952 	sd_destroypkt_for_uscsi,	/* Index: 21 */
1953 	sd_destroypkt_for_uscsi,	/* Index: 22 */
1954 	sd_destroypkt_for_uscsi,	/* Index: 23 */
1955
1956 	/* Chain for "direct" USCSI commands (all targets) */
1957 	sd_destroypkt_for_uscsi,	/* Index: 24 */
1958
1959 	/* Chain for "direct priority" USCSI commands (all targets) */
1960 	sd_destroypkt_for_uscsi,	/* Index: 25 */
1961
1962 };
1963
1964
1965
1966 /*
1967  * Array to map a layering chain index to the appropriate chain "type".
1968  * The chain type indicates a specific property/usage of the chain.
1969  * The redundant entries are present so that the index used for accessing
1970  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1971  * with this table as well.
1972  */
1973
1974 #define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
1975 #define	SD_CHAIN_BUFIO			1	/* regular buf IO */
1976 #define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
1977 #define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
1978 #define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
1979 						/* (for error recovery) */
1980
1981 static int sd_chain_type_map[] = {
1982
1983 	/* Chain for buf IO for disk drive targets (PM enabled) */
1984 	SD_CHAIN_BUFIO,			/* Index: 0 */
1985 	SD_CHAIN_BUFIO,			/* Index: 1 */
1986 	SD_CHAIN_BUFIO,			/* Index: 2 */
1987
1988 	/* Chain for buf IO for disk drive targets (PM disabled) */
1989 	SD_CHAIN_BUFIO,			/* Index: 3 */
1990 	SD_CHAIN_BUFIO,			/* Index: 4 */
1991
1992 	/* Chain for buf IO for removable-media targets (PM enabled) */
1993 	SD_CHAIN_BUFIO,			/* Index: 5 */
1994 	SD_CHAIN_BUFIO,			/* Index: 6 */
1995 	SD_CHAIN_BUFIO,			/* Index: 7 */
1996 	SD_CHAIN_BUFIO,			/* Index: 8 */
1997
1998 	/* Chain for buf IO for removable-media targets (PM disabled) */
1999 	SD_CHAIN_BUFIO,			/* Index: 9 */
2000 	SD_CHAIN_BUFIO,			/* Index: 10 */
2001 	SD_CHAIN_BUFIO,			/* Index: 11 */
2002
2003 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2004 	SD_CHAIN_BUFIO,			/* Index: 12 */
2005 	SD_CHAIN_BUFIO,			/* Index: 13 */
2006 	SD_CHAIN_BUFIO,			/* Index: 14 */
2007 	SD_CHAIN_BUFIO,			/* Index: 15 */
2008
2009 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2010 	SD_CHAIN_BUFIO,			/* Index: 16 */
2011 	SD_CHAIN_BUFIO,			/* Index: 17 */
2012 	SD_CHAIN_BUFIO,			/* Index: 18 */
2013
2014 	/* Chain for USCSI commands (non-checksum targets) */
2015 	SD_CHAIN_USCSI,			/* Index: 19 */
2016 	SD_CHAIN_USCSI,			/* Index: 20 */
2017
2018 	/* Chain for USCSI commands (checksum targets) */
2019 	SD_CHAIN_USCSI,			/* Index: 21 */
2020 	SD_CHAIN_USCSI,			/* Index: 22 */
2021 	SD_CHAIN_USCSI,			/* Index: 23 */
2022
2023 	/* Chain for "direct" USCSI commands (all targets) */
2024 	SD_CHAIN_DIRECT,		/* Index: 24 */
2025
2026 	/* Chain for "direct priority" USCSI commands (all targets) */
2027 	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
2028 };
2029
2030
2031 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2032 #define	SD_IS_BUFIO(xp)			\
2033 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2034
2035 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2036 #define	SD_IS_DIRECT_PRIORITY(xp)	\
2037 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2038
2039
2040
2041 /*
2042  * Struct, array, and macros to map a specific chain to the appropriate
2043  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2044  *
2045  * The sd_chain_index_map[] array is used at attach time to set the various
2046  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2047  * chain to be used with the instance. This allows different instances to use
2048  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2049  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2050  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2051  * dynamically and without the use of locking; and (2) a layer to update the
2052  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2053  * to allow for deferred processing of an IO within the same chain from a
2054  * different execution context.
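 *
 * For illustration only (a hedged sketch of the intended usage, not a
 * verbatim excerpt): attach-time code records a chain selection such as
 * SD_CHAIN_INFO_RMMEDIA (defined below) in un->un_buf_chain_type, and xbuf
 * initialization then resolves that selection into the two table indexes:
 *
 *	struct sd_chain_index *cip =
 *	    &sd_chain_index_map[un->un_buf_chain_type];
 *	xp->xb_chain_iostart = cip->sci_iostart_index;
 *	xp->xb_chain_iodone = cip->sci_iodone_index;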
2055 */ 2056 2057 struct sd_chain_index { 2058 int sci_iostart_index; 2059 int sci_iodone_index; 2060 }; 2061 2062 static struct sd_chain_index sd_chain_index_map[] = { 2063 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2064 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2065 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2066 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2067 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2068 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2069 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2070 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2071 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2072 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2073 }; 2074 2075 2076 /* 2077 * The following are indexes into the sd_chain_index_map[] array. 2078 */ 2079 2080 /* un->un_buf_chain_type must be set to one of these */ 2081 #define SD_CHAIN_INFO_DISK 0 2082 #define SD_CHAIN_INFO_DISK_NO_PM 1 2083 #define SD_CHAIN_INFO_RMMEDIA 2 2084 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2085 #define SD_CHAIN_INFO_CHKSUM 4 2086 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2087 2088 /* un->un_uscsi_chain_type must be set to one of these */ 2089 #define SD_CHAIN_INFO_USCSI_CMD 6 2090 /* USCSI with PM disabled is the same as DIRECT */ 2091 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2092 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2093 2094 /* un->un_direct_chain_type must be set to one of these */ 2095 #define SD_CHAIN_INFO_DIRECT_CMD 8 2096 2097 /* un->un_priority_chain_type must be set to one of these */ 2098 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2099 2100 /* size for devid inquiries */ 2101 #define MAX_INQUIRY_SIZE 0xF0 2102 2103 /* 2104 * Macros used by functions to pass a given buf(9S) struct along to the 2105 * next function in the layering chain for further processing. 2106 * 2107 * In the following macros, passing more than three arguments to the called 2108 * routines causes the optimizer for the SPARC compiler to stop doing tail 2109 * call elimination which results in significant performance degradation. 2110 */ 2111 #define SD_BEGIN_IOSTART(index, un, bp) \ 2112 ((*(sd_iostart_chain[index]))(index, un, bp)) 2113 2114 #define SD_BEGIN_IODONE(index, un, bp) \ 2115 ((*(sd_iodone_chain[index]))(index, un, bp)) 2116 2117 #define SD_NEXT_IOSTART(index, un, bp) \ 2118 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2119 2120 #define SD_NEXT_IODONE(index, un, bp) \ 2121 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2122 2123 /* 2124 * Function: _init 2125 * 2126 * Description: This is the driver _init(9E) entry point. 2127 * 2128 * Return Code: Returns the value from mod_install(9F) or 2129 * ddi_soft_state_init(9F) as appropriate. 2130 * 2131 * Context: Called when driver module loaded. 
2132  */
2133
2134 int
2135 _init(void)
2136 {
2137 	int err;
2138
2139 	/* establish driver name from module name */
2140 	sd_label = mod_modname(&modlinkage);
2141
2142 	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2143 	    SD_MAXUNIT);
2144
2145 	if (err != 0) {
2146 		return (err);
2147 	}
2148
2149 	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2150 	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2151 	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2152
2153 	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2154 	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2155 	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2156
2157 	/*
2158 	 * it's ok to init here even for fibre device
2159 	 */
2160 	sd_scsi_probe_cache_init();
2161
2162 	sd_scsi_target_lun_init();
2163
2164 	/*
2165 	 * Creating taskq before mod_install ensures that all callers (threads)
2166 	 * that enter the module after a successful mod_install encounter
2167 	 * a valid taskq.
2168 	 */
2169 	sd_taskq_create();
2170
2171 	err = mod_install(&modlinkage);
2172 	if (err != 0) {
2173 		/* delete taskq if install fails */
2174 		sd_taskq_delete();
2175
2176 		mutex_destroy(&sd_detach_mutex);
2177 		mutex_destroy(&sd_log_mutex);
2178 		mutex_destroy(&sd_label_mutex);
2179
2180 		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2181 		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2182 		cv_destroy(&sd_tr.srq_inprocess_cv);
2183
2184 		sd_scsi_probe_cache_fini();
2185
2186 		sd_scsi_target_lun_fini();
2187
2188 		ddi_soft_state_fini(&sd_state);
2189 		return (err);
2190 	}
2191
2192 	return (err);
2193 }
2194
2195
2196 /*
2197  * Function: _fini
2198  *
2199  * Description: This is the driver _fini(9E) entry point.
2200  *
2201  * Return Code: Returns the value from mod_remove(9F)
2202  *
2203  * Context: Called when driver module is unloaded.
2204  */
2205
2206 int
2207 _fini(void)
2208 {
2209 	int err;
2210
2211 	if ((err = mod_remove(&modlinkage)) != 0) {
2212 		return (err);
2213 	}
2214
2215 	sd_taskq_delete();
2216
2217 	mutex_destroy(&sd_detach_mutex);
2218 	mutex_destroy(&sd_log_mutex);
2219 	mutex_destroy(&sd_label_mutex);
2220 	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2221
2222 	sd_scsi_probe_cache_fini();
2223
2224 	sd_scsi_target_lun_fini();
2225
2226 	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2227 	cv_destroy(&sd_tr.srq_inprocess_cv);
2228
2229 	ddi_soft_state_fini(&sd_state);
2230
2231 	return (err);
2232 }
2233
2234
2235 /*
2236  * Function: _info
2237  *
2238  * Description: This is the driver _info(9E) entry point.
2239  *
2240  * Arguments: modinfop - pointer to the driver modinfo structure
2241  *
2242  * Return Code: Returns the value from mod_info(9F).
2243  *
2244  * Context: Kernel thread context
2245  */
2246
2247 int
2248 _info(struct modinfo *modinfop)
2249 {
2250 	return (mod_info(&modlinkage, modinfop));
2251 }
2252
2253
2254 /*
2255  * The following routines implement the driver message logging facility.
2256  * They provide component- and level-based debug output filtering.
2257  * Output may also be restricted to messages for a single instance by
2258  * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2259  * to NULL, then messages for all instances are printed.
2260  *
2261  * These routines have been cloned from each other due to the language
2262  * constraints of macros and variable argument list processing.
2263  */
2264
2265
2266 /*
2267  * Function: sd_log_err
2268  *
2269  * Description: This routine is called by the SD_ERROR macro for debug
2270  *		logging of error conditions.
2271 * 2272 * Arguments: comp - driver component being logged 2273 * dev - pointer to driver info structure 2274 * fmt - error string and format to be logged 2275 */ 2276 2277 static void 2278 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2279 { 2280 va_list ap; 2281 dev_info_t *dev; 2282 2283 ASSERT(un != NULL); 2284 dev = SD_DEVINFO(un); 2285 ASSERT(dev != NULL); 2286 2287 /* 2288 * Filter messages based on the global component and level masks. 2289 * Also print if un matches the value of sd_debug_un, or if 2290 * sd_debug_un is set to NULL. 2291 */ 2292 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2293 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2294 mutex_enter(&sd_log_mutex); 2295 va_start(ap, fmt); 2296 (void) vsprintf(sd_log_buf, fmt, ap); 2297 va_end(ap); 2298 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2299 mutex_exit(&sd_log_mutex); 2300 } 2301 #ifdef SD_FAULT_INJECTION 2302 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2303 if (un->sd_injection_mask & comp) { 2304 mutex_enter(&sd_log_mutex); 2305 va_start(ap, fmt); 2306 (void) vsprintf(sd_log_buf, fmt, ap); 2307 va_end(ap); 2308 sd_injection_log(sd_log_buf, un); 2309 mutex_exit(&sd_log_mutex); 2310 } 2311 #endif 2312 } 2313 2314 2315 /* 2316 * Function: sd_log_info 2317 * 2318 * Description: This routine is called by the SD_INFO macro for debug 2319 * logging of general purpose informational conditions. 2320 * 2321 * Arguments: comp - driver component being logged 2322 * dev - pointer to driver info structure 2323 * fmt - info string and format to be logged 2324 */ 2325 2326 static void 2327 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2328 { 2329 va_list ap; 2330 dev_info_t *dev; 2331 2332 ASSERT(un != NULL); 2333 dev = SD_DEVINFO(un); 2334 ASSERT(dev != NULL); 2335 2336 /* 2337 * Filter messages based on the global component and level masks. 2338 * Also print if un matches the value of sd_debug_un, or if 2339 * sd_debug_un is set to NULL. 2340 */ 2341 if ((sd_component_mask & component) && 2342 (sd_level_mask & SD_LOGMASK_INFO) && 2343 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2344 mutex_enter(&sd_log_mutex); 2345 va_start(ap, fmt); 2346 (void) vsprintf(sd_log_buf, fmt, ap); 2347 va_end(ap); 2348 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2349 mutex_exit(&sd_log_mutex); 2350 } 2351 #ifdef SD_FAULT_INJECTION 2352 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2353 if (un->sd_injection_mask & component) { 2354 mutex_enter(&sd_log_mutex); 2355 va_start(ap, fmt); 2356 (void) vsprintf(sd_log_buf, fmt, ap); 2357 va_end(ap); 2358 sd_injection_log(sd_log_buf, un); 2359 mutex_exit(&sd_log_mutex); 2360 } 2361 #endif 2362 } 2363 2364 2365 /* 2366 * Function: sd_log_trace 2367 * 2368 * Description: This routine is called by the SD_TRACE macro for debug 2369 * logging of trace conditions (i.e. function entry/exit). 2370 * 2371 * Arguments: comp - driver component being logged 2372 * dev - pointer to driver info structure 2373 * fmt - trace string and format to be logged 2374 */ 2375 2376 static void 2377 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2378 { 2379 va_list ap; 2380 dev_info_t *dev; 2381 2382 ASSERT(un != NULL); 2383 dev = SD_DEVINFO(un); 2384 ASSERT(dev != NULL); 2385 2386 /* 2387 * Filter messages based on the global component and level masks. 2388 * Also print if un matches the value of sd_debug_un, or if 2389 * sd_debug_un is set to NULL. 
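	 *
	 * For example (a hypothetical debugging setup, not compiled-in
	 * driver code): trace output could be limited to IO messages from
	 * a single instance by patching the globals, e.g. from a kernel
	 * debugger:
	 *
	 *	sd_component_mask = SD_LOG_IO;
	 *	sd_level_mask = SD_LOGMASK_TRACE;
	 *	sd_debug_un = un;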
2390 */ 2391 if ((sd_component_mask & component) && 2392 (sd_level_mask & SD_LOGMASK_TRACE) && 2393 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2394 mutex_enter(&sd_log_mutex); 2395 va_start(ap, fmt); 2396 (void) vsprintf(sd_log_buf, fmt, ap); 2397 va_end(ap); 2398 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2399 mutex_exit(&sd_log_mutex); 2400 } 2401 #ifdef SD_FAULT_INJECTION 2402 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2403 if (un->sd_injection_mask & component) { 2404 mutex_enter(&sd_log_mutex); 2405 va_start(ap, fmt); 2406 (void) vsprintf(sd_log_buf, fmt, ap); 2407 va_end(ap); 2408 sd_injection_log(sd_log_buf, un); 2409 mutex_exit(&sd_log_mutex); 2410 } 2411 #endif 2412 } 2413 2414 2415 /* 2416 * Function: sdprobe 2417 * 2418 * Description: This is the driver probe(9e) entry point function. 2419 * 2420 * Arguments: devi - opaque device info handle 2421 * 2422 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2423 * DDI_PROBE_FAILURE: If the probe failed. 2424 * DDI_PROBE_PARTIAL: If the instance is not present now, 2425 * but may be present in the future. 2426 */ 2427 2428 static int 2429 sdprobe(dev_info_t *devi) 2430 { 2431 struct scsi_device *devp; 2432 int rval; 2433 int instance; 2434 2435 /* 2436 * if it wasn't for pln, sdprobe could actually be nulldev 2437 * in the "__fibre" case. 2438 */ 2439 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2440 return (DDI_PROBE_DONTCARE); 2441 } 2442 2443 devp = ddi_get_driver_private(devi); 2444 2445 if (devp == NULL) { 2446 /* Ooops... nexus driver is mis-configured... */ 2447 return (DDI_PROBE_FAILURE); 2448 } 2449 2450 instance = ddi_get_instance(devi); 2451 2452 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2453 return (DDI_PROBE_PARTIAL); 2454 } 2455 2456 /* 2457 * Call the SCSA utility probe routine to see if we actually 2458 * have a target at this SCSI nexus. 2459 */ 2460 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2461 case SCSIPROBE_EXISTS: 2462 switch (devp->sd_inq->inq_dtype) { 2463 case DTYPE_DIRECT: 2464 rval = DDI_PROBE_SUCCESS; 2465 break; 2466 case DTYPE_RODIRECT: 2467 /* CDs etc. Can be removable media */ 2468 rval = DDI_PROBE_SUCCESS; 2469 break; 2470 case DTYPE_OPTICAL: 2471 /* 2472 * Rewritable optical driver HP115AA 2473 * Can also be removable media 2474 */ 2475 2476 /* 2477 * Do not attempt to bind to DTYPE_OPTICAL if 2478 * pre solaris 9 sparc sd behavior is required 2479 * 2480 * If first time through and sd_dtype_optical_bind 2481 * has not been set in /etc/system check properties 2482 */ 2483 2484 if (sd_dtype_optical_bind < 0) { 2485 sd_dtype_optical_bind = ddi_prop_get_int 2486 (DDI_DEV_T_ANY, devi, 0, 2487 "optical-device-bind", 1); 2488 } 2489 2490 if (sd_dtype_optical_bind == 0) { 2491 rval = DDI_PROBE_FAILURE; 2492 } else { 2493 rval = DDI_PROBE_SUCCESS; 2494 } 2495 break; 2496 2497 case DTYPE_NOTPRESENT: 2498 default: 2499 rval = DDI_PROBE_FAILURE; 2500 break; 2501 } 2502 break; 2503 default: 2504 rval = DDI_PROBE_PARTIAL; 2505 break; 2506 } 2507 2508 /* 2509 * This routine checks for resource allocation prior to freeing, 2510 * so it will take care of the "smart probing" case where a 2511 * scsi_probe() may or may not have been issued and will *not* 2512 * free previously-freed resources. 2513 */ 2514 scsi_unprobe(devp); 2515 return (rval); 2516 } 2517 2518 2519 /* 2520 * Function: sdinfo 2521 * 2522 * Description: This is the driver getinfo(9e) entry point function. 
2523 * Given the device number, return the devinfo pointer from 2524 * the scsi_device structure or the instance number 2525 * associated with the dev_t. 2526 * 2527 * Arguments: dip - pointer to device info structure 2528 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2529 * DDI_INFO_DEVT2INSTANCE) 2530 * arg - driver dev_t 2531 * resultp - user buffer for request response 2532 * 2533 * Return Code: DDI_SUCCESS 2534 * DDI_FAILURE 2535 */ 2536 /* ARGSUSED */ 2537 static int 2538 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2539 { 2540 struct sd_lun *un; 2541 dev_t dev; 2542 int instance; 2543 int error; 2544 2545 switch (infocmd) { 2546 case DDI_INFO_DEVT2DEVINFO: 2547 dev = (dev_t)arg; 2548 instance = SDUNIT(dev); 2549 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2550 return (DDI_FAILURE); 2551 } 2552 *result = (void *) SD_DEVINFO(un); 2553 error = DDI_SUCCESS; 2554 break; 2555 case DDI_INFO_DEVT2INSTANCE: 2556 dev = (dev_t)arg; 2557 instance = SDUNIT(dev); 2558 *result = (void *)(uintptr_t)instance; 2559 error = DDI_SUCCESS; 2560 break; 2561 default: 2562 error = DDI_FAILURE; 2563 } 2564 return (error); 2565 } 2566 2567 /* 2568 * Function: sd_prop_op 2569 * 2570 * Description: This is the driver prop_op(9e) entry point function. 2571 * Return the number of blocks for the partition in question 2572 * or forward the request to the property facilities. 2573 * 2574 * Arguments: dev - device number 2575 * dip - pointer to device info structure 2576 * prop_op - property operator 2577 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2578 * name - pointer to property name 2579 * valuep - pointer or address of the user buffer 2580 * lengthp - property length 2581 * 2582 * Return Code: DDI_PROP_SUCCESS 2583 * DDI_PROP_NOT_FOUND 2584 * DDI_PROP_UNDEFINED 2585 * DDI_PROP_NO_MEMORY 2586 * DDI_PROP_BUF_TOO_SMALL 2587 */ 2588 2589 static int 2590 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2591 char *name, caddr_t valuep, int *lengthp) 2592 { 2593 int instance = ddi_get_instance(dip); 2594 struct sd_lun *un; 2595 uint64_t nblocks64; 2596 uint_t dblk; 2597 2598 /* 2599 * Our dynamic properties are all device specific and size oriented. 2600 * Requests issued under conditions where size is valid are passed 2601 * to ddi_prop_op_nblocks with the size information, otherwise the 2602 * request is passed to ddi_prop_op. Size depends on valid geometry. 
2603 */ 2604 un = ddi_get_soft_state(sd_state, instance); 2605 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2606 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2607 name, valuep, lengthp)); 2608 } else if (!SD_IS_VALID_LABEL(un)) { 2609 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2610 valuep, lengthp)); 2611 } 2612 2613 /* get nblocks value */ 2614 ASSERT(!mutex_owned(SD_MUTEX(un))); 2615 2616 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2617 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2618 2619 /* report size in target size blocks */ 2620 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 2621 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags, 2622 name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize)); 2623 } 2624 2625 /* 2626 * The following functions are for smart probing: 2627 * sd_scsi_probe_cache_init() 2628 * sd_scsi_probe_cache_fini() 2629 * sd_scsi_clear_probe_cache() 2630 * sd_scsi_probe_with_cache() 2631 */ 2632 2633 /* 2634 * Function: sd_scsi_probe_cache_init 2635 * 2636 * Description: Initializes the probe response cache mutex and head pointer. 2637 * 2638 * Context: Kernel thread context 2639 */ 2640 2641 static void 2642 sd_scsi_probe_cache_init(void) 2643 { 2644 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2645 sd_scsi_probe_cache_head = NULL; 2646 } 2647 2648 2649 /* 2650 * Function: sd_scsi_probe_cache_fini 2651 * 2652 * Description: Frees all resources associated with the probe response cache. 2653 * 2654 * Context: Kernel thread context 2655 */ 2656 2657 static void 2658 sd_scsi_probe_cache_fini(void) 2659 { 2660 struct sd_scsi_probe_cache *cp; 2661 struct sd_scsi_probe_cache *ncp; 2662 2663 /* Clean up our smart probing linked list */ 2664 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2665 ncp = cp->next; 2666 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2667 } 2668 sd_scsi_probe_cache_head = NULL; 2669 mutex_destroy(&sd_scsi_probe_cache_mutex); 2670 } 2671 2672 2673 /* 2674 * Function: sd_scsi_clear_probe_cache 2675 * 2676 * Description: This routine clears the probe response cache. This is 2677 * done when open() returns ENXIO so that when deferred 2678 * attach is attempted (possibly after a device has been 2679 * turned on) we will retry the probe. Since we don't know 2680 * which target we failed to open, we just clear the 2681 * entire cache. 2682 * 2683 * Context: Kernel thread context 2684 */ 2685 2686 static void 2687 sd_scsi_clear_probe_cache(void) 2688 { 2689 struct sd_scsi_probe_cache *cp; 2690 int i; 2691 2692 mutex_enter(&sd_scsi_probe_cache_mutex); 2693 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2694 /* 2695 * Reset all entries to SCSIPROBE_EXISTS. This will 2696 * force probing to be performed the next time 2697 * sd_scsi_probe_with_cache is called. 2698 */ 2699 for (i = 0; i < NTARGETS_WIDE; i++) { 2700 cp->cache[i] = SCSIPROBE_EXISTS; 2701 } 2702 } 2703 mutex_exit(&sd_scsi_probe_cache_mutex); 2704 } 2705 2706 2707 /* 2708 * Function: sd_scsi_probe_with_cache 2709 * 2710 * Description: This routine implements support for a scsi device probe 2711 * with cache. The driver maintains a cache of the target 2712 * responses to scsi probes. If we get no response from a 2713 * target during a probe inquiry, we remember that, and we 2714 * avoid additional calls to scsi_probe on non-zero LUNs 2715 * on the same target until the cache is cleared. 
By doing 2716 * so we avoid the 1/4 sec selection timeout for nonzero 2717 * LUNs. lun0 of a target is always probed. 2718 * 2719 * Arguments: devp - Pointer to a scsi_device(9S) structure 2720 * waitfunc - indicates what the allocator routines should 2721 * do when resources are not available. This value 2722 * is passed on to scsi_probe() when that routine 2723 * is called. 2724 * 2725 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2726 * otherwise the value returned by scsi_probe(9F). 2727 * 2728 * Context: Kernel thread context 2729 */ 2730 2731 static int 2732 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2733 { 2734 struct sd_scsi_probe_cache *cp; 2735 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2736 int lun, tgt; 2737 2738 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2739 SCSI_ADDR_PROP_LUN, 0); 2740 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2741 SCSI_ADDR_PROP_TARGET, -1); 2742 2743 /* Make sure caching enabled and target in range */ 2744 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2745 /* do it the old way (no cache) */ 2746 return (scsi_probe(devp, waitfn)); 2747 } 2748 2749 mutex_enter(&sd_scsi_probe_cache_mutex); 2750 2751 /* Find the cache for this scsi bus instance */ 2752 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2753 if (cp->pdip == pdip) { 2754 break; 2755 } 2756 } 2757 2758 /* If we can't find a cache for this pdip, create one */ 2759 if (cp == NULL) { 2760 int i; 2761 2762 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2763 KM_SLEEP); 2764 cp->pdip = pdip; 2765 cp->next = sd_scsi_probe_cache_head; 2766 sd_scsi_probe_cache_head = cp; 2767 for (i = 0; i < NTARGETS_WIDE; i++) { 2768 cp->cache[i] = SCSIPROBE_EXISTS; 2769 } 2770 } 2771 2772 mutex_exit(&sd_scsi_probe_cache_mutex); 2773 2774 /* Recompute the cache for this target if LUN zero */ 2775 if (lun == 0) { 2776 cp->cache[tgt] = SCSIPROBE_EXISTS; 2777 } 2778 2779 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2780 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2781 return (SCSIPROBE_NORESP); 2782 } 2783 2784 /* Do the actual probe; save & return the result */ 2785 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2786 } 2787 2788 2789 /* 2790 * Function: sd_scsi_target_lun_init 2791 * 2792 * Description: Initializes the attached lun chain mutex and head pointer. 2793 * 2794 * Context: Kernel thread context 2795 */ 2796 2797 static void 2798 sd_scsi_target_lun_init(void) 2799 { 2800 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2801 sd_scsi_target_lun_head = NULL; 2802 } 2803 2804 2805 /* 2806 * Function: sd_scsi_target_lun_fini 2807 * 2808 * Description: Frees all resources associated with the attached lun 2809 * chain 2810 * 2811 * Context: Kernel thread context 2812 */ 2813 2814 static void 2815 sd_scsi_target_lun_fini(void) 2816 { 2817 struct sd_scsi_hba_tgt_lun *cp; 2818 struct sd_scsi_hba_tgt_lun *ncp; 2819 2820 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2821 ncp = cp->next; 2822 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2823 } 2824 sd_scsi_target_lun_head = NULL; 2825 mutex_destroy(&sd_scsi_target_lun_mutex); 2826 } 2827 2828 2829 /* 2830 * Function: sd_scsi_get_target_lun_count 2831 * 2832 * Description: This routine will check in the attached lun chain to see 2833 * how many luns are attached on the required SCSI controller 2834 * and target. 
Currently, some capabilities like tagged queue 2835 * are supported per target based by HBA. So all luns in a 2836 * target have the same capabilities. Based on this assumption, 2837 * sd should only set these capabilities once per target. This 2838 * function is called when sd needs to decide how many luns 2839 * already attached on a target. 2840 * 2841 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2842 * controller device. 2843 * target - The target ID on the controller's SCSI bus. 2844 * 2845 * Return Code: The number of luns attached on the required target and 2846 * controller. 2847 * -1 if target ID is not in parallel SCSI scope or the given 2848 * dip is not in the chain. 2849 * 2850 * Context: Kernel thread context 2851 */ 2852 2853 static int 2854 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2855 { 2856 struct sd_scsi_hba_tgt_lun *cp; 2857 2858 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2859 return (-1); 2860 } 2861 2862 mutex_enter(&sd_scsi_target_lun_mutex); 2863 2864 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2865 if (cp->pdip == dip) { 2866 break; 2867 } 2868 } 2869 2870 mutex_exit(&sd_scsi_target_lun_mutex); 2871 2872 if (cp == NULL) { 2873 return (-1); 2874 } 2875 2876 return (cp->nlun[target]); 2877 } 2878 2879 2880 /* 2881 * Function: sd_scsi_update_lun_on_target 2882 * 2883 * Description: This routine is used to update the attached lun chain when a 2884 * lun is attached or detached on a target. 2885 * 2886 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2887 * controller device. 2888 * target - The target ID on the controller's SCSI bus. 2889 * flag - Indicate the lun is attached or detached. 2890 * 2891 * Context: Kernel thread context 2892 */ 2893 2894 static void 2895 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2896 { 2897 struct sd_scsi_hba_tgt_lun *cp; 2898 2899 mutex_enter(&sd_scsi_target_lun_mutex); 2900 2901 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2902 if (cp->pdip == dip) { 2903 break; 2904 } 2905 } 2906 2907 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2908 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2909 KM_SLEEP); 2910 cp->pdip = dip; 2911 cp->next = sd_scsi_target_lun_head; 2912 sd_scsi_target_lun_head = cp; 2913 } 2914 2915 mutex_exit(&sd_scsi_target_lun_mutex); 2916 2917 if (cp != NULL) { 2918 if (flag == SD_SCSI_LUN_ATTACH) { 2919 cp->nlun[target] ++; 2920 } else { 2921 cp->nlun[target] --; 2922 } 2923 } 2924 } 2925 2926 2927 /* 2928 * Function: sd_spin_up_unit 2929 * 2930 * Description: Issues the following commands to spin-up the device: 2931 * START STOP UNIT, and INQUIRY. 2932 * 2933 * Arguments: un - driver soft state (unit) structure 2934 * 2935 * Return Code: 0 - success 2936 * EIO - failure 2937 * EACCES - reservation conflict 2938 * 2939 * Context: Kernel thread context 2940 */ 2941 2942 static int 2943 sd_spin_up_unit(struct sd_lun *un) 2944 { 2945 size_t resid = 0; 2946 int has_conflict = FALSE; 2947 uchar_t *bufaddr; 2948 2949 ASSERT(un != NULL); 2950 2951 /* 2952 * Send a throwaway START UNIT command. 2953 * 2954 * If we fail on this, we don't care presently what precisely 2955 * is wrong. EMC's arrays will also fail this with a check 2956 * condition (0x2/0x4/0x3) if the device is "inactive," but 2957 * we don't want to fail the attach because it may become 2958 * "active" later. 
2959 */ 2960 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2961 == EACCES) 2962 has_conflict = TRUE; 2963 2964 /* 2965 * Send another INQUIRY command to the target. This is necessary for 2966 * non-removable media direct access devices because their INQUIRY data 2967 * may not be fully qualified until they are spun up (perhaps via the 2968 * START command above). Note: This seems to be needed for some 2969 * legacy devices only.) The INQUIRY command should succeed even if a 2970 * Reservation Conflict is present. 2971 */ 2972 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2973 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2974 kmem_free(bufaddr, SUN_INQSIZE); 2975 return (EIO); 2976 } 2977 2978 /* 2979 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2980 * Note that this routine does not return a failure here even if the 2981 * INQUIRY command did not return any data. This is a legacy behavior. 2982 */ 2983 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2984 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2985 } 2986 2987 kmem_free(bufaddr, SUN_INQSIZE); 2988 2989 /* If we hit a reservation conflict above, tell the caller. */ 2990 if (has_conflict == TRUE) { 2991 return (EACCES); 2992 } 2993 2994 return (0); 2995 } 2996 2997 #ifdef _LP64 2998 /* 2999 * Function: sd_enable_descr_sense 3000 * 3001 * Description: This routine attempts to select descriptor sense format 3002 * using the Control mode page. Devices that support 64 bit 3003 * LBAs (for >2TB luns) should also implement descriptor 3004 * sense data so we will call this function whenever we see 3005 * a lun larger than 2TB. If for some reason the device 3006 * supports 64 bit LBAs but doesn't support descriptor sense 3007 * presumably the mode select will fail. Everything will 3008 * continue to work normally except that we will not get 3009 * complete sense data for commands that fail with an LBA 3010 * larger than 32 bits. 3011 * 3012 * Arguments: un - driver soft state (unit) structure 3013 * 3014 * Context: Kernel thread context only 3015 */ 3016 3017 static void 3018 sd_enable_descr_sense(struct sd_lun *un) 3019 { 3020 uchar_t *header; 3021 struct mode_control_scsi3 *ctrl_bufp; 3022 size_t buflen; 3023 size_t bd_len; 3024 3025 /* 3026 * Read MODE SENSE page 0xA, Control Mode Page 3027 */ 3028 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3029 sizeof (struct mode_control_scsi3); 3030 header = kmem_zalloc(buflen, KM_SLEEP); 3031 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3032 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3033 SD_ERROR(SD_LOG_COMMON, un, 3034 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3035 goto eds_exit; 3036 } 3037 3038 /* 3039 * Determine size of Block Descriptors in order to locate 3040 * the mode page data. ATAPI devices return 0, SCSI devices 3041 * should return MODE_BLK_DESC_LENGTH. 3042 */ 3043 bd_len = ((struct mode_header *)header)->bdesc_length; 3044 3045 /* Clear the mode data length field for MODE SELECT */ 3046 ((struct mode_header *)header)->length = 0; 3047 3048 ctrl_bufp = (struct mode_control_scsi3 *) 3049 (header + MODE_HEADER_LENGTH + bd_len); 3050 3051 /* 3052 * If the page length is smaller than the expected value, 3053 * the target device doesn't support D_SENSE. Bail out here. 
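	 * (The "- 2" in the comparison below accounts for the two mode page
	 * header bytes, page code and page length, which are not counted in
	 * the page length field itself.)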
3054 */ 3055 if (ctrl_bufp->mode_page.length < 3056 sizeof (struct mode_control_scsi3) - 2) { 3057 SD_ERROR(SD_LOG_COMMON, un, 3058 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3059 goto eds_exit; 3060 } 3061 3062 /* 3063 * Clear PS bit for MODE SELECT 3064 */ 3065 ctrl_bufp->mode_page.ps = 0; 3066 3067 /* 3068 * Set D_SENSE to enable descriptor sense format. 3069 */ 3070 ctrl_bufp->d_sense = 1; 3071 3072 /* 3073 * Use MODE SELECT to commit the change to the D_SENSE bit 3074 */ 3075 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3076 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3077 SD_INFO(SD_LOG_COMMON, un, 3078 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3079 goto eds_exit; 3080 } 3081 3082 eds_exit: 3083 kmem_free(header, buflen); 3084 } 3085 3086 /* 3087 * Function: sd_reenable_dsense_task 3088 * 3089 * Description: Re-enable descriptor sense after device or bus reset 3090 * 3091 * Context: Executes in a taskq() thread context 3092 */ 3093 static void 3094 sd_reenable_dsense_task(void *arg) 3095 { 3096 struct sd_lun *un = arg; 3097 3098 ASSERT(un != NULL); 3099 sd_enable_descr_sense(un); 3100 } 3101 #endif /* _LP64 */ 3102 3103 /* 3104 * Function: sd_set_mmc_caps 3105 * 3106 * Description: This routine determines if the device is MMC compliant and if 3107 * the device supports CDDA via a mode sense of the CDVD 3108 * capabilities mode page. Also checks if the device is a 3109 * dvdram writable device. 3110 * 3111 * Arguments: un - driver soft state (unit) structure 3112 * 3113 * Context: Kernel thread context only 3114 */ 3115 3116 static void 3117 sd_set_mmc_caps(struct sd_lun *un) 3118 { 3119 struct mode_header_grp2 *sense_mhp; 3120 uchar_t *sense_page; 3121 caddr_t buf; 3122 int bd_len; 3123 int status; 3124 struct uscsi_cmd com; 3125 int rtn; 3126 uchar_t *out_data_rw, *out_data_hd; 3127 uchar_t *rqbuf_rw, *rqbuf_hd; 3128 3129 ASSERT(un != NULL); 3130 3131 /* 3132 * The flags which will be set in this function are - mmc compliant, 3133 * dvdram writable device, cdda support. Initialize them to FALSE 3134 * and if a capability is detected - it will be set to TRUE. 3135 */ 3136 un->un_f_mmc_cap = FALSE; 3137 un->un_f_dvdram_writable_device = FALSE; 3138 un->un_f_cfg_cdda = FALSE; 3139 3140 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3141 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3142 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3143 3144 if (status != 0) { 3145 /* command failed; just return */ 3146 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3147 return; 3148 } 3149 /* 3150 * If the mode sense request for the CDROM CAPABILITIES 3151 * page (0x2A) succeeds the device is assumed to be MMC. 3152 */ 3153 un->un_f_mmc_cap = TRUE; 3154 3155 /* Get to the page data */ 3156 sense_mhp = (struct mode_header_grp2 *)buf; 3157 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3158 sense_mhp->bdesc_length_lo; 3159 if (bd_len > MODE_BLK_DESC_LENGTH) { 3160 /* 3161 * We did not get back the expected block descriptor 3162 * length so we cannot determine if the device supports 3163 * CDDA. However, we still indicate the device is MMC 3164 * according to the successful response to the page 3165 * 0x2A mode sense request. 
3166 */ 3167 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3168 "sd_set_mmc_caps: Mode Sense returned " 3169 "invalid block descriptor length\n"); 3170 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3171 return; 3172 } 3173 3174 /* See if read CDDA is supported */ 3175 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3176 bd_len); 3177 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3178 3179 /* See if writing DVD RAM is supported. */ 3180 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3181 if (un->un_f_dvdram_writable_device == TRUE) { 3182 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3183 return; 3184 } 3185 3186 /* 3187 * If the device presents DVD or CD capabilities in the mode 3188 * page, we can return here since a RRD will not have 3189 * these capabilities. 3190 */ 3191 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3192 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3193 return; 3194 } 3195 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3196 3197 /* 3198 * If un->un_f_dvdram_writable_device is still FALSE, 3199 * check for a Removable Rigid Disk (RRD). A RRD 3200 * device is identified by the features RANDOM_WRITABLE and 3201 * HARDWARE_DEFECT_MANAGEMENT. 3202 */ 3203 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3204 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3205 3206 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3207 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3208 RANDOM_WRITABLE, SD_PATH_STANDARD); 3209 if (rtn != 0) { 3210 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3211 kmem_free(rqbuf_rw, SENSE_LENGTH); 3212 return; 3213 } 3214 3215 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3216 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3217 3218 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3219 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3220 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3221 if (rtn == 0) { 3222 /* 3223 * We have good information, check for random writable 3224 * and hardware defect features. 3225 */ 3226 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3227 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3228 un->un_f_dvdram_writable_device = TRUE; 3229 } 3230 } 3231 3232 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3233 kmem_free(rqbuf_rw, SENSE_LENGTH); 3234 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3235 kmem_free(rqbuf_hd, SENSE_LENGTH); 3236 } 3237 3238 /* 3239 * Function: sd_check_for_writable_cd 3240 * 3241 * Description: This routine determines if the media in the device is 3242 * writable or not. It uses the get configuration command (0x46) 3243 * to determine if the media is writable 3244 * 3245 * Arguments: un - driver soft state (unit) structure 3246 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3247 * chain and the normal command waitq, or 3248 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3249 * "direct" chain and bypass the normal command 3250 * waitq. 3251 * 3252 * Context: Never called at interrupt context. 
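 *
 * Note: the first check below inspects the GET CONFIGURATION feature
 * header, whose bytes 6-7 report the drive's current profile; a current
 * profile of 0x0012 (the MMC DVD-RAM profile) is taken to mean the loaded
 * media is writable.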
3253 */ 3254 3255 static void 3256 sd_check_for_writable_cd(struct sd_lun *un, int path_flag) 3257 { 3258 struct uscsi_cmd com; 3259 uchar_t *out_data; 3260 uchar_t *rqbuf; 3261 int rtn; 3262 uchar_t *out_data_rw, *out_data_hd; 3263 uchar_t *rqbuf_rw, *rqbuf_hd; 3264 struct mode_header_grp2 *sense_mhp; 3265 uchar_t *sense_page; 3266 caddr_t buf; 3267 int bd_len; 3268 int status; 3269 3270 ASSERT(un != NULL); 3271 ASSERT(mutex_owned(SD_MUTEX(un))); 3272 3273 /* 3274 * Initialize the writable media to false, if configuration info. 3275 * tells us otherwise then only we will set it. 3276 */ 3277 un->un_f_mmc_writable_media = FALSE; 3278 mutex_exit(SD_MUTEX(un)); 3279 3280 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3281 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3282 3283 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3284 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3285 3286 mutex_enter(SD_MUTEX(un)); 3287 if (rtn == 0) { 3288 /* 3289 * We have good information, check for writable DVD. 3290 */ 3291 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3292 un->un_f_mmc_writable_media = TRUE; 3293 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3294 kmem_free(rqbuf, SENSE_LENGTH); 3295 return; 3296 } 3297 } 3298 3299 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3300 kmem_free(rqbuf, SENSE_LENGTH); 3301 3302 /* 3303 * Determine if this is a RRD type device. 3304 */ 3305 mutex_exit(SD_MUTEX(un)); 3306 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3307 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3308 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3309 mutex_enter(SD_MUTEX(un)); 3310 if (status != 0) { 3311 /* command failed; just return */ 3312 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3313 return; 3314 } 3315 3316 /* Get to the page data */ 3317 sense_mhp = (struct mode_header_grp2 *)buf; 3318 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3319 if (bd_len > MODE_BLK_DESC_LENGTH) { 3320 /* 3321 * We did not get back the expected block descriptor length so 3322 * we cannot check the mode page. 3323 */ 3324 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3325 "sd_check_for_writable_cd: Mode Sense returned " 3326 "invalid block descriptor length\n"); 3327 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3328 return; 3329 } 3330 3331 /* 3332 * If the device presents DVD or CD capabilities in the mode 3333 * page, we can return here since a RRD device will not have 3334 * these capabilities. 3335 */ 3336 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3337 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3338 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3339 return; 3340 } 3341 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3342 3343 /* 3344 * If un->un_f_mmc_writable_media is still FALSE, 3345 * check for RRD type media. A RRD device is identified 3346 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 
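 *
 * As a rough sketch of the checks that follow (per MMC; a summary, not
 * a normative layout): each GET CONFIGURATION reply carries an 8 byte
 * feature header followed by the requested feature descriptor, so
 * out_data[8-9] hold the feature code and bit 0x01 of out_data[10] is
 * the "current" flag showing the feature is active for the loaded
 * media. Both features must be reported current for the media to be
 * treated as RRD.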
3347 */ 3348 mutex_exit(SD_MUTEX(un)); 3349 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3350 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3351 3352 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3353 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3354 RANDOM_WRITABLE, path_flag); 3355 if (rtn != 0) { 3356 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3357 kmem_free(rqbuf_rw, SENSE_LENGTH); 3358 mutex_enter(SD_MUTEX(un)); 3359 return; 3360 } 3361 3362 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3363 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3364 3365 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3366 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3367 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3368 mutex_enter(SD_MUTEX(un)); 3369 if (rtn == 0) { 3370 /* 3371 * We have good information, check for random writable 3372 * and hardware defect features as current. 3373 */ 3374 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3375 (out_data_rw[10] & 0x1) && 3376 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3377 (out_data_hd[10] & 0x1)) { 3378 un->un_f_mmc_writable_media = TRUE; 3379 } 3380 } 3381 3382 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3383 kmem_free(rqbuf_rw, SENSE_LENGTH); 3384 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3385 kmem_free(rqbuf_hd, SENSE_LENGTH); 3386 } 3387 3388 /* 3389 * Function: sd_read_unit_properties 3390 * 3391 * Description: The following implements a property lookup mechanism. 3392 * Properties for particular disks (keyed on vendor, model 3393 * and rev numbers) are sought in the sd.conf file via 3394 * sd_process_sdconf_file(), and if not found there, are 3395 * looked for in a list hardcoded in this driver via 3396 * sd_process_sdconf_table() Once located the properties 3397 * are used to update the driver unit structure. 3398 * 3399 * Arguments: un - driver soft state (unit) structure 3400 */ 3401 3402 static void 3403 sd_read_unit_properties(struct sd_lun *un) 3404 { 3405 /* 3406 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3407 * the "sd-config-list" property (from the sd.conf file) or if 3408 * there was not a match for the inquiry vid/pid. If this event 3409 * occurs the static driver configuration table is searched for 3410 * a match. 3411 */ 3412 ASSERT(un != NULL); 3413 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3414 sd_process_sdconf_table(un); 3415 } 3416 3417 /* check for LSI device */ 3418 sd_is_lsi(un); 3419 3420 3421 } 3422 3423 3424 /* 3425 * Function: sd_process_sdconf_file 3426 * 3427 * Description: Use ddi_getlongprop to obtain the properties from the 3428 * driver's config file (ie, sd.conf) and update the driver 3429 * soft state structure accordingly. 3430 * 3431 * Arguments: un - driver soft state (unit) structure 3432 * 3433 * Return Code: SD_SUCCESS - The properties were successfully set according 3434 * to the driver configuration file. 3435 * SD_FAILURE - The driver config list was not obtained or 3436 * there was no vid/pid match. This indicates that 3437 * the static config table should be used. 
3438 * 3439 * The config file has a property, "sd-config-list", which consists of 3440 * one or more duplets as follows: 3441 * 3442 * sd-config-list= 3443 * <duplet>, 3444 * [<duplet>,] 3445 * [<duplet>]; 3446 * 3447 * The structure of each duplet is as follows: 3448 * 3449 * <duplet>:= <vid+pid>,<data-property-name_list> 3450 * 3451 * The first entry of the duplet is the device ID string (the concatenated 3452 * vid & pid; not to be confused with a device_id). This is defined in 3453 * the same way as in the sd_disk_table. 3454 * 3455 * The second part of the duplet is a string that identifies a 3456 * data-property-name-list. The data-property-name-list is defined as 3457 * follows: 3458 * 3459 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3460 * 3461 * The syntax of <data-property-name> depends on the <version> field. 3462 * 3463 * If version = SD_CONF_VERSION_1 we have the following syntax: 3464 * 3465 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3466 * 3467 * where the prop0 value will be used to set prop0 if bit0 set in the 3468 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3469 * 3470 */ 3471 3472 static int 3473 sd_process_sdconf_file(struct sd_lun *un) 3474 { 3475 char *config_list = NULL; 3476 int config_list_len; 3477 int len; 3478 int dupletlen = 0; 3479 char *vidptr; 3480 int vidlen; 3481 char *dnlist_ptr; 3482 char *dataname_ptr; 3483 int dnlist_len; 3484 int dataname_len; 3485 int *data_list; 3486 int data_list_len; 3487 int rval = SD_FAILURE; 3488 int i; 3489 3490 ASSERT(un != NULL); 3491 3492 /* Obtain the configuration list associated with the .conf file */ 3493 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3494 sd_config_list, (caddr_t)&config_list, &config_list_len) 3495 != DDI_PROP_SUCCESS) { 3496 return (SD_FAILURE); 3497 } 3498 3499 /* 3500 * Compare vids in each duplet to the inquiry vid - if a match is 3501 * made, get the data value and update the soft state structure 3502 * accordingly. 3503 * 3504 * Note: This algorithm is complex and difficult to maintain. It should 3505 * be replaced with a more robust implementation. 3506 */ 3507 for (len = config_list_len, vidptr = config_list; len > 0; 3508 vidptr += dupletlen, len -= dupletlen) { 3509 /* 3510 * Note: The assumption here is that each vid entry is on 3511 * a unique line from its associated duplet. 3512 */ 3513 vidlen = dupletlen = (int)strlen(vidptr); 3514 if ((vidlen == 0) || 3515 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3516 dupletlen++; 3517 continue; 3518 } 3519 3520 /* 3521 * dnlist contains 1 or more blank separated 3522 * data-property-name entries 3523 */ 3524 dnlist_ptr = vidptr + vidlen + 1; 3525 dnlist_len = (int)strlen(dnlist_ptr); 3526 dupletlen += dnlist_len + 2; 3527 3528 /* 3529 * Set a pointer for the first data-property-name 3530 * entry in the list 3531 */ 3532 dataname_ptr = dnlist_ptr; 3533 dataname_len = 0; 3534 3535 /* 3536 * Loop through all data-property-name entries in the 3537 * data-property-name-list setting the properties for each. 3538 */ 3539 while (dataname_len < dnlist_len) { 3540 int version; 3541 3542 /* 3543 * Determine the length of the current 3544 * data-property-name entry by indexing until a 3545 * blank or NULL is encountered. When the space is 3546 * encountered reset it to a NULL for compliance 3547 * with ddi_getlongprop(). 
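 *
 * For example, given a hypothetical sd.conf fragment such as the
 * following (illustrative names only, not shipped defaults):
 *
 *	sd-config-list =
 *		"ACME    SUPERDISK1", "acme-data acme-data2";
 *
 * the second element of the duplet, "acme-data acme-data2", is walked
 * one blank separated name at a time; each name is NUL terminated in
 * place and then handed to ddi_getlongprop() to fetch its version-1
 * integer list.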
3548 */ 3549 for (i = 0; ((dataname_ptr[i] != ' ') && 3550 (dataname_ptr[i] != '\0')); i++) { 3551 ; 3552 } 3553 3554 dataname_len += i; 3555 /* If not null terminated, Make it so */ 3556 if (dataname_ptr[i] == ' ') { 3557 dataname_ptr[i] = '\0'; 3558 } 3559 dataname_len++; 3560 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3561 "sd_process_sdconf_file: disk:%s, data:%s\n", 3562 vidptr, dataname_ptr); 3563 3564 /* Get the data list */ 3565 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3566 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3567 != DDI_PROP_SUCCESS) { 3568 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3569 "sd_process_sdconf_file: data property (%s)" 3570 " has no value\n", dataname_ptr); 3571 dataname_ptr = dnlist_ptr + dataname_len; 3572 continue; 3573 } 3574 3575 version = data_list[0]; 3576 3577 if (version == SD_CONF_VERSION_1) { 3578 sd_tunables values; 3579 3580 /* Set the properties */ 3581 if (sd_chk_vers1_data(un, data_list[1], 3582 &data_list[2], data_list_len, dataname_ptr) 3583 == SD_SUCCESS) { 3584 sd_get_tunables_from_conf(un, 3585 data_list[1], &data_list[2], 3586 &values); 3587 sd_set_vers1_properties(un, 3588 data_list[1], &values); 3589 rval = SD_SUCCESS; 3590 } else { 3591 rval = SD_FAILURE; 3592 } 3593 } else { 3594 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3595 "data property %s version 0x%x is invalid.", 3596 dataname_ptr, version); 3597 rval = SD_FAILURE; 3598 } 3599 kmem_free(data_list, data_list_len); 3600 dataname_ptr = dnlist_ptr + dataname_len; 3601 } 3602 } 3603 3604 /* free up the memory allocated by ddi_getlongprop */ 3605 if (config_list) { 3606 kmem_free(config_list, config_list_len); 3607 } 3608 3609 return (rval); 3610 } 3611 3612 /* 3613 * Function: sd_get_tunables_from_conf() 3614 * 3615 * 3616 * This function reads the data list from the sd.conf file and pulls 3617 * the values that can have numeric values as arguments and places 3618 * the values in the appropriate sd_tunables member. 3619 * Since the order of the data list members varies across platforms 3620 * This function reads them from the data list in a platform specific 3621 * order and places them into the correct sd_tunable member that is 3622 * consistent across all platforms. 
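 *
 * As a worked example (illustrative values only): if a conf file entry
 * supplied the list 1, 0x5, 30, 0, 24, then flags is 0x5 and the
 * property values handed to this function are {30, 0, 24}. Bits 0 and
 * 2 (SD_CONF_BSET_THROTTLE and SD_CONF_BSET_NRR_COUNT) are set, so
 * data_list[0] (30) lands in sdt_throttle and data_list[2] (24) lands
 * in sdt_not_rdy_retries; data_list[1] is skipped because bit 1 is
 * clear.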
3623 */ 3624 static void 3625 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3626 sd_tunables *values) 3627 { 3628 int i; 3629 int mask; 3630 3631 bzero(values, sizeof (sd_tunables)); 3632 3633 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3634 3635 mask = 1 << i; 3636 if (mask > flags) { 3637 break; 3638 } 3639 3640 switch (mask & flags) { 3641 case 0: /* This mask bit not set in flags */ 3642 continue; 3643 case SD_CONF_BSET_THROTTLE: 3644 values->sdt_throttle = data_list[i]; 3645 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3646 "sd_get_tunables_from_conf: throttle = %d\n", 3647 values->sdt_throttle); 3648 break; 3649 case SD_CONF_BSET_CTYPE: 3650 values->sdt_ctype = data_list[i]; 3651 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3652 "sd_get_tunables_from_conf: ctype = %d\n", 3653 values->sdt_ctype); 3654 break; 3655 case SD_CONF_BSET_NRR_COUNT: 3656 values->sdt_not_rdy_retries = data_list[i]; 3657 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3658 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3659 values->sdt_not_rdy_retries); 3660 break; 3661 case SD_CONF_BSET_BSY_RETRY_COUNT: 3662 values->sdt_busy_retries = data_list[i]; 3663 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3664 "sd_get_tunables_from_conf: busy_retries = %d\n", 3665 values->sdt_busy_retries); 3666 break; 3667 case SD_CONF_BSET_RST_RETRIES: 3668 values->sdt_reset_retries = data_list[i]; 3669 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3670 "sd_get_tunables_from_conf: reset_retries = %d\n", 3671 values->sdt_reset_retries); 3672 break; 3673 case SD_CONF_BSET_RSV_REL_TIME: 3674 values->sdt_reserv_rel_time = data_list[i]; 3675 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3676 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3677 values->sdt_reserv_rel_time); 3678 break; 3679 case SD_CONF_BSET_MIN_THROTTLE: 3680 values->sdt_min_throttle = data_list[i]; 3681 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3682 "sd_get_tunables_from_conf: min_throttle = %d\n", 3683 values->sdt_min_throttle); 3684 break; 3685 case SD_CONF_BSET_DISKSORT_DISABLED: 3686 values->sdt_disk_sort_dis = data_list[i]; 3687 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3688 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3689 values->sdt_disk_sort_dis); 3690 break; 3691 case SD_CONF_BSET_LUN_RESET_ENABLED: 3692 values->sdt_lun_reset_enable = data_list[i]; 3693 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3694 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3695 "\n", values->sdt_lun_reset_enable); 3696 break; 3697 } 3698 } 3699 } 3700 3701 /* 3702 * Function: sd_process_sdconf_table 3703 * 3704 * Description: Search the static configuration table for a match on the 3705 * inquiry vid/pid and update the driver soft state structure 3706 * according to the table property values for the device. 3707 * 3708 * The form of a configuration table entry is: 3709 * <vid+pid>,<flags>,<property-data> 3710 * "SEAGATE ST42400N",1,63,0,0 (Fibre) 3711 * "SEAGATE ST42400N",1,63,0,0,0,0 (Sparc) 3712 * "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0 (Intel) 3713 * 3714 * Arguments: un - driver soft state (unit) structure 3715 */ 3716 3717 static void 3718 sd_process_sdconf_table(struct sd_lun *un) 3719 { 3720 char *id = NULL; 3721 int table_index; 3722 int idlen; 3723 3724 ASSERT(un != NULL); 3725 for (table_index = 0; table_index < sd_disk_table_size; 3726 table_index++) { 3727 id = sd_disk_table[table_index].device_id; 3728 idlen = strlen(id); 3729 if (idlen == 0) { 3730 continue; 3731 } 3732 3733 /* 3734 * The static configuration table currently does not 3735 * implement version 10 properties. 
Additionally, 3736 * multiple data-property-name entries are not 3737 * implemented in the static configuration table. 3738 */ 3739 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3740 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3741 "sd_process_sdconf_table: disk %s\n", id); 3742 sd_set_vers1_properties(un, 3743 sd_disk_table[table_index].flags, 3744 sd_disk_table[table_index].properties); 3745 break; 3746 } 3747 } 3748 } 3749 3750 3751 /* 3752 * Function: sd_sdconf_id_match 3753 * 3754 * Description: This local function implements a case sensitive vid/pid 3755 * comparison as well as the boundary cases of wild card and 3756 * multiple blanks. 3757 * 3758 * Note: An implicit assumption made here is that the scsi 3759 * inquiry structure will always keep the vid, pid and 3760 * revision strings in consecutive sequence, so they can be 3761 * read as a single string. If this assumption is not the 3762 * case, a separate string, to be used for the check, needs 3763 * to be built with these strings concatenated. 3764 * 3765 * Arguments: un - driver soft state (unit) structure 3766 * id - table or config file vid/pid 3767 * idlen - length of the vid/pid (bytes) 3768 * 3769 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3770 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3771 */ 3772 3773 static int 3774 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3775 { 3776 struct scsi_inquiry *sd_inq; 3777 int rval = SD_SUCCESS; 3778 3779 ASSERT(un != NULL); 3780 sd_inq = un->un_sd->sd_inq; 3781 ASSERT(id != NULL); 3782 3783 /* 3784 * We use the inq_vid as a pointer to a buffer containing the 3785 * vid and pid and use the entire vid/pid length of the table 3786 * entry for the comparison. This works because the inq_pid 3787 * data member follows inq_vid in the scsi_inquiry structure. 3788 */ 3789 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3790 /* 3791 * The user id string is compared to the inquiry vid/pid 3792 * using a case insensitive comparison and ignoring 3793 * multiple spaces. 3794 */ 3795 rval = sd_blank_cmp(un, id, idlen); 3796 if (rval != SD_SUCCESS) { 3797 /* 3798 * User id strings that start and end with a "*" 3799 * are a special case. These do not have a 3800 * specific vendor, and the product string can 3801 * appear anywhere in the 16 byte PID portion of 3802 * the inquiry data. This is a simple strstr() 3803 * type search for the user id in the inquiry data. 3804 */ 3805 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3806 char *pidptr = &id[1]; 3807 int i; 3808 int j; 3809 int pidstrlen = idlen - 2; 3810 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3811 pidstrlen; 3812 3813 if (j < 0) { 3814 return (SD_FAILURE); 3815 } 3816 for (i = 0; i < j; i++) { 3817 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3818 pidptr, pidstrlen) == 0) { 3819 rval = SD_SUCCESS; 3820 break; 3821 } 3822 } 3823 } 3824 } 3825 } 3826 return (rval); 3827 } 3828 3829 3830 /* 3831 * Function: sd_blank_cmp 3832 * 3833 * Description: If the id string starts and ends with a space, treat 3834 * multiple consecutive spaces as equivalent to a single 3835 * space. For example, this causes a sd_disk_table entry 3836 * of " NEC CDROM " to match a device's id string of 3837 * "NEC CDROM". 3838 * 3839 * Note: The success exit condition for this routine is if 3840 * the pointer to the table entry is '\0' and the cnt of 3841 * the inquiry length is zero. 
This will happen if the inquiry 3842 * string returned by the device is padded with spaces to be 3843 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3844 * SCSI spec states that the inquiry string is to be padded with 3845 * spaces. 3846 * 3847 * Arguments: un - driver soft state (unit) structure 3848 * id - table or config file vid/pid 3849 * idlen - length of the vid/pid (bytes) 3850 * 3851 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3852 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3853 */ 3854 3855 static int 3856 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3857 { 3858 char *p1; 3859 char *p2; 3860 int cnt; 3861 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3862 sizeof (SD_INQUIRY(un)->inq_pid); 3863 3864 ASSERT(un != NULL); 3865 p2 = un->un_sd->sd_inq->inq_vid; 3866 ASSERT(id != NULL); 3867 p1 = id; 3868 3869 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3870 /* 3871 * Note: string p1 is terminated by a NUL but string p2 3872 * isn't. The end of p2 is determined by cnt. 3873 */ 3874 for (;;) { 3875 /* skip over any extra blanks in both strings */ 3876 while ((*p1 != '\0') && (*p1 == ' ')) { 3877 p1++; 3878 } 3879 while ((cnt != 0) && (*p2 == ' ')) { 3880 p2++; 3881 cnt--; 3882 } 3883 3884 /* compare the two strings */ 3885 if ((cnt == 0) || 3886 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3887 break; 3888 } 3889 while ((cnt > 0) && 3890 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3891 p1++; 3892 p2++; 3893 cnt--; 3894 } 3895 } 3896 } 3897 3898 /* return SD_SUCCESS if both strings match */ 3899 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE); 3900 } 3901 3902 3903 /* 3904 * Function: sd_chk_vers1_data 3905 * 3906 * Description: Verify the version 1 device properties provided by the 3907 * user via the configuration file 3908 * 3909 * Arguments: un - driver soft state (unit) structure 3910 * flags - integer mask indicating properties to be set 3911 * prop_list - integer list of property values 3912 * list_len - length of user provided data 3913 * 3914 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3915 * SD_FAILURE - Indicates the user provided data is invalid 3916 */ 3917 3918 static int 3919 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3920 int list_len, char *dataname_ptr) 3921 { 3922 int i; 3923 int mask = 1; 3924 int index = 0; 3925 3926 ASSERT(un != NULL); 3927 3928 /* Check for a NULL property name and list */ 3929 if (dataname_ptr == NULL) { 3930 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3931 "sd_chk_vers1_data: NULL data property name."); 3932 return (SD_FAILURE); 3933 } 3934 if (prop_list == NULL) { 3935 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3936 "sd_chk_vers1_data: %s NULL data property list.", 3937 dataname_ptr); 3938 return (SD_FAILURE); 3939 } 3940 3941 /* Display a warning if undefined bits are set in the flags */ 3942 if (flags & ~SD_CONF_BIT_MASK) { 3943 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3944 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3945 "Properties not set.", 3946 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3947 return (SD_FAILURE); 3948 } 3949 3950 /* 3951 * Verify the length of the list by identifying the highest bit set 3952 * in the flags and validating that the property list has a length 3953 * up to the index of this bit. 
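 *
 * For instance, a flag word of 0x5 has its highest bit at position 2,
 * so the integer list must carry at least five values: the version,
 * the flag word itself, and property values for positions 0 through 2.
 * Values at positions whose bits are clear are still present in the
 * list; they are simply never consumed.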
3954 */ 3955 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3956 if (flags & mask) { 3957 index++; 3958 } 3959 mask = 1 << i; 3960 } 3961 if ((list_len / sizeof (int)) < (index + 2)) { 3962 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3963 "sd_chk_vers1_data: " 3964 "Data property list %s size is incorrect. " 3965 "Properties not set.", dataname_ptr); 3966 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3967 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3968 return (SD_FAILURE); 3969 } 3970 return (SD_SUCCESS); 3971 } 3972 3973 3974 /* 3975 * Function: sd_set_vers1_properties 3976 * 3977 * Description: Set version 1 device properties based on a property list 3978 * retrieved from the driver configuration file or static 3979 * configuration table. Version 1 properties have the format: 3980 * 3981 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3982 * 3983 * where the prop0 value will be used to set prop0 if bit0 3984 * is set in the flags 3985 * 3986 * Arguments: un - driver soft state (unit) structure 3987 * flags - integer mask indicating properties to be set 3988 * prop_list - integer list of property values 3989 */ 3990 3991 static void 3992 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 3993 { 3994 ASSERT(un != NULL); 3995 3996 /* 3997 * Set the flag to indicate cache is to be disabled. An attempt 3998 * to disable the cache via sd_cache_control() will be made 3999 * later during attach once the basic initialization is complete. 4000 */ 4001 if (flags & SD_CONF_BSET_NOCACHE) { 4002 un->un_f_opt_disable_cache = TRUE; 4003 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4004 "sd_set_vers1_properties: caching disabled flag set\n"); 4005 } 4006 4007 /* CD-specific configuration parameters */ 4008 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4009 un->un_f_cfg_playmsf_bcd = TRUE; 4010 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4011 "sd_set_vers1_properties: playmsf_bcd set\n"); 4012 } 4013 if (flags & SD_CONF_BSET_READSUB_BCD) { 4014 un->un_f_cfg_readsub_bcd = TRUE; 4015 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4016 "sd_set_vers1_properties: readsub_bcd set\n"); 4017 } 4018 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4019 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4020 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4021 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4022 } 4023 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4024 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4025 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4026 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4027 } 4028 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4029 un->un_f_cfg_no_read_header = TRUE; 4030 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4031 "sd_set_vers1_properties: no_read_header set\n"); 4032 } 4033 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4034 un->un_f_cfg_read_cd_xd4 = TRUE; 4035 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4036 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4037 } 4038 4039 /* Support for devices which do not have valid/unique serial numbers */ 4040 if (flags & SD_CONF_BSET_FAB_DEVID) { 4041 un->un_f_opt_fab_devid = TRUE; 4042 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4043 "sd_set_vers1_properties: fab_devid bit set\n"); 4044 } 4045 4046 /* Support for user throttle configuration */ 4047 if (flags & SD_CONF_BSET_THROTTLE) { 4048 ASSERT(prop_list != NULL); 4049 un->un_saved_throttle = un->un_throttle = 4050 prop_list->sdt_throttle; 4051 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4052 "sd_set_vers1_properties: throttle set to %d\n", 4053 prop_list->sdt_throttle); 4054 } 4055 4056 /* Set the per disk retry count 
according to the conf file or table. */ 4057 if (flags & SD_CONF_BSET_NRR_COUNT) { 4058 ASSERT(prop_list != NULL); 4059 if (prop_list->sdt_not_rdy_retries) { 4060 un->un_notready_retry_count = 4061 prop_list->sdt_not_rdy_retries; 4062 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4063 "sd_set_vers1_properties: not ready retry count" 4064 " set to %d\n", un->un_notready_retry_count); 4065 } 4066 } 4067 4068 /* The controller type is reported for generic disk driver ioctls */ 4069 if (flags & SD_CONF_BSET_CTYPE) { 4070 ASSERT(prop_list != NULL); 4071 switch (prop_list->sdt_ctype) { 4072 case CTYPE_CDROM: 4073 un->un_ctype = prop_list->sdt_ctype; 4074 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4075 "sd_set_vers1_properties: ctype set to " 4076 "CTYPE_CDROM\n"); 4077 break; 4078 case CTYPE_CCS: 4079 un->un_ctype = prop_list->sdt_ctype; 4080 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4081 "sd_set_vers1_properties: ctype set to " 4082 "CTYPE_CCS\n"); 4083 break; 4084 case CTYPE_ROD: /* RW optical */ 4085 un->un_ctype = prop_list->sdt_ctype; 4086 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4087 "sd_set_vers1_properties: ctype set to " 4088 "CTYPE_ROD\n"); 4089 break; 4090 default: 4091 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4092 "sd_set_vers1_properties: Could not set " 4093 "invalid ctype value (%d)", 4094 prop_list->sdt_ctype); 4095 } 4096 } 4097 4098 /* Purple failover timeout */ 4099 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4100 ASSERT(prop_list != NULL); 4101 un->un_busy_retry_count = 4102 prop_list->sdt_busy_retries; 4103 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4104 "sd_set_vers1_properties: " 4105 "busy retry count set to %d\n", 4106 un->un_busy_retry_count); 4107 } 4108 4109 /* Purple reset retry count */ 4110 if (flags & SD_CONF_BSET_RST_RETRIES) { 4111 ASSERT(prop_list != NULL); 4112 un->un_reset_retry_count = 4113 prop_list->sdt_reset_retries; 4114 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4115 "sd_set_vers1_properties: " 4116 "reset retry count set to %d\n", 4117 un->un_reset_retry_count); 4118 } 4119 4120 /* Purple reservation release timeout */ 4121 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4122 ASSERT(prop_list != NULL); 4123 un->un_reserve_release_time = 4124 prop_list->sdt_reserv_rel_time; 4125 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4126 "sd_set_vers1_properties: " 4127 "reservation release timeout set to %d\n", 4128 un->un_reserve_release_time); 4129 } 4130 4131 /* 4132 * Driver flag telling the driver to verify that no commands are pending 4133 * for a device before issuing a Test Unit Ready. This is a workaround 4134 * for a firmware bug in some Seagate eliteI drives. 4135 */ 4136 if (flags & SD_CONF_BSET_TUR_CHECK) { 4137 un->un_f_cfg_tur_check = TRUE; 4138 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4139 "sd_set_vers1_properties: tur queue check set\n"); 4140 } 4141 4142 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4143 un->un_min_throttle = prop_list->sdt_min_throttle; 4144 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4145 "sd_set_vers1_properties: min throttle set to %d\n", 4146 un->un_min_throttle); 4147 } 4148 4149 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4150 un->un_f_disksort_disabled = 4151 (prop_list->sdt_disk_sort_dis != 0) ? 4152 TRUE : FALSE; 4153 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4154 "sd_set_vers1_properties: disksort disabled " 4155 "flag set to %d\n", 4156 prop_list->sdt_disk_sort_dis); 4157 } 4158 4159 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4160 un->un_f_lun_reset_enabled = 4161 (prop_list->sdt_lun_reset_enable != 0) ? 
4162 TRUE : FALSE; 4163 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4164 "sd_set_vers1_properties: lun reset enabled " 4165 "flag set to %d\n", 4166 prop_list->sdt_lun_reset_enable); 4167 } 4168 4169 /* 4170 * Validate the throttle values. 4171 * If any of the numbers are invalid, set everything to defaults. 4172 */ 4173 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4174 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4175 (un->un_min_throttle > un->un_throttle)) { 4176 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4177 un->un_min_throttle = sd_min_throttle; 4178 } 4179 } 4180 4181 /* 4182 * Function: sd_is_lsi() 4183 * 4184 * Description: Check for lsi devices, step through the static device 4185 * table to match vid/pid. 4186 * 4187 * Args: un - ptr to sd_lun 4188 * 4189 * Notes: When creating new LSI property, need to add the new LSI property 4190 * to this function. 4191 */ 4192 static void 4193 sd_is_lsi(struct sd_lun *un) 4194 { 4195 char *id = NULL; 4196 int table_index; 4197 int idlen; 4198 void *prop; 4199 4200 ASSERT(un != NULL); 4201 for (table_index = 0; table_index < sd_disk_table_size; 4202 table_index++) { 4203 id = sd_disk_table[table_index].device_id; 4204 idlen = strlen(id); 4205 if (idlen == 0) { 4206 continue; 4207 } 4208 4209 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4210 prop = sd_disk_table[table_index].properties; 4211 if (prop == &lsi_properties || 4212 prop == &lsi_oem_properties || 4213 prop == &lsi_properties_scsi || 4214 prop == &symbios_properties) { 4215 un->un_f_cfg_is_lsi = TRUE; 4216 } 4217 break; 4218 } 4219 } 4220 } 4221 4222 /* 4223 * Function: sd_get_physical_geometry 4224 * 4225 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4226 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4227 * target, and use this information to initialize the physical 4228 * geometry cache specified by pgeom_p. 4229 * 4230 * MODE SENSE is an optional command, so failure in this case 4231 * does not necessarily denote an error. We want to use the 4232 * MODE SENSE commands to derive the physical geometry of the 4233 * device, but if either command fails, the logical geometry is 4234 * used as the fallback for disk label geometry in cmlb. 4235 * 4236 * This requires that un->un_blockcount and un->un_tgt_blocksize 4237 * have already been initialized for the current target and 4238 * that the current values be passed as args so that we don't 4239 * end up ever trying to use -1 as a valid value. This could 4240 * happen if either value is reset while we're not holding 4241 * the mutex. 4242 * 4243 * Arguments: un - driver soft state (unit) structure 4244 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4245 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4246 * to use the USCSI "direct" chain and bypass the normal 4247 * command waitq. 4248 * 4249 * Context: Kernel thread only (can sleep). 
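 *
 *		As a rough outline of what follows: page 3 supplies the
 *		sectors-per-track and sector size, page 4 supplies the
 *		head and cylinder counts, and the two are combined as
 *
 *			spc              = nhead * nsect
 *			mode sense cap.  = spc * ncyl	(in sectors)
 *
 *		The result is compared with the READ CAPACITY value; only
 *		when the mode sense geometry covers at least that many
 *		sectors is the physical geometry cache updated (with an
 *		alternate-cylinder adjustment).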
4250 */ 4251 4252 static int 4253 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4254 diskaddr_t capacity, int lbasize, int path_flag) 4255 { 4256 struct mode_format *page3p; 4257 struct mode_geometry *page4p; 4258 struct mode_header *headerp; 4259 int sector_size; 4260 int nsect; 4261 int nhead; 4262 int ncyl; 4263 int intrlv; 4264 int spc; 4265 diskaddr_t modesense_capacity; 4266 int rpm; 4267 int bd_len; 4268 int mode_header_length; 4269 uchar_t *p3bufp; 4270 uchar_t *p4bufp; 4271 int cdbsize; 4272 int ret = EIO; 4273 4274 ASSERT(un != NULL); 4275 4276 if (lbasize == 0) { 4277 if (ISCD(un)) { 4278 lbasize = 2048; 4279 } else { 4280 lbasize = un->un_sys_blocksize; 4281 } 4282 } 4283 pgeom_p->g_secsize = (unsigned short)lbasize; 4284 4285 /* 4286 * If the unit is a cd/dvd drive MODE SENSE page three 4287 * and MODE SENSE page four are reserved (see SBC spec 4288 * and MMC spec). To prevent soft errors just return 4289 * using the default LBA size. 4290 */ 4291 if (ISCD(un)) 4292 return (ret); 4293 4294 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4295 4296 /* 4297 * Retrieve MODE SENSE page 3 - Format Device Page 4298 */ 4299 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4300 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4301 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4302 != 0) { 4303 SD_ERROR(SD_LOG_COMMON, un, 4304 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4305 goto page3_exit; 4306 } 4307 4308 /* 4309 * Determine size of Block Descriptors in order to locate the mode 4310 * page data. ATAPI devices return 0, SCSI devices should return 4311 * MODE_BLK_DESC_LENGTH. 4312 */ 4313 headerp = (struct mode_header *)p3bufp; 4314 if (un->un_f_cfg_is_atapi == TRUE) { 4315 struct mode_header_grp2 *mhp = 4316 (struct mode_header_grp2 *)headerp; 4317 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4318 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4319 } else { 4320 mode_header_length = MODE_HEADER_LENGTH; 4321 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4322 } 4323 4324 if (bd_len > MODE_BLK_DESC_LENGTH) { 4325 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4326 "received unexpected bd_len of %d, page3\n", bd_len); 4327 goto page3_exit; 4328 } 4329 4330 page3p = (struct mode_format *) 4331 ((caddr_t)headerp + mode_header_length + bd_len); 4332 4333 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4334 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4335 "mode sense pg3 code mismatch %d\n", 4336 page3p->mode_page.code); 4337 goto page3_exit; 4338 } 4339 4340 /* 4341 * Use this physical geometry data only if BOTH MODE SENSE commands 4342 * complete successfully; otherwise, revert to the logical geometry. 4343 * So, we need to save everything in temporary variables. 
4344 */ 4345 sector_size = BE_16(page3p->data_bytes_sect); 4346 4347 /* 4348 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4349 */ 4350 if (sector_size == 0) { 4351 sector_size = un->un_sys_blocksize; 4352 } else { 4353 sector_size &= ~(un->un_sys_blocksize - 1); 4354 } 4355 4356 nsect = BE_16(page3p->sect_track); 4357 intrlv = BE_16(page3p->interleave); 4358 4359 SD_INFO(SD_LOG_COMMON, un, 4360 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4361 SD_INFO(SD_LOG_COMMON, un, 4362 " mode page: %d; nsect: %d; sector size: %d;\n", 4363 page3p->mode_page.code, nsect, sector_size); 4364 SD_INFO(SD_LOG_COMMON, un, 4365 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4366 BE_16(page3p->track_skew), 4367 BE_16(page3p->cylinder_skew)); 4368 4369 4370 /* 4371 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4372 */ 4373 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4374 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4375 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4376 != 0) { 4377 SD_ERROR(SD_LOG_COMMON, un, 4378 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4379 goto page4_exit; 4380 } 4381 4382 /* 4383 * Determine size of Block Descriptors in order to locate the mode 4384 * page data. ATAPI devices return 0, SCSI devices should return 4385 * MODE_BLK_DESC_LENGTH. 4386 */ 4387 headerp = (struct mode_header *)p4bufp; 4388 if (un->un_f_cfg_is_atapi == TRUE) { 4389 struct mode_header_grp2 *mhp = 4390 (struct mode_header_grp2 *)headerp; 4391 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4392 } else { 4393 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4394 } 4395 4396 if (bd_len > MODE_BLK_DESC_LENGTH) { 4397 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4398 "received unexpected bd_len of %d, page4\n", bd_len); 4399 goto page4_exit; 4400 } 4401 4402 page4p = (struct mode_geometry *) 4403 ((caddr_t)headerp + mode_header_length + bd_len); 4404 4405 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4406 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4407 "mode sense pg4 code mismatch %d\n", 4408 page4p->mode_page.code); 4409 goto page4_exit; 4410 } 4411 4412 /* 4413 * Stash the data now, after we know that both commands completed. 4414 */ 4415 4416 4417 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4418 spc = nhead * nsect; 4419 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4420 rpm = BE_16(page4p->rpm); 4421 4422 modesense_capacity = spc * ncyl; 4423 4424 SD_INFO(SD_LOG_COMMON, un, 4425 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4426 SD_INFO(SD_LOG_COMMON, un, 4427 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4428 SD_INFO(SD_LOG_COMMON, un, 4429 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4430 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4431 (void *)pgeom_p, capacity); 4432 4433 /* 4434 * Compensate if the drive's geometry is not rectangular, i.e., 4435 * the product of C * H * S returned by MODE SENSE >= that returned 4436 * by read capacity. This is an idiosyncrasy of the original x86 4437 * disk subsystem. 
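 *
 * A worked example with purely illustrative numbers: if the mode pages
 * report nsect = 128, nhead = 16 and ncyl = 10000, then spc = 2048 and
 * the mode sense capacity is 20,480,000 sectors. If READ CAPACITY
 * reported 20,000,000 sectors, the excess of 480,000 sectors rounds up
 * to 235 alternate cylinders, so g_acyl becomes 235 and g_ncyl becomes
 * 10000 - 235 = 9765.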
4438 */ 4439 if (modesense_capacity >= capacity) { 4440 SD_INFO(SD_LOG_COMMON, un, 4441 "sd_get_physical_geometry: adjusting acyl; " 4442 "old: %d; new: %d\n", pgeom_p->g_acyl, 4443 (modesense_capacity - capacity + spc - 1) / spc); 4444 if (sector_size != 0) { 4445 /* 1243403: NEC D38x7 drives don't support sec size */ 4446 pgeom_p->g_secsize = (unsigned short)sector_size; 4447 } 4448 pgeom_p->g_nsect = (unsigned short)nsect; 4449 pgeom_p->g_nhead = (unsigned short)nhead; 4450 pgeom_p->g_capacity = capacity; 4451 pgeom_p->g_acyl = 4452 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4453 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4454 } 4455 4456 pgeom_p->g_rpm = (unsigned short)rpm; 4457 pgeom_p->g_intrlv = (unsigned short)intrlv; 4458 ret = 0; 4459 4460 SD_INFO(SD_LOG_COMMON, un, 4461 "sd_get_physical_geometry: mode sense geometry:\n"); 4462 SD_INFO(SD_LOG_COMMON, un, 4463 " nsect: %d; sector size: %d; interlv: %d\n", 4464 nsect, sector_size, intrlv); 4465 SD_INFO(SD_LOG_COMMON, un, 4466 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4467 nhead, ncyl, rpm, modesense_capacity); 4468 SD_INFO(SD_LOG_COMMON, un, 4469 "sd_get_physical_geometry: (cached)\n"); 4470 SD_INFO(SD_LOG_COMMON, un, 4471 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4472 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4473 pgeom_p->g_nhead, pgeom_p->g_nsect); 4474 SD_INFO(SD_LOG_COMMON, un, 4475 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4476 pgeom_p->g_secsize, pgeom_p->g_capacity, 4477 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4478 4479 page4_exit: 4480 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4481 page3_exit: 4482 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4483 4484 return (ret); 4485 } 4486 4487 /* 4488 * Function: sd_get_virtual_geometry 4489 * 4490 * Description: Ask the controller to tell us about the target device. 4491 * 4492 * Arguments: un - pointer to softstate 4493 * capacity - disk capacity in #blocks 4494 * lbasize - disk block size in bytes 4495 * 4496 * Context: Kernel thread only 4497 */ 4498 4499 static int 4500 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4501 diskaddr_t capacity, int lbasize) 4502 { 4503 uint_t geombuf; 4504 int spc; 4505 4506 ASSERT(un != NULL); 4507 4508 /* Set sector size, and total number of sectors */ 4509 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4510 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4511 4512 /* Let the HBA tell us its geometry */ 4513 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4514 4515 /* A value of -1 indicates an undefined "geometry" property */ 4516 if (geombuf == (-1)) { 4517 return (EINVAL); 4518 } 4519 4520 /* Initialize the logical geometry cache. */ 4521 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4522 lgeom_p->g_nsect = geombuf & 0xffff; 4523 lgeom_p->g_secsize = un->un_sys_blocksize; 4524 4525 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4526 4527 /* 4528 * Note: The driver originally converted the capacity value from 4529 * target blocks to system blocks. However, the capacity value passed 4530 * to this routine is already in terms of system blocks (this scaling 4531 * is done when the READ CAPACITY command is issued and processed). 4532 * This 'error' may have gone undetected because the usage of g_ncyl 4533 * (which is based upon g_capacity) is very limited within the driver 4534 */ 4535 lgeom_p->g_capacity = capacity; 4536 4537 /* 4538 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4539 * hba may return zero values if the device has been removed. 4540 */ 4541 if (spc == 0) { 4542 lgeom_p->g_ncyl = 0; 4543 } else { 4544 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4545 } 4546 lgeom_p->g_acyl = 0; 4547 4548 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4549 return (0); 4550 4551 } 4552 /* 4553 * Function: sd_update_block_info 4554 * 4555 * Description: Calculate a byte count to sector count bitshift value 4556 * from sector size. 4557 * 4558 * Arguments: un: unit struct. 4559 * lbasize: new target sector size 4560 * capacity: new target capacity, ie. block count 4561 * 4562 * Context: Kernel thread context 4563 */ 4564 4565 static void 4566 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4567 { 4568 uint_t dblk; 4569 4570 if (lbasize != 0) { 4571 un->un_tgt_blocksize = lbasize; 4572 un->un_f_tgt_blocksize_is_valid = TRUE; 4573 } 4574 4575 if (capacity != 0) { 4576 un->un_blockcount = capacity; 4577 un->un_f_blockcount_is_valid = TRUE; 4578 } 4579 4580 /* 4581 * Update device capacity properties. 4582 * 4583 * 'device-nblocks' number of blocks in target's units 4584 * 'device-blksize' data bearing size of target's block 4585 * 4586 * NOTE: math is complicated by the fact that un_tgt_blocksize may 4587 * not be a power of two for checksumming disks with 520/528 byte 4588 * sectors. 4589 */ 4590 if (un->un_f_tgt_blocksize_is_valid && 4591 un->un_f_blockcount_is_valid && 4592 un->un_sys_blocksize) { 4593 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 4594 (void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un), 4595 "device-nblocks", un->un_blockcount / dblk); 4596 /* 4597 * To save memory, only define "device-blksize" when its 4598 * value is differnet than the default DEV_BSIZE value. 4599 */ 4600 if ((un->un_sys_blocksize * dblk) != DEV_BSIZE) 4601 (void) ddi_prop_update_int(DDI_DEV_T_NONE, 4602 SD_DEVINFO(un), "device-blksize", 4603 un->un_sys_blocksize * dblk); 4604 } 4605 } 4606 4607 4608 /* 4609 * Function: sd_register_devid 4610 * 4611 * Description: This routine will obtain the device id information from the 4612 * target, obtain the serial number, and register the device 4613 * id with the ddi framework. 4614 * 4615 * Arguments: devi - the system's dev_info_t for the device. 4616 * un - driver soft state (unit) structure 4617 * reservation_flag - indicates if a reservation conflict 4618 * occurred during attach 4619 * 4620 * Context: Kernel Thread 4621 */ 4622 static void 4623 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4624 { 4625 int rval = 0; 4626 uchar_t *inq80 = NULL; 4627 size_t inq80_len = MAX_INQUIRY_SIZE; 4628 size_t inq80_resid = 0; 4629 uchar_t *inq83 = NULL; 4630 size_t inq83_len = MAX_INQUIRY_SIZE; 4631 size_t inq83_resid = 0; 4632 int dlen, len; 4633 char *sn; 4634 4635 ASSERT(un != NULL); 4636 ASSERT(mutex_owned(SD_MUTEX(un))); 4637 ASSERT((SD_DEVINFO(un)) == devi); 4638 4639 /* 4640 * This is the case of antiquated Sun disk drives that have the 4641 * FAB_DEVID property set in the disk_table. These drives 4642 * manage the devid's by storing them in last 2 available sectors 4643 * on the drive and have them fabricated by the ddi layer by calling 4644 * ddi_devid_init and passing the DEVID_FAB flag. 4645 */ 4646 if (un->un_f_opt_fab_devid == TRUE) { 4647 /* 4648 * Depending on EINVAL isn't reliable, since a reserved disk 4649 * may result in invalid geometry, so check to make sure a 4650 * reservation conflict did not occur during attach. 
4651 */ 4652 if ((sd_get_devid(un) == EINVAL) && 4653 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4654 /* 4655 * The devid is invalid AND there is no reservation 4656 * conflict. Fabricate a new devid. 4657 */ 4658 (void) sd_create_devid(un); 4659 } 4660 4661 /* Register the devid if it exists */ 4662 if (un->un_devid != NULL) { 4663 (void) ddi_devid_register(SD_DEVINFO(un), 4664 un->un_devid); 4665 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4666 "sd_register_devid: Devid Fabricated\n"); 4667 } 4668 return; 4669 } 4670 4671 /* 4672 * We check the availibility of the World Wide Name (0x83) and Unit 4673 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 4674 * un_vpd_page_mask from them, we decide which way to get the WWN. If 4675 * 0x83 is availible, that is the best choice. Our next choice is 4676 * 0x80. If neither are availible, we munge the devid from the device 4677 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 4678 * to fabricate a devid for non-Sun qualified disks. 4679 */ 4680 if (sd_check_vpd_page_support(un) == 0) { 4681 /* collect page 80 data if available */ 4682 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 4683 4684 mutex_exit(SD_MUTEX(un)); 4685 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 4686 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 4687 0x01, 0x80, &inq80_resid); 4688 4689 if (rval != 0) { 4690 kmem_free(inq80, inq80_len); 4691 inq80 = NULL; 4692 inq80_len = 0; 4693 } else if (ddi_prop_exists( 4694 DDI_DEV_T_NONE, SD_DEVINFO(un), 4695 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 4696 INQUIRY_SERIAL_NO) == 0) { 4697 /* 4698 * If we don't already have a serial number 4699 * property, do quick verify of data returned 4700 * and define property. 4701 */ 4702 dlen = inq80_len - inq80_resid; 4703 len = (size_t)inq80[3]; 4704 if ((dlen >= 4) && ((len + 4) <= dlen)) { 4705 /* 4706 * Ensure sn termination, skip leading 4707 * blanks, and create property 4708 * 'inquiry-serial-no'. 4709 */ 4710 sn = (char *)&inq80[4]; 4711 sn[len] = 0; 4712 while (*sn && (*sn == ' ')) 4713 sn++; 4714 if (*sn) { 4715 (void) ddi_prop_update_string( 4716 DDI_DEV_T_NONE, 4717 SD_DEVINFO(un), 4718 INQUIRY_SERIAL_NO, sn); 4719 } 4720 } 4721 } 4722 mutex_enter(SD_MUTEX(un)); 4723 } 4724 4725 /* collect page 83 data if available */ 4726 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 4727 mutex_exit(SD_MUTEX(un)); 4728 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 4729 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 4730 0x01, 0x83, &inq83_resid); 4731 4732 if (rval != 0) { 4733 kmem_free(inq83, inq83_len); 4734 inq83 = NULL; 4735 inq83_len = 0; 4736 } 4737 mutex_enter(SD_MUTEX(un)); 4738 } 4739 } 4740 4741 /* encode best devid possible based on data available */ 4742 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 4743 (char *)ddi_driver_name(SD_DEVINFO(un)), 4744 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 4745 inq80, inq80_len - inq80_resid, inq83, inq83_len - 4746 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 4747 4748 /* devid successfully encoded, register devid */ 4749 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 4750 4751 } else { 4752 /* 4753 * Unable to encode a devid based on data available. 4754 * This is not a Sun qualified disk. Older Sun disk 4755 * drives that have the SD_FAB_DEVID property 4756 * set in the disk_table and non Sun qualified 4757 * disks are treated in the same manner. 
These 4758 * drives manage the devid's by storing them in 4759 * last 2 available sectors on the drive and 4760 * have them fabricated by the ddi layer by 4761 * calling ddi_devid_init and passing the 4762 * DEVID_FAB flag. 4763 * Create a fabricate devid only if there's no 4764 * fabricate devid existed. 4765 */ 4766 if (sd_get_devid(un) == EINVAL) { 4767 (void) sd_create_devid(un); 4768 } 4769 un->un_f_opt_fab_devid = TRUE; 4770 4771 /* Register the devid if it exists */ 4772 if (un->un_devid != NULL) { 4773 (void) ddi_devid_register(SD_DEVINFO(un), 4774 un->un_devid); 4775 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4776 "sd_register_devid: devid fabricated using " 4777 "ddi framework\n"); 4778 } 4779 } 4780 4781 /* clean up resources */ 4782 if (inq80 != NULL) { 4783 kmem_free(inq80, inq80_len); 4784 } 4785 if (inq83 != NULL) { 4786 kmem_free(inq83, inq83_len); 4787 } 4788 } 4789 4790 4791 4792 /* 4793 * Function: sd_get_devid 4794 * 4795 * Description: This routine will return 0 if a valid device id has been 4796 * obtained from the target and stored in the soft state. If a 4797 * valid device id has not been previously read and stored, a 4798 * read attempt will be made. 4799 * 4800 * Arguments: un - driver soft state (unit) structure 4801 * 4802 * Return Code: 0 if we successfully get the device id 4803 * 4804 * Context: Kernel Thread 4805 */ 4806 4807 static int 4808 sd_get_devid(struct sd_lun *un) 4809 { 4810 struct dk_devid *dkdevid; 4811 ddi_devid_t tmpid; 4812 uint_t *ip; 4813 size_t sz; 4814 diskaddr_t blk; 4815 int status; 4816 int chksum; 4817 int i; 4818 size_t buffer_size; 4819 4820 ASSERT(un != NULL); 4821 ASSERT(mutex_owned(SD_MUTEX(un))); 4822 4823 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4824 un); 4825 4826 if (un->un_devid != NULL) { 4827 return (0); 4828 } 4829 4830 mutex_exit(SD_MUTEX(un)); 4831 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4832 (void *)SD_PATH_DIRECT) != 0) { 4833 mutex_enter(SD_MUTEX(un)); 4834 return (EINVAL); 4835 } 4836 4837 /* 4838 * Read and verify device id, stored in the reserved cylinders at the 4839 * end of the disk. Backup label is on the odd sectors of the last 4840 * track of the last cylinder. Device id will be on track of the next 4841 * to last cylinder. 
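 *
 * A sketch of the on-disk record validated below: struct dk_devid
 * starts with a two byte revision (dkd_rev_hi/dkd_rev_lo), carries the
 * encoded ddi devid payload in dkd_devid, and keeps a checksum in the
 * last 32-bit word of the block. That checksum is the XOR of all of
 * the block's 32-bit words except the checksum word itself, which is
 * what the loop below recomputes and compares against
 * DKD_GETCHKSUM().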
4842 */ 4843 mutex_enter(SD_MUTEX(un)); 4844 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4845 mutex_exit(SD_MUTEX(un)); 4846 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4847 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4848 SD_PATH_DIRECT); 4849 if (status != 0) { 4850 goto error; 4851 } 4852 4853 /* Validate the revision */ 4854 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4855 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4856 status = EINVAL; 4857 goto error; 4858 } 4859 4860 /* Calculate the checksum */ 4861 chksum = 0; 4862 ip = (uint_t *)dkdevid; 4863 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4864 i++) { 4865 chksum ^= ip[i]; 4866 } 4867 4868 /* Compare the checksums */ 4869 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4870 status = EINVAL; 4871 goto error; 4872 } 4873 4874 /* Validate the device id */ 4875 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4876 status = EINVAL; 4877 goto error; 4878 } 4879 4880 /* 4881 * Store the device id in the driver soft state 4882 */ 4883 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4884 tmpid = kmem_alloc(sz, KM_SLEEP); 4885 4886 mutex_enter(SD_MUTEX(un)); 4887 4888 un->un_devid = tmpid; 4889 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4890 4891 kmem_free(dkdevid, buffer_size); 4892 4893 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4894 4895 return (status); 4896 error: 4897 mutex_enter(SD_MUTEX(un)); 4898 kmem_free(dkdevid, buffer_size); 4899 return (status); 4900 } 4901 4902 4903 /* 4904 * Function: sd_create_devid 4905 * 4906 * Description: This routine will fabricate the device id and write it 4907 * to the disk. 4908 * 4909 * Arguments: un - driver soft state (unit) structure 4910 * 4911 * Return Code: value of the fabricated device id 4912 * 4913 * Context: Kernel Thread 4914 */ 4915 4916 static ddi_devid_t 4917 sd_create_devid(struct sd_lun *un) 4918 { 4919 ASSERT(un != NULL); 4920 4921 /* Fabricate the devid */ 4922 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 4923 == DDI_FAILURE) { 4924 return (NULL); 4925 } 4926 4927 /* Write the devid to disk */ 4928 if (sd_write_deviceid(un) != 0) { 4929 ddi_devid_free(un->un_devid); 4930 un->un_devid = NULL; 4931 } 4932 4933 return (un->un_devid); 4934 } 4935 4936 4937 /* 4938 * Function: sd_write_deviceid 4939 * 4940 * Description: This routine will write the device id to the disk 4941 * reserved sector. 
4942 * 4943 * Arguments: un - driver soft state (unit) structure 4944 * 4945 * Return Code: EINVAL 4946 * value returned by sd_send_scsi_cmd 4947 * 4948 * Context: Kernel Thread 4949 */ 4950 4951 static int 4952 sd_write_deviceid(struct sd_lun *un) 4953 { 4954 struct dk_devid *dkdevid; 4955 diskaddr_t blk; 4956 uint_t *ip, chksum; 4957 int status; 4958 int i; 4959 4960 ASSERT(mutex_owned(SD_MUTEX(un))); 4961 4962 mutex_exit(SD_MUTEX(un)); 4963 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4964 (void *)SD_PATH_DIRECT) != 0) { 4965 mutex_enter(SD_MUTEX(un)); 4966 return (-1); 4967 } 4968 4969 4970 /* Allocate the buffer */ 4971 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 4972 4973 /* Fill in the revision */ 4974 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 4975 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 4976 4977 /* Copy in the device id */ 4978 mutex_enter(SD_MUTEX(un)); 4979 bcopy(un->un_devid, &dkdevid->dkd_devid, 4980 ddi_devid_sizeof(un->un_devid)); 4981 mutex_exit(SD_MUTEX(un)); 4982 4983 /* Calculate the checksum */ 4984 chksum = 0; 4985 ip = (uint_t *)dkdevid; 4986 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4987 i++) { 4988 chksum ^= ip[i]; 4989 } 4990 4991 /* Fill-in checksum */ 4992 DKD_FORMCHKSUM(chksum, dkdevid); 4993 4994 /* Write the reserved sector */ 4995 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 4996 SD_PATH_DIRECT); 4997 4998 kmem_free(dkdevid, un->un_sys_blocksize); 4999 5000 mutex_enter(SD_MUTEX(un)); 5001 return (status); 5002 } 5003 5004 5005 /* 5006 * Function: sd_check_vpd_page_support 5007 * 5008 * Description: This routine sends an inquiry command with the EVPD bit set and 5009 * a page code of 0x00 to the device. It is used to determine which 5010 * vital product pages are availible to find the devid. We are 5011 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5012 * device does not support that command. 5013 * 5014 * Arguments: un - driver soft state (unit) structure 5015 * 5016 * Return Code: 0 - success 5017 * 1 - check condition 5018 * 5019 * Context: This routine can sleep. 5020 */ 5021 5022 static int 5023 sd_check_vpd_page_support(struct sd_lun *un) 5024 { 5025 uchar_t *page_list = NULL; 5026 uchar_t page_length = 0xff; /* Use max possible length */ 5027 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5028 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5029 int rval = 0; 5030 int counter; 5031 5032 ASSERT(un != NULL); 5033 ASSERT(mutex_owned(SD_MUTEX(un))); 5034 5035 mutex_exit(SD_MUTEX(un)); 5036 5037 /* 5038 * We'll set the page length to the maximum to save figuring it out 5039 * with an additional call. 5040 */ 5041 page_list = kmem_zalloc(page_length, KM_SLEEP); 5042 5043 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5044 page_code, NULL); 5045 5046 mutex_enter(SD_MUTEX(un)); 5047 5048 /* 5049 * Now we must validate that the device accepted the command, as some 5050 * drives do not support it. If the drive does support it, we will 5051 * return 0, and the supported pages will be in un_vpd_page_mask. If 5052 * not, we return -1. 5053 */ 5054 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5055 /* Loop to find one of the 2 pages we need */ 5056 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5057 5058 /* 5059 * Pages are returned in ascending order, and 0x83 is what we 5060 * are hoping for. 
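 *
 * For reference, the EVPD page 0x00 reply parsed here looks roughly
 * like this (a summary per SPC, not a normative layout):
 *
 *	byte 0		peripheral qualifier / device type
 *	byte 1		page code (0x00)
 *	byte 3		length of the supported-page list
 *	byte 4..n	supported VPD page codes, in ascending order
 *
 * which is why the scan below starts at offset 4 and walks the number
 * of entries given by the length byte.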
5061 */ 5062 while ((page_list[counter] <= 0x83) && 5063 (counter <= (page_list[VPD_PAGE_LENGTH] + 5064 VPD_HEAD_OFFSET))) { 5065 /* 5066 * Add 3 because page_list[3] is the number of 5067 * pages minus 3 5068 */ 5069 5070 switch (page_list[counter]) { 5071 case 0x00: 5072 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5073 break; 5074 case 0x80: 5075 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5076 break; 5077 case 0x81: 5078 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5079 break; 5080 case 0x82: 5081 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5082 break; 5083 case 0x83: 5084 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5085 break; 5086 } 5087 counter++; 5088 } 5089 5090 } else { 5091 rval = -1; 5092 5093 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5094 "sd_check_vpd_page_support: This drive does not implement " 5095 "VPD pages.\n"); 5096 } 5097 5098 kmem_free(page_list, page_length); 5099 5100 return (rval); 5101 } 5102 5103 5104 /* 5105 * Function: sd_setup_pm 5106 * 5107 * Description: Initialize Power Management on the device 5108 * 5109 * Context: Kernel Thread 5110 */ 5111 5112 static void 5113 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5114 { 5115 uint_t log_page_size; 5116 uchar_t *log_page_data; 5117 int rval; 5118 5119 /* 5120 * Since we are called from attach, holding a mutex for 5121 * un is unnecessary. Because some of the routines called 5122 * from here require SD_MUTEX to not be held, assert this 5123 * right up front. 5124 */ 5125 ASSERT(!mutex_owned(SD_MUTEX(un))); 5126 /* 5127 * Since the sd device does not have the 'reg' property, 5128 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5129 * The following code is to tell cpr that this device 5130 * DOES need to be suspended and resumed. 5131 */ 5132 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5133 "pm-hardware-state", "needs-suspend-resume"); 5134 5135 /* 5136 * This complies with the new power management framework 5137 * for certain desktop machines. Create the pm_components 5138 * property as a string array property. 5139 */ 5140 if (un->un_f_pm_supported) { 5141 /* 5142 * not all devices have a motor, try it first. 5143 * some devices may return ILLEGAL REQUEST, some 5144 * will hang 5145 * The following START_STOP_UNIT is used to check if target 5146 * device has a motor. 5147 */ 5148 un->un_f_start_stop_supported = TRUE; 5149 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5150 SD_PATH_DIRECT) != 0) { 5151 un->un_f_start_stop_supported = FALSE; 5152 } 5153 5154 /* 5155 * create pm properties anyways otherwise the parent can't 5156 * go to sleep 5157 */ 5158 (void) sd_create_pm_components(devi, un); 5159 un->un_f_pm_is_enabled = TRUE; 5160 return; 5161 } 5162 5163 if (!un->un_f_log_sense_supported) { 5164 un->un_power_level = SD_SPINDLE_ON; 5165 un->un_f_pm_is_enabled = FALSE; 5166 return; 5167 } 5168 5169 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5170 5171 #ifdef SDDEBUG 5172 if (sd_force_pm_supported) { 5173 /* Force a successful result */ 5174 rval = 1; 5175 } 5176 #endif 5177 5178 /* 5179 * If the start-stop cycle counter log page is not supported 5180 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5181 * then we should not create the pm_components property. 5182 */ 5183 if (rval == -1) { 5184 /* 5185 * Error. 5186 * Reading log sense failed, most likely this is 5187 * an older drive that does not support log sense. 5188 * If this fails auto-pm is not supported. 
5189 */ 5190 un->un_power_level = SD_SPINDLE_ON; 5191 un->un_f_pm_is_enabled = FALSE; 5192 5193 } else if (rval == 0) { 5194 /* 5195 * Page not found. 5196 * The start stop cycle counter is implemented as page 5197 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5198 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5199 */ 5200 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5201 /* 5202 * Page found, use this one. 5203 */ 5204 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5205 un->un_f_pm_is_enabled = TRUE; 5206 } else { 5207 /* 5208 * Error or page not found. 5209 * auto-pm is not supported for this device. 5210 */ 5211 un->un_power_level = SD_SPINDLE_ON; 5212 un->un_f_pm_is_enabled = FALSE; 5213 } 5214 } else { 5215 /* 5216 * Page found, use it. 5217 */ 5218 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5219 un->un_f_pm_is_enabled = TRUE; 5220 } 5221 5222 5223 if (un->un_f_pm_is_enabled == TRUE) { 5224 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5225 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5226 5227 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5228 log_page_size, un->un_start_stop_cycle_page, 5229 0x01, 0, SD_PATH_DIRECT); 5230 #ifdef SDDEBUG 5231 if (sd_force_pm_supported) { 5232 /* Force a successful result */ 5233 rval = 0; 5234 } 5235 #endif 5236 5237 /* 5238 * If the Log sense for Page( Start/stop cycle counter page) 5239 * succeeds, then power managment is supported and we can 5240 * enable auto-pm. 5241 */ 5242 if (rval == 0) { 5243 (void) sd_create_pm_components(devi, un); 5244 } else { 5245 un->un_power_level = SD_SPINDLE_ON; 5246 un->un_f_pm_is_enabled = FALSE; 5247 } 5248 5249 kmem_free(log_page_data, log_page_size); 5250 } 5251 } 5252 5253 5254 /* 5255 * Function: sd_create_pm_components 5256 * 5257 * Description: Initialize PM property. 5258 * 5259 * Context: Kernel thread context 5260 */ 5261 5262 static void 5263 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5264 { 5265 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5266 5267 ASSERT(!mutex_owned(SD_MUTEX(un))); 5268 5269 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5270 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5271 /* 5272 * When components are initially created they are idle, 5273 * power up any non-removables. 5274 * Note: the return value of pm_raise_power can't be used 5275 * for determining if PM should be enabled for this device. 5276 * Even if you check the return values and remove this 5277 * property created above, the PM framework will not honor the 5278 * change after the first call to pm_raise_power. Hence, 5279 * removal of that property does not help if pm_raise_power 5280 * fails. In the case of removable media, the start/stop 5281 * will fail if the media is not present. 5282 */ 5283 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5284 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5285 mutex_enter(SD_MUTEX(un)); 5286 un->un_power_level = SD_SPINDLE_ON; 5287 mutex_enter(&un->un_pm_mutex); 5288 /* Set to on and not busy. */ 5289 un->un_pm_count = 0; 5290 } else { 5291 mutex_enter(SD_MUTEX(un)); 5292 un->un_power_level = SD_SPINDLE_OFF; 5293 mutex_enter(&un->un_pm_mutex); 5294 /* Set to off. 
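/*
 * sd_setup_pm() above decides which LOG SENSE page carries the start/stop
 * cycle counter: the standard page for newer disks, the vendor-unique page
 * for older ones, or neither (in which case auto-pm stays disabled). A
 * compact restatement of that fallback, where page_supported() is a
 * hypothetical stand-in for sd_log_page_supported() (1 = listed, 0 = not
 * listed, -1 = LOG SENSE itself failed) and the EX_* names are illustrative.
 */
#define EX_CYCLE_PAGE       0x0E    /* START_STOP_CYCLE_PAGE */
#define EX_CYCLE_PAGE_VU    0x31    /* START_STOP_CYCLE_VU_PAGE */

static int
pick_cycle_counter_page(int (*page_supported)(int page))
{
    int rval = page_supported(EX_CYCLE_PAGE);

    if (rval == 1)
        return (EX_CYCLE_PAGE);         /* newer disks */
    if (rval == 0 && page_supported(EX_CYCLE_PAGE_VU) == 1)
        return (EX_CYCLE_PAGE_VU);      /* older disks */
    return (-1);                        /* auto-pm not supported */
}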
*/ 5295 un->un_pm_count = -1; 5296 } 5297 mutex_exit(&un->un_pm_mutex); 5298 mutex_exit(SD_MUTEX(un)); 5299 } else { 5300 un->un_power_level = SD_SPINDLE_ON; 5301 un->un_f_pm_is_enabled = FALSE; 5302 } 5303 } 5304 5305 5306 /* 5307 * Function: sd_ddi_suspend 5308 * 5309 * Description: Performs system power-down operations. This includes 5310 * setting the drive state to indicate its suspended so 5311 * that no new commands will be accepted. Also, wait for 5312 * all commands that are in transport or queued to a timer 5313 * for retry to complete. All timeout threads are cancelled. 5314 * 5315 * Return Code: DDI_FAILURE or DDI_SUCCESS 5316 * 5317 * Context: Kernel thread context 5318 */ 5319 5320 static int 5321 sd_ddi_suspend(dev_info_t *devi) 5322 { 5323 struct sd_lun *un; 5324 clock_t wait_cmds_complete; 5325 5326 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5327 if (un == NULL) { 5328 return (DDI_FAILURE); 5329 } 5330 5331 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5332 5333 mutex_enter(SD_MUTEX(un)); 5334 5335 /* Return success if the device is already suspended. */ 5336 if (un->un_state == SD_STATE_SUSPENDED) { 5337 mutex_exit(SD_MUTEX(un)); 5338 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5339 "device already suspended, exiting\n"); 5340 return (DDI_SUCCESS); 5341 } 5342 5343 /* Return failure if the device is being used by HA */ 5344 if (un->un_resvd_status & 5345 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5346 mutex_exit(SD_MUTEX(un)); 5347 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5348 "device in use by HA, exiting\n"); 5349 return (DDI_FAILURE); 5350 } 5351 5352 /* 5353 * Return failure if the device is in a resource wait 5354 * or power changing state. 5355 */ 5356 if ((un->un_state == SD_STATE_RWAIT) || 5357 (un->un_state == SD_STATE_PM_CHANGING)) { 5358 mutex_exit(SD_MUTEX(un)); 5359 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5360 "device in resource wait state, exiting\n"); 5361 return (DDI_FAILURE); 5362 } 5363 5364 5365 un->un_save_state = un->un_last_state; 5366 New_state(un, SD_STATE_SUSPENDED); 5367 5368 /* 5369 * Wait for all commands that are in transport or queued to a timer 5370 * for retry to complete. 5371 * 5372 * While waiting, no new commands will be accepted or sent because of 5373 * the new state we set above. 5374 * 5375 * Wait till current operation has completed. If we are in the resource 5376 * wait state (with an intr outstanding) then we need to wait till the 5377 * intr completes and starts the next cmd. We want to wait for 5378 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 5379 */ 5380 wait_cmds_complete = ddi_get_lbolt() + 5381 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5382 5383 while (un->un_ncmds_in_transport != 0) { 5384 /* 5385 * Fail if commands do not finish in the specified time. 5386 */ 5387 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5388 wait_cmds_complete) == -1) { 5389 /* 5390 * Undo the state changes made above. Everything 5391 * must go back to it's original value. 5392 */ 5393 Restore_state(un); 5394 un->un_last_state = un->un_save_state; 5395 /* Wake up any threads that might be waiting. 
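/*
 * sd_ddi_suspend() above bounds its wait for in-flight commands with an
 * absolute deadline. A condensed sketch of that pattern follows, assuming
 * the kernel DDI environment already pulled in by this file's headers:
 * cv_timedwait(9F) takes an absolute lbolt value and returns -1 once that
 * deadline has passed. The helper and parameter names are illustrative.
 */
static int
drain_with_deadline(kmutex_t *mp, kcondvar_t *cv, int *pending, int seconds)
{
    clock_t deadline = ddi_get_lbolt() + seconds * drv_usectohz(1000000);

    /* Caller holds 'mp'; 'cv' is signalled whenever 'pending' drops. */
    while (*pending != 0) {
        if (cv_timedwait(cv, mp, deadline) == -1)
            return (-1);        /* deadline expired; abort the suspend */
    }
    return (0);                 /* all commands drained in time */
}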
*/ 5396 cv_broadcast(&un->un_suspend_cv); 5397 mutex_exit(SD_MUTEX(un)); 5398 SD_ERROR(SD_LOG_IO_PM, un, 5399 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5400 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5401 return (DDI_FAILURE); 5402 } 5403 } 5404 5405 /* 5406 * Cancel SCSI watch thread and timeouts, if any are active 5407 */ 5408 5409 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5410 opaque_t temp_token = un->un_swr_token; 5411 mutex_exit(SD_MUTEX(un)); 5412 scsi_watch_suspend(temp_token); 5413 mutex_enter(SD_MUTEX(un)); 5414 } 5415 5416 if (un->un_reset_throttle_timeid != NULL) { 5417 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5418 un->un_reset_throttle_timeid = NULL; 5419 mutex_exit(SD_MUTEX(un)); 5420 (void) untimeout(temp_id); 5421 mutex_enter(SD_MUTEX(un)); 5422 } 5423 5424 if (un->un_dcvb_timeid != NULL) { 5425 timeout_id_t temp_id = un->un_dcvb_timeid; 5426 un->un_dcvb_timeid = NULL; 5427 mutex_exit(SD_MUTEX(un)); 5428 (void) untimeout(temp_id); 5429 mutex_enter(SD_MUTEX(un)); 5430 } 5431 5432 mutex_enter(&un->un_pm_mutex); 5433 if (un->un_pm_timeid != NULL) { 5434 timeout_id_t temp_id = un->un_pm_timeid; 5435 un->un_pm_timeid = NULL; 5436 mutex_exit(&un->un_pm_mutex); 5437 mutex_exit(SD_MUTEX(un)); 5438 (void) untimeout(temp_id); 5439 mutex_enter(SD_MUTEX(un)); 5440 } else { 5441 mutex_exit(&un->un_pm_mutex); 5442 } 5443 5444 if (un->un_retry_timeid != NULL) { 5445 timeout_id_t temp_id = un->un_retry_timeid; 5446 un->un_retry_timeid = NULL; 5447 mutex_exit(SD_MUTEX(un)); 5448 (void) untimeout(temp_id); 5449 mutex_enter(SD_MUTEX(un)); 5450 } 5451 5452 if (un->un_direct_priority_timeid != NULL) { 5453 timeout_id_t temp_id = un->un_direct_priority_timeid; 5454 un->un_direct_priority_timeid = NULL; 5455 mutex_exit(SD_MUTEX(un)); 5456 (void) untimeout(temp_id); 5457 mutex_enter(SD_MUTEX(un)); 5458 } 5459 5460 if (un->un_f_is_fibre == TRUE) { 5461 /* 5462 * Remove callbacks for insert and remove events 5463 */ 5464 if (un->un_insert_event != NULL) { 5465 mutex_exit(SD_MUTEX(un)); 5466 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5467 mutex_enter(SD_MUTEX(un)); 5468 un->un_insert_event = NULL; 5469 } 5470 5471 if (un->un_remove_event != NULL) { 5472 mutex_exit(SD_MUTEX(un)); 5473 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5474 mutex_enter(SD_MUTEX(un)); 5475 un->un_remove_event = NULL; 5476 } 5477 } 5478 5479 mutex_exit(SD_MUTEX(un)); 5480 5481 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5482 5483 return (DDI_SUCCESS); 5484 } 5485 5486 5487 /* 5488 * Function: sd_ddi_pm_suspend 5489 * 5490 * Description: Set the drive state to low power. 5491 * Someone else is required to actually change the drive 5492 * power level. 5493 * 5494 * Arguments: un - driver soft state (unit) structure 5495 * 5496 * Return Code: DDI_FAILURE or DDI_SUCCESS 5497 * 5498 * Context: Kernel thread context 5499 */ 5500 5501 static int 5502 sd_ddi_pm_suspend(struct sd_lun *un) 5503 { 5504 ASSERT(un != NULL); 5505 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5506 5507 ASSERT(!mutex_owned(SD_MUTEX(un))); 5508 mutex_enter(SD_MUTEX(un)); 5509 5510 /* 5511 * Exit if power management is not enabled for this device, or if 5512 * the device is being used by HA. 
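/*
 * Each of the timeout cancellations above follows one discipline: clear the
 * timeout id while the soft-state mutex is held (so a firing handler sees it
 * is being cancelled), then drop the mutex around untimeout(9F), because
 * untimeout() waits for a handler that may itself block on the same mutex.
 * A sketch of that single pattern, assuming the kernel DDI environment
 * already pulled in by this file's headers; the helper name is illustrative.
 */
static void
cancel_timeout_locked(kmutex_t *mp, timeout_id_t *tidp)
{
    ASSERT(mutex_owned(mp));

    if (*tidp != NULL) {
        timeout_id_t tid = *tidp;

        *tidp = NULL;           /* handlers re-check this under 'mp' */
        mutex_exit(mp);
        (void) untimeout(tid);  /* may wait for a running handler */
        mutex_enter(mp);
    }
}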
5513 */ 5514 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5515 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5516 mutex_exit(SD_MUTEX(un)); 5517 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5518 return (DDI_SUCCESS); 5519 } 5520 5521 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5522 un->un_ncmds_in_driver); 5523 5524 /* 5525 * See if the device is not busy, ie.: 5526 * - we have no commands in the driver for this device 5527 * - not waiting for resources 5528 */ 5529 if ((un->un_ncmds_in_driver == 0) && 5530 (un->un_state != SD_STATE_RWAIT)) { 5531 /* 5532 * The device is not busy, so it is OK to go to low power state. 5533 * Indicate low power, but rely on someone else to actually 5534 * change it. 5535 */ 5536 mutex_enter(&un->un_pm_mutex); 5537 un->un_pm_count = -1; 5538 mutex_exit(&un->un_pm_mutex); 5539 un->un_power_level = SD_SPINDLE_OFF; 5540 } 5541 5542 mutex_exit(SD_MUTEX(un)); 5543 5544 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5545 5546 return (DDI_SUCCESS); 5547 } 5548 5549 5550 /* 5551 * Function: sd_ddi_resume 5552 * 5553 * Description: Performs system power-up operations.. 5554 * 5555 * Return Code: DDI_SUCCESS 5556 * DDI_FAILURE 5557 * 5558 * Context: Kernel thread context 5559 */ 5560 5561 static int 5562 sd_ddi_resume(dev_info_t *devi) 5563 { 5564 struct sd_lun *un; 5565 5566 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5567 if (un == NULL) { 5568 return (DDI_FAILURE); 5569 } 5570 5571 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 5572 5573 mutex_enter(SD_MUTEX(un)); 5574 Restore_state(un); 5575 5576 /* 5577 * Restore the state which was saved to give the 5578 * the right state in un_last_state 5579 */ 5580 un->un_last_state = un->un_save_state; 5581 /* 5582 * Note: throttle comes back at full. 5583 * Also note: this MUST be done before calling pm_raise_power 5584 * otherwise the system can get hung in biowait. The scenario where 5585 * this'll happen is under cpr suspend. Writing of the system 5586 * state goes through sddump, which writes 0 to un_throttle. If 5587 * writing the system state then fails, example if the partition is 5588 * too small, then cpr attempts a resume. If throttle isn't restored 5589 * from the saved value until after calling pm_raise_power then 5590 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 5591 * in biowait. 5592 */ 5593 un->un_throttle = un->un_saved_throttle; 5594 5595 /* 5596 * The chance of failure is very rare as the only command done in power 5597 * entry point is START command when you transition from 0->1 or 5598 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 5599 * which suspend was done. Ignore the return value as the resume should 5600 * not be failed. In the case of removable media the media need not be 5601 * inserted and hence there is a chance that raise power will fail with 5602 * media not present. 5603 */ 5604 if (un->un_f_attach_spinup) { 5605 mutex_exit(SD_MUTEX(un)); 5606 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5607 mutex_enter(SD_MUTEX(un)); 5608 } 5609 5610 /* 5611 * Don't broadcast to the suspend cv and therefore possibly 5612 * start I/O until after power has been restored. 
5613 */ 5614 cv_broadcast(&un->un_suspend_cv); 5615 cv_broadcast(&un->un_state_cv); 5616 5617 /* restart thread */ 5618 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5619 scsi_watch_resume(un->un_swr_token); 5620 } 5621 5622 #if (defined(__fibre)) 5623 if (un->un_f_is_fibre == TRUE) { 5624 /* 5625 * Add callbacks for insert and remove events 5626 */ 5627 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5628 sd_init_event_callbacks(un); 5629 } 5630 } 5631 #endif 5632 5633 /* 5634 * Transport any pending commands to the target. 5635 * 5636 * If this is a low-activity device commands in queue will have to wait 5637 * until new commands come in, which may take awhile. Also, we 5638 * specifically don't check un_ncmds_in_transport because we know that 5639 * there really are no commands in progress after the unit was 5640 * suspended and we could have reached the throttle level, been 5641 * suspended, and have no new commands coming in for awhile. Highly 5642 * unlikely, but so is the low-activity disk scenario. 5643 */ 5644 ddi_xbuf_dispatch(un->un_xbuf_attr); 5645 5646 sd_start_cmds(un, NULL); 5647 mutex_exit(SD_MUTEX(un)); 5648 5649 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5650 5651 return (DDI_SUCCESS); 5652 } 5653 5654 5655 /* 5656 * Function: sd_ddi_pm_resume 5657 * 5658 * Description: Set the drive state to powered on. 5659 * Someone else is required to actually change the drive 5660 * power level. 5661 * 5662 * Arguments: un - driver soft state (unit) structure 5663 * 5664 * Return Code: DDI_SUCCESS 5665 * 5666 * Context: Kernel thread context 5667 */ 5668 5669 static int 5670 sd_ddi_pm_resume(struct sd_lun *un) 5671 { 5672 ASSERT(un != NULL); 5673 5674 ASSERT(!mutex_owned(SD_MUTEX(un))); 5675 mutex_enter(SD_MUTEX(un)); 5676 un->un_power_level = SD_SPINDLE_ON; 5677 5678 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5679 mutex_enter(&un->un_pm_mutex); 5680 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5681 un->un_pm_count++; 5682 ASSERT(un->un_pm_count == 0); 5683 /* 5684 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5685 * un_suspend_cv is for a system resume, not a power management 5686 * device resume. (4297749) 5687 * cv_broadcast(&un->un_suspend_cv); 5688 */ 5689 } 5690 mutex_exit(&un->un_pm_mutex); 5691 mutex_exit(SD_MUTEX(un)); 5692 5693 return (DDI_SUCCESS); 5694 } 5695 5696 5697 /* 5698 * Function: sd_pm_idletimeout_handler 5699 * 5700 * Description: A timer routine that's active only while a device is busy. 5701 * The purpose is to extend slightly the pm framework's busy 5702 * view of the device to prevent busy/idle thrashing for 5703 * back-to-back commands. Do this by comparing the current time 5704 * to the time at which the last command completed and when the 5705 * difference is greater than sd_pm_idletime, call 5706 * pm_idle_component. In addition to indicating idle to the pm 5707 * framework, update the chain type to again use the internal pm 5708 * layers of the driver. 
5709 * 5710 * Arguments: arg - driver soft state (unit) structure 5711 * 5712 * Context: Executes in a timeout(9F) thread context 5713 */ 5714 5715 static void 5716 sd_pm_idletimeout_handler(void *arg) 5717 { 5718 struct sd_lun *un = arg; 5719 5720 time_t now; 5721 5722 mutex_enter(&sd_detach_mutex); 5723 if (un->un_detach_count != 0) { 5724 /* Abort if the instance is detaching */ 5725 mutex_exit(&sd_detach_mutex); 5726 return; 5727 } 5728 mutex_exit(&sd_detach_mutex); 5729 5730 now = ddi_get_time(); 5731 /* 5732 * Grab both mutexes, in the proper order, since we're accessing 5733 * both PM and softstate variables. 5734 */ 5735 mutex_enter(SD_MUTEX(un)); 5736 mutex_enter(&un->un_pm_mutex); 5737 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5738 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5739 /* 5740 * Update the chain types. 5741 * This takes affect on the next new command received. 5742 */ 5743 if (un->un_f_non_devbsize_supported) { 5744 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5745 } else { 5746 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5747 } 5748 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 5749 5750 SD_TRACE(SD_LOG_IO_PM, un, 5751 "sd_pm_idletimeout_handler: idling device\n"); 5752 (void) pm_idle_component(SD_DEVINFO(un), 0); 5753 un->un_pm_idle_timeid = NULL; 5754 } else { 5755 un->un_pm_idle_timeid = 5756 timeout(sd_pm_idletimeout_handler, un, 5757 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 5758 } 5759 mutex_exit(&un->un_pm_mutex); 5760 mutex_exit(SD_MUTEX(un)); 5761 } 5762 5763 5764 /* 5765 * Function: sd_pm_timeout_handler 5766 * 5767 * Description: Callback to tell framework we are idle. 5768 * 5769 * Context: timeout(9f) thread context. 5770 */ 5771 5772 static void 5773 sd_pm_timeout_handler(void *arg) 5774 { 5775 struct sd_lun *un = arg; 5776 5777 (void) pm_idle_component(SD_DEVINFO(un), 0); 5778 mutex_enter(&un->un_pm_mutex); 5779 un->un_pm_timeid = NULL; 5780 mutex_exit(&un->un_pm_mutex); 5781 } 5782 5783 5784 /* 5785 * Function: sdpower 5786 * 5787 * Description: PM entry point. 5788 * 5789 * Return Code: DDI_SUCCESS 5790 * DDI_FAILURE 5791 * 5792 * Context: Kernel thread context 5793 */ 5794 5795 static int 5796 sdpower(dev_info_t *devi, int component, int level) 5797 { 5798 struct sd_lun *un; 5799 int instance; 5800 int rval = DDI_SUCCESS; 5801 uint_t i, log_page_size, maxcycles, ncycles; 5802 uchar_t *log_page_data; 5803 int log_sense_page; 5804 int medium_present; 5805 time_t intvlp; 5806 dev_t dev; 5807 struct pm_trans_data sd_pm_tran_data; 5808 uchar_t save_state; 5809 int sval; 5810 uchar_t state_before_pm; 5811 int got_semaphore_here; 5812 5813 instance = ddi_get_instance(devi); 5814 5815 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 5816 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 5817 component != 0) { 5818 return (DDI_FAILURE); 5819 } 5820 5821 dev = sd_make_device(SD_DEVINFO(un)); 5822 5823 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 5824 5825 /* 5826 * Must synchronize power down with close. 5827 * Attempt to decrement/acquire the open/close semaphore, 5828 * but do NOT wait on it. If it's not greater than zero, 5829 * ie. it can't be decremented without waiting, then 5830 * someone else, either open or close, already has it 5831 * and the try returns 0. Use that knowledge here to determine 5832 * if it's OK to change the device power level. 5833 * Also, only increment it on exit if it was decremented, ie. gotten, 5834 * here. 
5835 */ 5836 got_semaphore_here = sema_tryp(&un->un_semoclose); 5837 5838 mutex_enter(SD_MUTEX(un)); 5839 5840 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 5841 un->un_ncmds_in_driver); 5842 5843 /* 5844 * If un_ncmds_in_driver is non-zero it indicates commands are 5845 * already being processed in the driver, or if the semaphore was 5846 * not gotten here it indicates an open or close is being processed. 5847 * At the same time somebody is requesting to go low power which 5848 * can't happen, therefore we need to return failure. 5849 */ 5850 if ((level == SD_SPINDLE_OFF) && 5851 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 5852 mutex_exit(SD_MUTEX(un)); 5853 5854 if (got_semaphore_here != 0) { 5855 sema_v(&un->un_semoclose); 5856 } 5857 SD_TRACE(SD_LOG_IO_PM, un, 5858 "sdpower: exit, device has queued cmds.\n"); 5859 return (DDI_FAILURE); 5860 } 5861 5862 /* 5863 * if it is OFFLINE that means the disk is completely dead 5864 * in our case we have to put the disk in on or off by sending commands 5865 * Of course that will fail anyway so return back here. 5866 * 5867 * Power changes to a device that's OFFLINE or SUSPENDED 5868 * are not allowed. 5869 */ 5870 if ((un->un_state == SD_STATE_OFFLINE) || 5871 (un->un_state == SD_STATE_SUSPENDED)) { 5872 mutex_exit(SD_MUTEX(un)); 5873 5874 if (got_semaphore_here != 0) { 5875 sema_v(&un->un_semoclose); 5876 } 5877 SD_TRACE(SD_LOG_IO_PM, un, 5878 "sdpower: exit, device is off-line.\n"); 5879 return (DDI_FAILURE); 5880 } 5881 5882 /* 5883 * Change the device's state to indicate it's power level 5884 * is being changed. Do this to prevent a power off in the 5885 * middle of commands, which is especially bad on devices 5886 * that are really powered off instead of just spun down. 5887 */ 5888 state_before_pm = un->un_state; 5889 un->un_state = SD_STATE_PM_CHANGING; 5890 5891 mutex_exit(SD_MUTEX(un)); 5892 5893 /* 5894 * If "pm-capable" property is set to TRUE by HBA drivers, 5895 * bypass the following checking, otherwise, check the log 5896 * sense information for this device 5897 */ 5898 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 5899 /* 5900 * Get the log sense information to understand whether the 5901 * the powercycle counts have gone beyond the threshhold. 5902 */ 5903 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5904 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5905 5906 mutex_enter(SD_MUTEX(un)); 5907 log_sense_page = un->un_start_stop_cycle_page; 5908 mutex_exit(SD_MUTEX(un)); 5909 5910 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5911 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 5912 #ifdef SDDEBUG 5913 if (sd_force_pm_supported) { 5914 /* Force a successful result */ 5915 rval = 0; 5916 } 5917 #endif 5918 if (rval != 0) { 5919 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5920 "Log Sense Failed\n"); 5921 kmem_free(log_page_data, log_page_size); 5922 /* Cannot support power management on those drives */ 5923 5924 if (got_semaphore_here != 0) { 5925 sema_v(&un->un_semoclose); 5926 } 5927 /* 5928 * On exit put the state back to it's original value 5929 * and broadcast to anyone waiting for the power 5930 * change completion. 
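/*
 * The sema_tryp()/sema_v() bracket above keeps a power-level change from
 * overlapping an open(9E) or close(9E) in progress. A small sketch of that
 * non-blocking bracket, assuming the kernel DDI environment already pulled
 * in by this file's headers: sema_tryp(9F) returns non-zero only if it
 * actually decremented the semaphore, so it is released only in that case.
 * The helper and its arguments are illustrative.
 */
static int
try_exclusive_action(ksema_t *semp, void (*action)(void *), void *arg)
{
    int got = sema_tryp(semp);  /* non-zero => semaphore taken */

    if (got == 0)
        return (-1);            /* open/close holds it; do not block */

    action(arg);
    sema_v(semp);               /* release only because we took it */
    return (0);
}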
5931 */ 5932 mutex_enter(SD_MUTEX(un)); 5933 un->un_state = state_before_pm; 5934 cv_broadcast(&un->un_suspend_cv); 5935 mutex_exit(SD_MUTEX(un)); 5936 SD_TRACE(SD_LOG_IO_PM, un, 5937 "sdpower: exit, Log Sense Failed.\n"); 5938 return (DDI_FAILURE); 5939 } 5940 5941 /* 5942 * From the page data - Convert the essential information to 5943 * pm_trans_data 5944 */ 5945 maxcycles = 5946 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 5947 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 5948 5949 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 5950 5951 ncycles = 5952 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 5953 (log_page_data[0x26] << 8) | log_page_data[0x27]; 5954 5955 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 5956 5957 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 5958 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 5959 log_page_data[8+i]; 5960 } 5961 5962 kmem_free(log_page_data, log_page_size); 5963 5964 /* 5965 * Call pm_trans_check routine to get the Ok from 5966 * the global policy 5967 */ 5968 5969 sd_pm_tran_data.format = DC_SCSI_FORMAT; 5970 sd_pm_tran_data.un.scsi_cycles.flag = 0; 5971 5972 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 5973 #ifdef SDDEBUG 5974 if (sd_force_pm_supported) { 5975 /* Force a successful result */ 5976 rval = 1; 5977 } 5978 #endif 5979 switch (rval) { 5980 case 0: 5981 /* 5982 * Not Ok to Power cycle or error in parameters passed 5983 * Would have given the advised time to consider power 5984 * cycle. Based on the new intvlp parameter we are 5985 * supposed to pretend we are busy so that pm framework 5986 * will never call our power entry point. Because of 5987 * that install a timeout handler and wait for the 5988 * recommended time to elapse so that power management 5989 * can be effective again. 5990 * 5991 * To effect this behavior, call pm_busy_component to 5992 * indicate to the framework this device is busy. 5993 * By not adjusting un_pm_count the rest of PM in 5994 * the driver will function normally, and independant 5995 * of this but because the framework is told the device 5996 * is busy it won't attempt powering down until it gets 5997 * a matching idle. The timeout handler sends this. 5998 * Note: sd_pm_entry can't be called here to do this 5999 * because sdpower may have been called as a result 6000 * of a call to pm_raise_power from within sd_pm_entry. 6001 * 6002 * If a timeout handler is already active then 6003 * don't install another. 6004 */ 6005 mutex_enter(&un->un_pm_mutex); 6006 if (un->un_pm_timeid == NULL) { 6007 un->un_pm_timeid = 6008 timeout(sd_pm_timeout_handler, 6009 un, intvlp * drv_usectohz(1000000)); 6010 mutex_exit(&un->un_pm_mutex); 6011 (void) pm_busy_component(SD_DEVINFO(un), 0); 6012 } else { 6013 mutex_exit(&un->un_pm_mutex); 6014 } 6015 if (got_semaphore_here != 0) { 6016 sema_v(&un->un_semoclose); 6017 } 6018 /* 6019 * On exit put the state back to it's original value 6020 * and broadcast to anyone waiting for the power 6021 * change completion. 6022 */ 6023 mutex_enter(SD_MUTEX(un)); 6024 un->un_state = state_before_pm; 6025 cv_broadcast(&un->un_suspend_cv); 6026 mutex_exit(SD_MUTEX(un)); 6027 6028 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6029 "trans check Failed, not ok to power cycle.\n"); 6030 return (DDI_FAILURE); 6031 6032 case -1: 6033 if (got_semaphore_here != 0) { 6034 sema_v(&un->un_semoclose); 6035 } 6036 /* 6037 * On exit put the state back to it's original value 6038 * and broadcast to anyone waiting for the power 6039 * change completion. 
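/*
 * The conversion above assembles big-endian 32-bit counters out of the
 * start/stop cycle counter log page before handing them to pm_trans_check().
 * A standalone restatement of that byte assembly, assuming a plain C
 * environment; the function name is illustrative, and the offsets in the
 * usage note are the ones the driver reads.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t
be32_at(const uint8_t *buf, size_t off)
{
    return ((uint32_t)buf[off] << 24 | (uint32_t)buf[off + 1] << 16 |
        (uint32_t)buf[off + 2] << 8 | (uint32_t)buf[off + 3]);
}

/*
 * Usage against a START_STOP_CYCLE_COUNTER page buffer:
 *     maxcycles = be32_at(log_page_data, 0x1c);   lifetime maximum
 *     ncycles   = be32_at(log_page_data, 0x24);   accumulated count
 */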
6040 */ 6041 mutex_enter(SD_MUTEX(un)); 6042 un->un_state = state_before_pm; 6043 cv_broadcast(&un->un_suspend_cv); 6044 mutex_exit(SD_MUTEX(un)); 6045 SD_TRACE(SD_LOG_IO_PM, un, 6046 "sdpower: exit, trans check command Failed.\n"); 6047 return (DDI_FAILURE); 6048 } 6049 } 6050 6051 if (level == SD_SPINDLE_OFF) { 6052 /* 6053 * Save the last state... if the STOP FAILS we need it 6054 * for restoring 6055 */ 6056 mutex_enter(SD_MUTEX(un)); 6057 save_state = un->un_last_state; 6058 /* 6059 * There must not be any cmds. getting processed 6060 * in the driver when we get here. Power to the 6061 * device is potentially going off. 6062 */ 6063 ASSERT(un->un_ncmds_in_driver == 0); 6064 mutex_exit(SD_MUTEX(un)); 6065 6066 /* 6067 * For now suspend the device completely before spindle is 6068 * turned off 6069 */ 6070 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 6071 if (got_semaphore_here != 0) { 6072 sema_v(&un->un_semoclose); 6073 } 6074 /* 6075 * On exit put the state back to it's original value 6076 * and broadcast to anyone waiting for the power 6077 * change completion. 6078 */ 6079 mutex_enter(SD_MUTEX(un)); 6080 un->un_state = state_before_pm; 6081 cv_broadcast(&un->un_suspend_cv); 6082 mutex_exit(SD_MUTEX(un)); 6083 SD_TRACE(SD_LOG_IO_PM, un, 6084 "sdpower: exit, PM suspend Failed.\n"); 6085 return (DDI_FAILURE); 6086 } 6087 } 6088 6089 /* 6090 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6091 * close, or strategy. Dump no long uses this routine, it uses it's 6092 * own code so it can be done in polled mode. 6093 */ 6094 6095 medium_present = TRUE; 6096 6097 /* 6098 * When powering up, issue a TUR in case the device is at unit 6099 * attention. Don't do retries. Bypass the PM layer, otherwise 6100 * a deadlock on un_pm_busy_cv will occur. 6101 */ 6102 if (level == SD_SPINDLE_ON) { 6103 (void) sd_send_scsi_TEST_UNIT_READY(un, 6104 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6105 } 6106 6107 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6108 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6109 6110 sval = sd_send_scsi_START_STOP_UNIT(un, 6111 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6112 SD_PATH_DIRECT); 6113 /* Command failed, check for media present. */ 6114 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6115 medium_present = FALSE; 6116 } 6117 6118 /* 6119 * The conditions of interest here are: 6120 * if a spindle off with media present fails, 6121 * then restore the state and return an error. 6122 * else if a spindle on fails, 6123 * then return an error (there's no state to restore). 6124 * In all other cases we setup for the new state 6125 * and return success. 6126 */ 6127 switch (level) { 6128 case SD_SPINDLE_OFF: 6129 if ((medium_present == TRUE) && (sval != 0)) { 6130 /* The stop command from above failed */ 6131 rval = DDI_FAILURE; 6132 /* 6133 * The stop command failed, and we have media 6134 * present. Put the level back by calling the 6135 * sd_pm_resume() and set the state back to 6136 * it's previous value. 6137 */ 6138 (void) sd_ddi_pm_resume(un); 6139 mutex_enter(SD_MUTEX(un)); 6140 un->un_last_state = save_state; 6141 mutex_exit(SD_MUTEX(un)); 6142 break; 6143 } 6144 /* 6145 * The stop command from above succeeded. 6146 */ 6147 if (un->un_f_monitor_media_state) { 6148 /* 6149 * Terminate watch thread in case of removable media 6150 * devices going into low power state. 
This is as per 6151 * the requirements of pm framework, otherwise commands 6152 * will be generated for the device (through watch 6153 * thread), even when the device is in low power state. 6154 */ 6155 mutex_enter(SD_MUTEX(un)); 6156 un->un_f_watcht_stopped = FALSE; 6157 if (un->un_swr_token != NULL) { 6158 opaque_t temp_token = un->un_swr_token; 6159 un->un_f_watcht_stopped = TRUE; 6160 un->un_swr_token = NULL; 6161 mutex_exit(SD_MUTEX(un)); 6162 (void) scsi_watch_request_terminate(temp_token, 6163 SCSI_WATCH_TERMINATE_WAIT); 6164 } else { 6165 mutex_exit(SD_MUTEX(un)); 6166 } 6167 } 6168 break; 6169 6170 default: /* The level requested is spindle on... */ 6171 /* 6172 * Legacy behavior: return success on a failed spinup 6173 * if there is no media in the drive. 6174 * Do this by looking at medium_present here. 6175 */ 6176 if ((sval != 0) && medium_present) { 6177 /* The start command from above failed */ 6178 rval = DDI_FAILURE; 6179 break; 6180 } 6181 /* 6182 * The start command from above succeeded 6183 * Resume the devices now that we have 6184 * started the disks 6185 */ 6186 (void) sd_ddi_pm_resume(un); 6187 6188 /* 6189 * Resume the watch thread since it was suspended 6190 * when the device went into low power mode. 6191 */ 6192 if (un->un_f_monitor_media_state) { 6193 mutex_enter(SD_MUTEX(un)); 6194 if (un->un_f_watcht_stopped == TRUE) { 6195 opaque_t temp_token; 6196 6197 un->un_f_watcht_stopped = FALSE; 6198 mutex_exit(SD_MUTEX(un)); 6199 temp_token = scsi_watch_request_submit( 6200 SD_SCSI_DEVP(un), 6201 sd_check_media_time, 6202 SENSE_LENGTH, sd_media_watch_cb, 6203 (caddr_t)dev); 6204 mutex_enter(SD_MUTEX(un)); 6205 un->un_swr_token = temp_token; 6206 } 6207 mutex_exit(SD_MUTEX(un)); 6208 } 6209 } 6210 if (got_semaphore_here != 0) { 6211 sema_v(&un->un_semoclose); 6212 } 6213 /* 6214 * On exit put the state back to it's original value 6215 * and broadcast to anyone waiting for the power 6216 * change completion. 6217 */ 6218 mutex_enter(SD_MUTEX(un)); 6219 un->un_state = state_before_pm; 6220 cv_broadcast(&un->un_suspend_cv); 6221 mutex_exit(SD_MUTEX(un)); 6222 6223 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6224 6225 return (rval); 6226 } 6227 6228 6229 6230 /* 6231 * Function: sdattach 6232 * 6233 * Description: Driver's attach(9e) entry point function. 6234 * 6235 * Arguments: devi - opaque device info handle 6236 * cmd - attach type 6237 * 6238 * Return Code: DDI_SUCCESS 6239 * DDI_FAILURE 6240 * 6241 * Context: Kernel thread context 6242 */ 6243 6244 static int 6245 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6246 { 6247 switch (cmd) { 6248 case DDI_ATTACH: 6249 return (sd_unit_attach(devi)); 6250 case DDI_RESUME: 6251 return (sd_ddi_resume(devi)); 6252 default: 6253 break; 6254 } 6255 return (DDI_FAILURE); 6256 } 6257 6258 6259 /* 6260 * Function: sddetach 6261 * 6262 * Description: Driver's detach(9E) entry point function. 
6263 * 6264 * Arguments: devi - opaque device info handle 6265 * cmd - detach type 6266 * 6267 * Return Code: DDI_SUCCESS 6268 * DDI_FAILURE 6269 * 6270 * Context: Kernel thread context 6271 */ 6272 6273 static int 6274 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6275 { 6276 switch (cmd) { 6277 case DDI_DETACH: 6278 return (sd_unit_detach(devi)); 6279 case DDI_SUSPEND: 6280 return (sd_ddi_suspend(devi)); 6281 default: 6282 break; 6283 } 6284 return (DDI_FAILURE); 6285 } 6286 6287 6288 /* 6289 * Function: sd_sync_with_callback 6290 * 6291 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6292 * state while the callback routine is active. 6293 * 6294 * Arguments: un: softstate structure for the instance 6295 * 6296 * Context: Kernel thread context 6297 */ 6298 6299 static void 6300 sd_sync_with_callback(struct sd_lun *un) 6301 { 6302 ASSERT(un != NULL); 6303 6304 mutex_enter(SD_MUTEX(un)); 6305 6306 ASSERT(un->un_in_callback >= 0); 6307 6308 while (un->un_in_callback > 0) { 6309 mutex_exit(SD_MUTEX(un)); 6310 delay(2); 6311 mutex_enter(SD_MUTEX(un)); 6312 } 6313 6314 mutex_exit(SD_MUTEX(un)); 6315 } 6316 6317 /* 6318 * Function: sd_unit_attach 6319 * 6320 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6321 * the soft state structure for the device and performs 6322 * all necessary structure and device initializations. 6323 * 6324 * Arguments: devi: the system's dev_info_t for the device. 6325 * 6326 * Return Code: DDI_SUCCESS if attach is successful. 6327 * DDI_FAILURE if any part of the attach fails. 6328 * 6329 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6330 * Kernel thread context only. Can sleep. 6331 */ 6332 6333 static int 6334 sd_unit_attach(dev_info_t *devi) 6335 { 6336 struct scsi_device *devp; 6337 struct sd_lun *un; 6338 char *variantp; 6339 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6340 int instance; 6341 int rval; 6342 int wc_enabled; 6343 int tgt; 6344 uint64_t capacity; 6345 uint_t lbasize = 0; 6346 dev_info_t *pdip = ddi_get_parent(devi); 6347 int offbyone = 0; 6348 int geom_label_valid = 0; 6349 6350 /* 6351 * Retrieve the target driver's private data area. This was set 6352 * up by the HBA. 6353 */ 6354 devp = ddi_get_driver_private(devi); 6355 6356 /* 6357 * Retrieve the target ID of the device. 6358 */ 6359 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6360 SCSI_ADDR_PROP_TARGET, -1); 6361 6362 /* 6363 * Since we have no idea what state things were left in by the last 6364 * user of the device, set up some 'default' settings, ie. turn 'em 6365 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6366 * Do this before the scsi_probe, which sends an inquiry. 6367 * This is a fix for bug (4430280). 6368 * Of special importance is wide-xfer. The drive could have been left 6369 * in wide transfer mode by the last driver to communicate with it, 6370 * this includes us. If that's the case, and if the following is not 6371 * setup properly or we don't re-negotiate with the drive prior to 6372 * transferring data to/from the drive, it causes bus parity errors, 6373 * data overruns, and unexpected interrupts. This first occurred when 6374 * the fix for bug (4378686) was made. 
6375 */ 6376 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6377 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6378 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6379 6380 /* 6381 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6382 * on a target. Setting it per lun instance actually sets the 6383 * capability of this target, which affects those luns already 6384 * attached on the same target. So during attach, we can only disable 6385 * this capability only when no other lun has been attached on this 6386 * target. By doing this, we assume a target has the same tagged-qing 6387 * capability for every lun. The condition can be removed when HBA 6388 * is changed to support per lun based tagged-qing capability. 6389 */ 6390 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6391 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6392 } 6393 6394 /* 6395 * Use scsi_probe() to issue an INQUIRY command to the device. 6396 * This call will allocate and fill in the scsi_inquiry structure 6397 * and point the sd_inq member of the scsi_device structure to it. 6398 * If the attach succeeds, then this memory will not be de-allocated 6399 * (via scsi_unprobe()) until the instance is detached. 6400 */ 6401 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6402 goto probe_failed; 6403 } 6404 6405 /* 6406 * Check the device type as specified in the inquiry data and 6407 * claim it if it is of a type that we support. 6408 */ 6409 switch (devp->sd_inq->inq_dtype) { 6410 case DTYPE_DIRECT: 6411 break; 6412 case DTYPE_RODIRECT: 6413 break; 6414 case DTYPE_OPTICAL: 6415 break; 6416 case DTYPE_NOTPRESENT: 6417 default: 6418 /* Unsupported device type; fail the attach. */ 6419 goto probe_failed; 6420 } 6421 6422 /* 6423 * Allocate the soft state structure for this unit. 6424 * 6425 * We rely upon this memory being set to all zeroes by 6426 * ddi_soft_state_zalloc(). We assume that any member of the 6427 * soft state structure that is not explicitly initialized by 6428 * this routine will have a value of zero. 6429 */ 6430 instance = ddi_get_instance(devp->sd_dev); 6431 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6432 goto probe_failed; 6433 } 6434 6435 /* 6436 * Retrieve a pointer to the newly-allocated soft state. 6437 * 6438 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6439 * was successful, unless something has gone horribly wrong and the 6440 * ddi's soft state internals are corrupt (in which case it is 6441 * probably better to halt here than just fail the attach....) 6442 */ 6443 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6444 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6445 instance); 6446 /*NOTREACHED*/ 6447 } 6448 6449 /* 6450 * Link the back ptr of the driver soft state to the scsi_device 6451 * struct for this lun. 6452 * Save a pointer to the softstate in the driver-private area of 6453 * the scsi_device struct. 6454 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6455 * we first set un->un_sd below. 6456 */ 6457 un->un_sd = devp; 6458 devp->sd_private = (opaque_t)un; 6459 6460 /* 6461 * The following must be after devp is stored in the soft state struct. 6462 */ 6463 #ifdef SDDEBUG 6464 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6465 "%s_unit_attach: un:0x%p instance:%d\n", 6466 ddi_driver_name(devi), un, instance); 6467 #endif 6468 6469 /* 6470 * Set up the device type and node type (for the minor nodes). 
6471 * By default we assume that the device can at least support the 6472 * Common Command Set. Call it a CD-ROM if it reports itself 6473 * as a RODIRECT device. 6474 */ 6475 switch (devp->sd_inq->inq_dtype) { 6476 case DTYPE_RODIRECT: 6477 un->un_node_type = DDI_NT_CD_CHAN; 6478 un->un_ctype = CTYPE_CDROM; 6479 break; 6480 case DTYPE_OPTICAL: 6481 un->un_node_type = DDI_NT_BLOCK_CHAN; 6482 un->un_ctype = CTYPE_ROD; 6483 break; 6484 default: 6485 un->un_node_type = DDI_NT_BLOCK_CHAN; 6486 un->un_ctype = CTYPE_CCS; 6487 break; 6488 } 6489 6490 /* 6491 * Try to read the interconnect type from the HBA. 6492 * 6493 * Note: This driver is currently compiled as two binaries, a parallel 6494 * scsi version (sd) and a fibre channel version (ssd). All functional 6495 * differences are determined at compile time. In the future a single 6496 * binary will be provided and the inteconnect type will be used to 6497 * differentiate between fibre and parallel scsi behaviors. At that time 6498 * it will be necessary for all fibre channel HBAs to support this 6499 * property. 6500 * 6501 * set un_f_is_fiber to TRUE ( default fiber ) 6502 */ 6503 un->un_f_is_fibre = TRUE; 6504 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6505 case INTERCONNECT_SSA: 6506 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6507 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6508 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6509 break; 6510 case INTERCONNECT_PARALLEL: 6511 un->un_f_is_fibre = FALSE; 6512 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6513 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6514 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 6515 break; 6516 case INTERCONNECT_SATA: 6517 un->un_f_is_fibre = FALSE; 6518 un->un_interconnect_type = SD_INTERCONNECT_SATA; 6519 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6520 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 6521 break; 6522 case INTERCONNECT_FIBRE: 6523 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 6524 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6525 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 6526 break; 6527 case INTERCONNECT_FABRIC: 6528 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 6529 un->un_node_type = DDI_NT_BLOCK_FABRIC; 6530 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6531 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 6532 break; 6533 default: 6534 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 6535 /* 6536 * The HBA does not support the "interconnect-type" property 6537 * (or did not provide a recognized type). 6538 * 6539 * Note: This will be obsoleted when a single fibre channel 6540 * and parallel scsi driver is delivered. In the meantime the 6541 * interconnect type will be set to the platform default.If that 6542 * type is not parallel SCSI, it means that we should be 6543 * assuming "ssd" semantics. However, here this also means that 6544 * the FC HBA is not supporting the "interconnect-type" property 6545 * like we expect it to, so log this occurrence. 6546 */ 6547 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 6548 if (!SD_IS_PARALLEL_SCSI(un)) { 6549 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6550 "sd_unit_attach: un:0x%p Assuming " 6551 "INTERCONNECT_FIBRE\n", un); 6552 } else { 6553 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6554 "sd_unit_attach: un:0x%p Assuming " 6555 "INTERCONNECT_PARALLEL\n", un); 6556 un->un_f_is_fibre = FALSE; 6557 } 6558 #else 6559 /* 6560 * Note: This source will be implemented when a single fibre 6561 * channel and parallel scsi driver is delivered. 
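/*
 * The switch above classifies the HBA's "interconnect-type" capability and
 * drives the fibre-vs-parallel behavior split. A condensed standalone view
 * of that classification; the INTERCONNECT_* codes are the values
 * scsi_ifgetcap(9F) returns for that capability, 'dflt_is_fibre' stands in
 * for the compile-time SD_DEFAULT_INTERCONNECT_TYPE fallback, and the helper
 * name is illustrative.
 */
static int
interconnect_is_fibre(int cap, int dflt_is_fibre)
{
    switch (cap) {
    case INTERCONNECT_FIBRE:
    case INTERCONNECT_FABRIC:
    case INTERCONNECT_SSA:
        return (1);             /* ssd (fibre channel) behaviors */
    case INTERCONNECT_PARALLEL:
    case INTERCONNECT_SATA:
        return (0);             /* parallel SCSI behaviors */
    default:
        return (dflt_is_fibre); /* capability missing or unrecognized */
    }
}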
The default 6562 * will be to assume that if a device does not support the 6563 * "interconnect-type" property it is a parallel SCSI HBA and 6564 * we will set the interconnect type for parallel scsi. 6565 */ 6566 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6567 un->un_f_is_fibre = FALSE; 6568 #endif 6569 break; 6570 } 6571 6572 if (un->un_f_is_fibre == TRUE) { 6573 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 6574 SCSI_VERSION_3) { 6575 switch (un->un_interconnect_type) { 6576 case SD_INTERCONNECT_FIBRE: 6577 case SD_INTERCONNECT_SSA: 6578 un->un_node_type = DDI_NT_BLOCK_WWN; 6579 break; 6580 default: 6581 break; 6582 } 6583 } 6584 } 6585 6586 /* 6587 * Initialize the Request Sense command for the target 6588 */ 6589 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 6590 goto alloc_rqs_failed; 6591 } 6592 6593 /* 6594 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 6595 * with separate binary for sd and ssd. 6596 * 6597 * x86 has 1 binary, un_retry_count is set base on connection type. 6598 * The hardcoded values will go away when Sparc uses 1 binary 6599 * for sd and ssd. This hardcoded values need to match 6600 * SD_RETRY_COUNT in sddef.h 6601 * The value used is base on interconnect type. 6602 * fibre = 3, parallel = 5 6603 */ 6604 #if defined(__i386) || defined(__amd64) 6605 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 6606 #else 6607 un->un_retry_count = SD_RETRY_COUNT; 6608 #endif 6609 6610 /* 6611 * Set the per disk retry count to the default number of retries 6612 * for disks and CDROMs. This value can be overridden by the 6613 * disk property list or an entry in sd.conf. 6614 */ 6615 un->un_notready_retry_count = 6616 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 6617 : DISK_NOT_READY_RETRY_COUNT(un); 6618 6619 /* 6620 * Set the busy retry count to the default value of un_retry_count. 6621 * This can be overridden by entries in sd.conf or the device 6622 * config table. 6623 */ 6624 un->un_busy_retry_count = un->un_retry_count; 6625 6626 /* 6627 * Init the reset threshold for retries. This number determines 6628 * how many retries must be performed before a reset can be issued 6629 * (for certain error conditions). This can be overridden by entries 6630 * in sd.conf or the device config table. 6631 */ 6632 un->un_reset_retry_count = (un->un_retry_count / 2); 6633 6634 /* 6635 * Set the victim_retry_count to the default un_retry_count 6636 */ 6637 un->un_victim_retry_count = (2 * un->un_retry_count); 6638 6639 /* 6640 * Set the reservation release timeout to the default value of 6641 * 5 seconds. This can be overridden by entries in ssd.conf or the 6642 * device config table. 6643 */ 6644 un->un_reserve_release_time = 5; 6645 6646 /* 6647 * Set up the default maximum transfer size. Note that this may 6648 * get updated later in the attach, when setting up default wide 6649 * operations for disks. 6650 */ 6651 #if defined(__i386) || defined(__amd64) 6652 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 6653 #else 6654 un->un_max_xfer_size = (uint_t)maxphys; 6655 #endif 6656 6657 /* 6658 * Get "allow bus device reset" property (defaults to "enabled" if 6659 * the property was not defined). This is to disable bus resets for 6660 * certain kinds of error recovery. Note: In the future when a run-time 6661 * fibre check is available the soft state flag should default to 6662 * enabled. 
6663 */ 6664 if (un->un_f_is_fibre == TRUE) { 6665 un->un_f_allow_bus_device_reset = TRUE; 6666 } else { 6667 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6668 "allow-bus-device-reset", 1) != 0) { 6669 un->un_f_allow_bus_device_reset = TRUE; 6670 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6671 "sd_unit_attach: un:0x%p Bus device reset " 6672 "enabled\n", un); 6673 } else { 6674 un->un_f_allow_bus_device_reset = FALSE; 6675 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6676 "sd_unit_attach: un:0x%p Bus device reset " 6677 "disabled\n", un); 6678 } 6679 } 6680 6681 /* 6682 * Check if this is an ATAPI device. ATAPI devices use Group 1 6683 * Read/Write commands and Group 2 Mode Sense/Select commands. 6684 * 6685 * Note: The "obsolete" way of doing this is to check for the "atapi" 6686 * property. The new "variant" property with a value of "atapi" has been 6687 * introduced so that future 'variants' of standard SCSI behavior (like 6688 * atapi) could be specified by the underlying HBA drivers by supplying 6689 * a new value for the "variant" property, instead of having to define a 6690 * new property. 6691 */ 6692 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6693 un->un_f_cfg_is_atapi = TRUE; 6694 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6695 "sd_unit_attach: un:0x%p Atapi device\n", un); 6696 } 6697 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6698 &variantp) == DDI_PROP_SUCCESS) { 6699 if (strcmp(variantp, "atapi") == 0) { 6700 un->un_f_cfg_is_atapi = TRUE; 6701 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6702 "sd_unit_attach: un:0x%p Atapi device\n", un); 6703 } 6704 ddi_prop_free(variantp); 6705 } 6706 6707 un->un_cmd_timeout = SD_IO_TIME; 6708 6709 /* Info on current states, statuses, etc. (Updated frequently) */ 6710 un->un_state = SD_STATE_NORMAL; 6711 un->un_last_state = SD_STATE_NORMAL; 6712 6713 /* Control & status info for command throttling */ 6714 un->un_throttle = sd_max_throttle; 6715 un->un_saved_throttle = sd_max_throttle; 6716 un->un_min_throttle = sd_min_throttle; 6717 6718 if (un->un_f_is_fibre == TRUE) { 6719 un->un_f_use_adaptive_throttle = TRUE; 6720 } else { 6721 un->un_f_use_adaptive_throttle = FALSE; 6722 } 6723 6724 /* Removable media support. */ 6725 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6726 un->un_mediastate = DKIO_NONE; 6727 un->un_specified_mediastate = DKIO_NONE; 6728 6729 /* CVs for suspend/resume (PM or DR) */ 6730 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6731 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6732 6733 /* Power management support. */ 6734 un->un_power_level = SD_SPINDLE_UNINIT; 6735 6736 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6737 un->un_f_wcc_inprog = 0; 6738 6739 /* 6740 * The open/close semaphore is used to serialize threads executing 6741 * in the driver's open & close entry point routines for a given 6742 * instance. 6743 */ 6744 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 6745 6746 /* 6747 * The conf file entry and softstate variable is a forceful override, 6748 * meaning a non-zero value must be entered to change the default. 6749 */ 6750 un->un_f_disksort_disabled = FALSE; 6751 6752 /* 6753 * Retrieve the properties from the static driver table or the driver 6754 * configuration file (.conf) for this unit and update the soft state 6755 * for the device as needed for the indicated properties. 
6756 * Note: the property configuration needs to occur here as some of the 6757 * following routines may have dependancies on soft state flags set 6758 * as part of the driver property configuration. 6759 */ 6760 sd_read_unit_properties(un); 6761 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6762 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 6763 6764 /* 6765 * Only if a device has "hotpluggable" property, it is 6766 * treated as hotpluggable device. Otherwise, it is 6767 * regarded as non-hotpluggable one. 6768 */ 6769 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 6770 -1) != -1) { 6771 un->un_f_is_hotpluggable = TRUE; 6772 } 6773 6774 /* 6775 * set unit's attributes(flags) according to "hotpluggable" and 6776 * RMB bit in INQUIRY data. 6777 */ 6778 sd_set_unit_attributes(un, devi); 6779 6780 /* 6781 * By default, we mark the capacity, lbasize, and geometry 6782 * as invalid. Only if we successfully read a valid capacity 6783 * will we update the un_blockcount and un_tgt_blocksize with the 6784 * valid values (the geometry will be validated later). 6785 */ 6786 un->un_f_blockcount_is_valid = FALSE; 6787 un->un_f_tgt_blocksize_is_valid = FALSE; 6788 6789 /* 6790 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 6791 * otherwise. 6792 */ 6793 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 6794 un->un_blockcount = 0; 6795 6796 /* 6797 * Set up the per-instance info needed to determine the correct 6798 * CDBs and other info for issuing commands to the target. 6799 */ 6800 sd_init_cdb_limits(un); 6801 6802 /* 6803 * Set up the IO chains to use, based upon the target type. 6804 */ 6805 if (un->un_f_non_devbsize_supported) { 6806 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6807 } else { 6808 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6809 } 6810 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6811 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 6812 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 6813 6814 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 6815 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 6816 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 6817 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 6818 6819 6820 if (ISCD(un)) { 6821 un->un_additional_codes = sd_additional_codes; 6822 } else { 6823 un->un_additional_codes = NULL; 6824 } 6825 6826 /* 6827 * Create the kstats here so they can be available for attach-time 6828 * routines that send commands to the unit (either polled or via 6829 * sd_send_scsi_cmd). 6830 * 6831 * Note: This is a critical sequence that needs to be maintained: 6832 * 1) Instantiate the kstats here, before any routines using the 6833 * iopath (i.e. sd_send_scsi_cmd). 6834 * 2) Instantiate and initialize the partition stats 6835 * (sd_set_pstats). 6836 * 3) Initialize the error stats (sd_set_errstats), following 6837 * sd_validate_geometry(),sd_register_devid(), 6838 * and sd_cache_control(). 
6839 */ 6840 6841 un->un_stats = kstat_create(sd_label, instance, 6842 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 6843 if (un->un_stats != NULL) { 6844 un->un_stats->ks_lock = SD_MUTEX(un); 6845 kstat_install(un->un_stats); 6846 } 6847 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6848 "sd_unit_attach: un:0x%p un_stats created\n", un); 6849 6850 sd_create_errstats(un, instance); 6851 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6852 "sd_unit_attach: un:0x%p errstats created\n", un); 6853 6854 /* 6855 * The following if/else code was relocated here from below as part 6856 * of the fix for bug (4430280). However with the default setup added 6857 * on entry to this routine, it's no longer absolutely necessary for 6858 * this to be before the call to sd_spin_up_unit. 6859 */ 6860 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 6861 /* 6862 * If SCSI-2 tagged queueing is supported by the target 6863 * and by the host adapter then we will enable it. 6864 */ 6865 un->un_tagflags = 0; 6866 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 6867 (devp->sd_inq->inq_cmdque) && 6868 (un->un_f_arq_enabled == TRUE)) { 6869 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 6870 1, 1) == 1) { 6871 un->un_tagflags = FLAG_STAG; 6872 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6873 "sd_unit_attach: un:0x%p tag queueing " 6874 "enabled\n", un); 6875 } else if (scsi_ifgetcap(SD_ADDRESS(un), 6876 "untagged-qing", 0) == 1) { 6877 un->un_f_opt_queueing = TRUE; 6878 un->un_saved_throttle = un->un_throttle = 6879 min(un->un_throttle, 3); 6880 } else { 6881 un->un_f_opt_queueing = FALSE; 6882 un->un_saved_throttle = un->un_throttle = 1; 6883 } 6884 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 6885 == 1) && (un->un_f_arq_enabled == TRUE)) { 6886 /* The Host Adapter supports internal queueing. */ 6887 un->un_f_opt_queueing = TRUE; 6888 un->un_saved_throttle = un->un_throttle = 6889 min(un->un_throttle, 3); 6890 } else { 6891 un->un_f_opt_queueing = FALSE; 6892 un->un_saved_throttle = un->un_throttle = 1; 6893 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6894 "sd_unit_attach: un:0x%p no tag queueing\n", un); 6895 } 6896 6897 /* 6898 * Enable large transfers for SATA/SAS drives 6899 */ 6900 if (SD_IS_SERIAL(un)) { 6901 un->un_max_xfer_size = 6902 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6903 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6904 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6905 "sd_unit_attach: un:0x%p max transfer " 6906 "size=0x%x\n", un, un->un_max_xfer_size); 6907 6908 } 6909 6910 /* Setup or tear down default wide operations for disks */ 6911 6912 /* 6913 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 6914 * and "ssd_max_xfer_size" to exist simultaneously on the same 6915 * system and be set to different values. In the future this 6916 * code may need to be updated when the ssd module is 6917 * obsoleted and removed from the system. 
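/*
 * The queueing setup above boils down to an initial throttle choice. A
 * condensed standalone view of that decision: 'max_throttle' plays the role
 * of sd_max_throttle, and the boolean arguments summarize the tests made in
 * the code (auto request sense enabled, SCSI-2 response format with the
 * CmdQue bit set, the HBA accepted the "tagged-qing" capability, the HBA
 * offers untagged/internal queueing). Names are illustrative.
 */
static int
initial_throttle(int max_throttle, int arq_enabled, int target_cmdque,
    int hba_took_tagged, int hba_untagged_qing)
{
    if (arq_enabled && target_cmdque && hba_took_tagged)
        return (max_throttle);          /* full tagged queueing */
    if (arq_enabled && hba_untagged_qing)
        return (max_throttle < 3 ? max_throttle : 3);
    return (1);                         /* one command at a time */
}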
(4299588) 6918 */ 6919 if (SD_IS_PARALLEL_SCSI(un) && 6920 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6921 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6922 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6923 1, 1) == 1) { 6924 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6925 "sd_unit_attach: un:0x%p Wide Transfer " 6926 "enabled\n", un); 6927 } 6928 6929 /* 6930 * If tagged queuing has also been enabled, then 6931 * enable large xfers 6932 */ 6933 if (un->un_saved_throttle == sd_max_throttle) { 6934 un->un_max_xfer_size = 6935 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6936 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6937 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6938 "sd_unit_attach: un:0x%p max transfer " 6939 "size=0x%x\n", un, un->un_max_xfer_size); 6940 } 6941 } else { 6942 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6943 0, 1) == 1) { 6944 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6945 "sd_unit_attach: un:0x%p " 6946 "Wide Transfer disabled\n", un); 6947 } 6948 } 6949 } else { 6950 un->un_tagflags = FLAG_STAG; 6951 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 6952 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 6953 } 6954 6955 /* 6956 * If this target supports LUN reset, try to enable it. 6957 */ 6958 if (un->un_f_lun_reset_enabled) { 6959 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 6960 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6961 "un:0x%p lun_reset capability set\n", un); 6962 } else { 6963 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6964 "un:0x%p lun-reset capability not set\n", un); 6965 } 6966 } 6967 6968 /* 6969 * At this point in the attach, we have enough info in the 6970 * soft state to be able to issue commands to the target. 6971 * 6972 * All command paths used below MUST issue their commands as 6973 * SD_PATH_DIRECT. This is important as intermediate layers 6974 * are not all initialized yet (such as PM). 6975 */ 6976 6977 /* 6978 * Send a TEST UNIT READY command to the device. This should clear 6979 * any outstanding UNIT ATTENTION that may be present. 6980 * 6981 * Note: Don't check for success, just track if there is a reservation, 6982 * this is a throw away command to clear any unit attentions. 6983 * 6984 * Note: This MUST be the first command issued to the target during 6985 * attach to ensure power on UNIT ATTENTIONS are cleared. 6986 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 6987 * with attempts at spinning up a device with no media. 6988 */ 6989 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 6990 reservation_flag = SD_TARGET_IS_RESERVED; 6991 } 6992 6993 /* 6994 * If the device is NOT a removable media device, attempt to spin 6995 * it up (using the START_STOP_UNIT command) and read its capacity 6996 * (using the READ CAPACITY command). Note, however, that either 6997 * of these could fail and in some cases we would continue with 6998 * the attach despite the failure (see below). 6999 */ 7000 if (un->un_f_descr_format_supported) { 7001 switch (sd_spin_up_unit(un)) { 7002 case 0: 7003 /* 7004 * Spin-up was successful; now try to read the 7005 * capacity. If successful then save the results 7006 * and mark the capacity & lbasize as valid. 
7007 */ 7008 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7009 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7010 7011 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7012 &lbasize, SD_PATH_DIRECT)) { 7013 case 0: { 7014 if (capacity > DK_MAX_BLOCKS) { 7015 #ifdef _LP64 7016 if (capacity + 1 > 7017 SD_GROUP1_MAX_ADDRESS) { 7018 /* 7019 * Enable descriptor format 7020 * sense data so that we can 7021 * get 64 bit sense data 7022 * fields. 7023 */ 7024 sd_enable_descr_sense(un); 7025 } 7026 #else 7027 /* 32-bit kernels can't handle this */ 7028 scsi_log(SD_DEVINFO(un), 7029 sd_label, CE_WARN, 7030 "disk has %llu blocks, which " 7031 "is too large for a 32-bit " 7032 "kernel", capacity); 7033 7034 #if defined(__i386) || defined(__amd64) 7035 /* 7036 * 1TB disk was treated as (1T - 512)B 7037 * in the past, so that it might have 7038 * valid VTOC and solaris partitions, 7039 * we have to allow it to continue to 7040 * work. 7041 */ 7042 if (capacity -1 > DK_MAX_BLOCKS) 7043 #endif 7044 goto spinup_failed; 7045 #endif 7046 } 7047 7048 /* 7049 * Here it's not necessary to check the case: 7050 * the capacity of the device is bigger than 7051 * what the max hba cdb can support. Because 7052 * sd_send_scsi_READ_CAPACITY will retrieve 7053 * the capacity by sending USCSI command, which 7054 * is constrained by the max hba cdb. Actually, 7055 * sd_send_scsi_READ_CAPACITY will return 7056 * EINVAL when using bigger cdb than required 7057 * cdb length. Will handle this case in 7058 * "case EINVAL". 7059 */ 7060 7061 /* 7062 * The following relies on 7063 * sd_send_scsi_READ_CAPACITY never 7064 * returning 0 for capacity and/or lbasize. 7065 */ 7066 sd_update_block_info(un, lbasize, capacity); 7067 7068 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7069 "sd_unit_attach: un:0x%p capacity = %ld " 7070 "blocks; lbasize= %ld.\n", un, 7071 un->un_blockcount, un->un_tgt_blocksize); 7072 7073 break; 7074 } 7075 case EINVAL: 7076 /* 7077 * In the case where the max-cdb-length property 7078 * is smaller than the required CDB length for 7079 * a SCSI device, a target driver can fail to 7080 * attach to that device. 7081 */ 7082 scsi_log(SD_DEVINFO(un), 7083 sd_label, CE_WARN, 7084 "disk capacity is too large " 7085 "for current cdb length"); 7086 goto spinup_failed; 7087 case EACCES: 7088 /* 7089 * Should never get here if the spin-up 7090 * succeeded, but code it in anyway. 7091 * From here, just continue with the attach... 7092 */ 7093 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7094 "sd_unit_attach: un:0x%p " 7095 "sd_send_scsi_READ_CAPACITY " 7096 "returned reservation conflict\n", un); 7097 reservation_flag = SD_TARGET_IS_RESERVED; 7098 break; 7099 default: 7100 /* 7101 * Likewise, should never get here if the 7102 * spin-up succeeded. Just continue with 7103 * the attach... 7104 */ 7105 break; 7106 } 7107 break; 7108 case EACCES: 7109 /* 7110 * Device is reserved by another host. In this case 7111 * we could not spin it up or read the capacity, but 7112 * we continue with the attach anyway. 7113 */ 7114 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7115 "sd_unit_attach: un:0x%p spin-up reservation " 7116 "conflict.\n", un); 7117 reservation_flag = SD_TARGET_IS_RESERVED; 7118 break; 7119 default: 7120 /* Fail the attach if the spin-up failed. 
*/ 7121 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7122 "sd_unit_attach: un:0x%p spin-up failed.", un); 7123 goto spinup_failed; 7124 } 7125 } 7126 7127 /* 7128 * Check to see if this is an MMC drive 7129 */ 7130 if (ISCD(un)) { 7131 sd_set_mmc_caps(un); 7132 } 7133 7134 7135 /* 7136 * Add a zero-length attribute to tell the world we support 7137 * kernel ioctls (for layered drivers) 7138 */ 7139 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7140 DDI_KERNEL_IOCTL, NULL, 0); 7141 7142 /* 7143 * Add a boolean property to tell the world we support 7144 * the B_FAILFAST flag (for layered drivers) 7145 */ 7146 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7147 "ddi-failfast-supported", NULL, 0); 7148 7149 /* 7150 * Initialize power management 7151 */ 7152 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7153 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7154 sd_setup_pm(un, devi); 7155 if (un->un_f_pm_is_enabled == FALSE) { 7156 /* 7157 * For performance, point to a jump table that does 7158 * not include pm. 7159 * The direct and priority chains don't change with PM. 7160 * 7161 * Note: this is currently done based on individual device 7162 * capabilities. When an interface for determining system 7163 * power enabled state becomes available, or when additional 7164 * layers are added to the command chain, these values will 7165 * have to be re-evaluated for correctness. 7166 */ 7167 if (un->un_f_non_devbsize_supported) { 7168 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7169 } else { 7170 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7171 } 7172 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7173 } 7174 7175 /* 7176 * This property is set to 0 by HA software to avoid retries 7177 * on a reserved disk. (The preferred property name is 7178 * "retry-on-reservation-conflict") (1189689) 7179 * 7180 * Note: The use of a global here can have unintended consequences. A 7181 * per instance variable is preferable to match the capabilities of 7182 * different underlying HBAs (4402600) 7183 */ 7184 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7185 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7186 sd_retry_on_reservation_conflict); 7187 if (sd_retry_on_reservation_conflict != 0) { 7188 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7189 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7190 sd_retry_on_reservation_conflict); 7191 } 7192 7193 /* Set up options for QFULL handling. */ 7194 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7195 "qfull-retries", -1)) != -1) { 7196 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7197 rval, 1); 7198 } 7199 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7200 "qfull-retry-interval", -1)) != -1) { 7201 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7202 rval, 1); 7203 } 7204 7205 /* 7206 * This just prints a message that announces the existence of the 7207 * device. The message is always printed in the system logfile, but 7208 * only appears on the console if the system is booted with the 7209 * -v (verbose) argument.
7210 */ 7211 ddi_report_dev(devi); 7212 7213 un->un_mediastate = DKIO_NONE; 7214 7215 cmlb_alloc_handle(&un->un_cmlbhandle); 7216 7217 #if defined(__i386) || defined(__amd64) 7218 /* 7219 * On x86, compensate for off-by-1 legacy error 7220 */ 7221 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7222 (lbasize == un->un_sys_blocksize)) 7223 offbyone = CMLB_OFF_BY_ONE; 7224 #endif 7225 7226 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7227 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7228 un->un_node_type, offbyone, un->un_cmlbhandle, 7229 (void *)SD_PATH_DIRECT) != 0) { 7230 goto cmlb_attach_failed; 7231 } 7232 7233 7234 /* 7235 * Read and validate the device's geometry (i.e., disk label). 7236 * A new unformatted drive will not have a valid geometry, but 7237 * the driver needs to successfully attach to this device so 7238 * the drive can be formatted via ioctls. 7239 */ 7240 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7241 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7242 7243 mutex_enter(SD_MUTEX(un)); 7244 7245 /* 7246 * Read and initialize the devid for the unit. 7247 */ 7248 ASSERT(un->un_errstats != NULL); 7249 if (un->un_f_devid_supported) { 7250 sd_register_devid(un, devi, reservation_flag); 7251 } 7252 mutex_exit(SD_MUTEX(un)); 7253 7254 #if (defined(__fibre)) 7255 /* 7256 * Register callbacks for fibre only. You can't do this solely 7257 * on the basis of the devid_type because this is HBA specific. 7258 * We need to query our HBA capabilities to find out whether to 7259 * register or not. 7260 */ 7261 if (un->un_f_is_fibre) { 7262 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7263 sd_init_event_callbacks(un); 7264 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7265 "sd_unit_attach: un:0x%p event callbacks inserted", 7266 un); 7267 } 7268 } 7269 #endif 7270 7271 if (un->un_f_opt_disable_cache == TRUE) { 7272 /* 7273 * Disable both read cache and write cache. This is 7274 * the historic behavior of the keywords in the config file. 7275 */ 7276 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7277 0) { 7278 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7279 "sd_unit_attach: un:0x%p Could not disable " 7280 "caching", un); 7281 goto devid_failed; 7282 } 7283 } 7284 7285 /* 7286 * Check the value of the WCE bit now and 7287 * set un_f_write_cache_enabled accordingly. 7288 */ 7289 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7290 mutex_enter(SD_MUTEX(un)); 7291 un->un_f_write_cache_enabled = (wc_enabled != 0); 7292 mutex_exit(SD_MUTEX(un)); 7293 7294 /* 7295 * Find out what type of reservation this disk supports. 7296 */ 7297 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7298 case 0: 7299 /* 7300 * SCSI-3 reservations are supported. 7301 */ 7302 un->un_reservation_type = SD_SCSI3_RESERVATION; 7303 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7304 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7305 break; 7306 case ENOTSUP: 7307 /* 7308 * The PERSISTENT RESERVE IN command would not be recognized by 7309 * a SCSI-2 device, so assume the reservation type is SCSI-2.
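 *
 * Illustrative use of the type recorded here (sketch only, not code
 * from this routine): later reservation requests can branch on it,
 * for example
 *
 *	if (un->un_reservation_type == SD_SCSI3_RESERVATION)
 *		issue PERSISTENT RESERVE OUT service actions;
 *	else
 *		fall back to SCSI-2 RESERVE and RELEASE;
 *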
7310 */ 7311 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7312 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7313 un->un_reservation_type = SD_SCSI2_RESERVATION; 7314 break; 7315 default: 7316 /* 7317 * default to SCSI-3 reservations 7318 */ 7319 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7320 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7321 un->un_reservation_type = SD_SCSI3_RESERVATION; 7322 break; 7323 } 7324 7325 /* 7326 * Set the pstat and error stat values here, so data obtained during the 7327 * previous attach-time routines is available. 7328 * 7329 * Note: This is a critical sequence that needs to be maintained: 7330 * 1) Instantiate the kstats before any routines using the iopath 7331 * (i.e. sd_send_scsi_cmd). 7332 * 2) Initialize the error stats (sd_set_errstats) and partition 7333 * stats (sd_set_pstats)here, following 7334 * cmlb_validate_geometry(), sd_register_devid(), and 7335 * sd_cache_control(). 7336 */ 7337 7338 if (un->un_f_pkstats_enabled && geom_label_valid) { 7339 sd_set_pstats(un); 7340 SD_TRACE(SD_LOG_IO_PARTITION, un, 7341 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7342 } 7343 7344 sd_set_errstats(un); 7345 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7346 "sd_unit_attach: un:0x%p errstats set\n", un); 7347 7348 7349 /* 7350 * After successfully attaching an instance, we record the information 7351 * of how many luns have been attached on the relative target and 7352 * controller for parallel SCSI. This information is used when sd tries 7353 * to set the tagged queuing capability in HBA. 7354 */ 7355 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7356 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7357 } 7358 7359 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7360 "sd_unit_attach: un:0x%p exit success\n", un); 7361 7362 return (DDI_SUCCESS); 7363 7364 /* 7365 * An error occurred during the attach; clean up & return failure. 7366 */ 7367 7368 devid_failed: 7369 7370 setup_pm_failed: 7371 ddi_remove_minor_node(devi, NULL); 7372 7373 cmlb_attach_failed: 7374 /* 7375 * Cleanup from the scsi_ifsetcap() calls (437868) 7376 */ 7377 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7378 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7379 7380 /* 7381 * Refer to the comments of setting tagged-qing in the beginning of 7382 * sd_unit_attach. We can only disable tagged queuing when there is 7383 * no lun attached on the target. 7384 */ 7385 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7386 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7387 } 7388 7389 if (un->un_f_is_fibre == FALSE) { 7390 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7391 } 7392 7393 spinup_failed: 7394 7395 mutex_enter(SD_MUTEX(un)); 7396 7397 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7398 if (un->un_direct_priority_timeid != NULL) { 7399 timeout_id_t temp_id = un->un_direct_priority_timeid; 7400 un->un_direct_priority_timeid = NULL; 7401 mutex_exit(SD_MUTEX(un)); 7402 (void) untimeout(temp_id); 7403 mutex_enter(SD_MUTEX(un)); 7404 } 7405 7406 /* Cancel any pending start/stop timeouts */ 7407 if (un->un_startstop_timeid != NULL) { 7408 timeout_id_t temp_id = un->un_startstop_timeid; 7409 un->un_startstop_timeid = NULL; 7410 mutex_exit(SD_MUTEX(un)); 7411 (void) untimeout(temp_id); 7412 mutex_enter(SD_MUTEX(un)); 7413 } 7414 7415 /* Cancel any pending reset-throttle timeouts */ 7416 if (un->un_reset_throttle_timeid != NULL) { 7417 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7418 un->un_reset_throttle_timeid = NULL; 7419 mutex_exit(SD_MUTEX(un)); 7420 (void) untimeout(temp_id); 7421 mutex_enter(SD_MUTEX(un)); 7422 } 7423 7424 /* Cancel any pending retry timeouts */ 7425 if (un->un_retry_timeid != NULL) { 7426 timeout_id_t temp_id = un->un_retry_timeid; 7427 un->un_retry_timeid = NULL; 7428 mutex_exit(SD_MUTEX(un)); 7429 (void) untimeout(temp_id); 7430 mutex_enter(SD_MUTEX(un)); 7431 } 7432 7433 /* Cancel any pending delayed cv broadcast timeouts */ 7434 if (un->un_dcvb_timeid != NULL) { 7435 timeout_id_t temp_id = un->un_dcvb_timeid; 7436 un->un_dcvb_timeid = NULL; 7437 mutex_exit(SD_MUTEX(un)); 7438 (void) untimeout(temp_id); 7439 mutex_enter(SD_MUTEX(un)); 7440 } 7441 7442 mutex_exit(SD_MUTEX(un)); 7443 7444 /* There should not be any in-progress I/O so ASSERT this check */ 7445 ASSERT(un->un_ncmds_in_transport == 0); 7446 ASSERT(un->un_ncmds_in_driver == 0); 7447 7448 /* Do not free the softstate if the callback routine is active */ 7449 sd_sync_with_callback(un); 7450 7451 /* 7452 * Partition stats apparently are not used with removables. These would 7453 * not have been created during attach, so no need to clean them up... 7454 */ 7455 if (un->un_stats != NULL) { 7456 kstat_delete(un->un_stats); 7457 un->un_stats = NULL; 7458 } 7459 if (un->un_errstats != NULL) { 7460 kstat_delete(un->un_errstats); 7461 un->un_errstats = NULL; 7462 } 7463 7464 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7465 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7466 7467 ddi_prop_remove_all(devi); 7468 sema_destroy(&un->un_semoclose); 7469 cv_destroy(&un->un_state_cv); 7470 7471 getrbuf_failed: 7472 7473 sd_free_rqs(un); 7474 7475 alloc_rqs_failed: 7476 7477 devp->sd_private = NULL; 7478 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7479 7480 get_softstate_failed: 7481 /* 7482 * Note: the man pages are unclear as to whether or not doing a 7483 * ddi_soft_state_free(sd_state, instance) is the right way to 7484 * clean up after the ddi_soft_state_zalloc() if the subsequent 7485 * ddi_get_soft_state() fails. The implication seems to be 7486 * that the get_soft_state cannot fail if the zalloc succeeds. 7487 */ 7488 ddi_soft_state_free(sd_state, instance); 7489 7490 probe_failed: 7491 scsi_unprobe(devp); 7492 #ifdef SDDEBUG 7493 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 7494 (sd_level_mask & SD_LOGMASK_TRACE)) { 7495 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 7496 (void *)un); 7497 } 7498 #endif 7499 return (DDI_FAILURE); 7500 } 7501 7502 7503 /* 7504 * Function: sd_unit_detach 7505 * 7506 * Description: Performs DDI_DETACH processing for sddetach(). 
7507 * 7508 * Return Code: DDI_SUCCESS 7509 * DDI_FAILURE 7510 * 7511 * Context: Kernel thread context 7512 */ 7513 7514 static int 7515 sd_unit_detach(dev_info_t *devi) 7516 { 7517 struct scsi_device *devp; 7518 struct sd_lun *un; 7519 int i; 7520 int tgt; 7521 dev_t dev; 7522 dev_info_t *pdip = ddi_get_parent(devi); 7523 int instance = ddi_get_instance(devi); 7524 7525 mutex_enter(&sd_detach_mutex); 7526 7527 /* 7528 * Fail the detach for any of the following: 7529 * - Unable to get the sd_lun struct for the instance 7530 * - A layered driver has an outstanding open on the instance 7531 * - Another thread is already detaching this instance 7532 * - Another thread is currently performing an open 7533 */ 7534 devp = ddi_get_driver_private(devi); 7535 if ((devp == NULL) || 7536 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7537 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7538 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7539 mutex_exit(&sd_detach_mutex); 7540 return (DDI_FAILURE); 7541 } 7542 7543 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7544 7545 /* 7546 * Mark this instance as currently in a detach, to inhibit any 7547 * opens from a layered driver. 7548 */ 7549 un->un_detach_count++; 7550 mutex_exit(&sd_detach_mutex); 7551 7552 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7553 SCSI_ADDR_PROP_TARGET, -1); 7554 7555 dev = sd_make_device(SD_DEVINFO(un)); 7556 7557 #ifndef lint 7558 _NOTE(COMPETING_THREADS_NOW); 7559 #endif 7560 7561 mutex_enter(SD_MUTEX(un)); 7562 7563 /* 7564 * Fail the detach if there are any outstanding layered 7565 * opens on this device. 7566 */ 7567 for (i = 0; i < NDKMAP; i++) { 7568 if (un->un_ocmap.lyropen[i] != 0) { 7569 goto err_notclosed; 7570 } 7571 } 7572 7573 /* 7574 * Verify there are NO outstanding commands issued to this device. 7575 * ie, un_ncmds_in_transport == 0. 7576 * It's possible to have outstanding commands through the physio 7577 * code path, even though everything's closed. 7578 */ 7579 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7580 (un->un_direct_priority_timeid != NULL) || 7581 (un->un_state == SD_STATE_RWAIT)) { 7582 mutex_exit(SD_MUTEX(un)); 7583 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7584 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7585 goto err_stillbusy; 7586 } 7587 7588 /* 7589 * If we have the device reserved, release the reservation. 7590 */ 7591 if ((un->un_resvd_status & SD_RESERVE) && 7592 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7593 mutex_exit(SD_MUTEX(un)); 7594 /* 7595 * Note: sd_reserve_release sends a command to the device 7596 * via the sd_ioctlcmd() path, and can sleep. 7597 */ 7598 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7599 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7600 "sd_dr_detach: Cannot release reservation \n"); 7601 } 7602 } else { 7603 mutex_exit(SD_MUTEX(un)); 7604 } 7605 7606 /* 7607 * Untimeout any reserve recover, throttle reset, restart unit 7608 * and delayed broadcast timeout threads. Protect the timeout pointer 7609 * from getting nulled by their callback functions. 
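 *
 * Every cancellation below follows the same idiom; expressed as a
 * hypothetical helper (not part of this driver) it would be:
 *
 *	static void
 *	sd_cancel_timeout(struct sd_lun *un, timeout_id_t *idp)
 *	{
 *		ASSERT(mutex_owned(SD_MUTEX(un)));
 *		if (*idp != NULL) {
 *			timeout_id_t tid = *idp;
 *			*idp = NULL;
 *			mutex_exit(SD_MUTEX(un));
 *			(void) untimeout(tid);
 *			mutex_enter(SD_MUTEX(un));
 *		}
 *	}
 *
 * Capturing and clearing the id while the mutex is held is what
 * protects it from being nulled by the callback itself.
 *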
7610 */ 7611 mutex_enter(SD_MUTEX(un)); 7612 if (un->un_resvd_timeid != NULL) { 7613 timeout_id_t temp_id = un->un_resvd_timeid; 7614 un->un_resvd_timeid = NULL; 7615 mutex_exit(SD_MUTEX(un)); 7616 (void) untimeout(temp_id); 7617 mutex_enter(SD_MUTEX(un)); 7618 } 7619 7620 if (un->un_reset_throttle_timeid != NULL) { 7621 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7622 un->un_reset_throttle_timeid = NULL; 7623 mutex_exit(SD_MUTEX(un)); 7624 (void) untimeout(temp_id); 7625 mutex_enter(SD_MUTEX(un)); 7626 } 7627 7628 if (un->un_startstop_timeid != NULL) { 7629 timeout_id_t temp_id = un->un_startstop_timeid; 7630 un->un_startstop_timeid = NULL; 7631 mutex_exit(SD_MUTEX(un)); 7632 (void) untimeout(temp_id); 7633 mutex_enter(SD_MUTEX(un)); 7634 } 7635 7636 if (un->un_dcvb_timeid != NULL) { 7637 timeout_id_t temp_id = un->un_dcvb_timeid; 7638 un->un_dcvb_timeid = NULL; 7639 mutex_exit(SD_MUTEX(un)); 7640 (void) untimeout(temp_id); 7641 } else { 7642 mutex_exit(SD_MUTEX(un)); 7643 } 7644 7645 /* Remove any pending reservation reclaim requests for this device */ 7646 sd_rmv_resv_reclaim_req(dev); 7647 7648 mutex_enter(SD_MUTEX(un)); 7649 7650 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7651 if (un->un_direct_priority_timeid != NULL) { 7652 timeout_id_t temp_id = un->un_direct_priority_timeid; 7653 un->un_direct_priority_timeid = NULL; 7654 mutex_exit(SD_MUTEX(un)); 7655 (void) untimeout(temp_id); 7656 mutex_enter(SD_MUTEX(un)); 7657 } 7658 7659 /* Cancel any active multi-host disk watch thread requests */ 7660 if (un->un_mhd_token != NULL) { 7661 mutex_exit(SD_MUTEX(un)); 7662 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7663 if (scsi_watch_request_terminate(un->un_mhd_token, 7664 SCSI_WATCH_TERMINATE_NOWAIT)) { 7665 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7666 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7667 /* 7668 * Note: We are returning here after having removed 7669 * some driver timeouts above. This is consistent with 7670 * the legacy implementation but perhaps the watch 7671 * terminate call should be made with the wait flag set. 7672 */ 7673 goto err_stillbusy; 7674 } 7675 mutex_enter(SD_MUTEX(un)); 7676 un->un_mhd_token = NULL; 7677 } 7678 7679 if (un->un_swr_token != NULL) { 7680 mutex_exit(SD_MUTEX(un)); 7681 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7682 if (scsi_watch_request_terminate(un->un_swr_token, 7683 SCSI_WATCH_TERMINATE_NOWAIT)) { 7684 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7685 "sd_dr_detach: Cannot cancel swr watch request\n"); 7686 /* 7687 * Note: We are returning here after having removed 7688 * some driver timeouts above. This is consistent with 7689 * the legacy implementation but perhaps the watch 7690 * terminate call should be made with the wait flag set. 7691 */ 7692 goto err_stillbusy; 7693 } 7694 mutex_enter(SD_MUTEX(un)); 7695 un->un_swr_token = NULL; 7696 } 7697 7698 mutex_exit(SD_MUTEX(un)); 7699 7700 /* 7701 * Clear any scsi_reset_notifies. We clear the reset notifies 7702 * if we have not registered one. 7703 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7704 */ 7705 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7706 sd_mhd_reset_notify_cb, (caddr_t)un); 7707 7708 /* 7709 * protect the timeout pointers from getting nulled by 7710 * their callback functions during the cancellation process. 7711 * In such a scenario untimeout can be invoked with a null value. 
7712 */ 7713 _NOTE(NO_COMPETING_THREADS_NOW); 7714 7715 mutex_enter(&un->un_pm_mutex); 7716 if (un->un_pm_idle_timeid != NULL) { 7717 timeout_id_t temp_id = un->un_pm_idle_timeid; 7718 un->un_pm_idle_timeid = NULL; 7719 mutex_exit(&un->un_pm_mutex); 7720 7721 /* 7722 * Timeout is active; cancel it. 7723 * Note that it'll never be active on a device 7724 * that does not support PM therefore we don't 7725 * have to check before calling pm_idle_component. 7726 */ 7727 (void) untimeout(temp_id); 7728 (void) pm_idle_component(SD_DEVINFO(un), 0); 7729 mutex_enter(&un->un_pm_mutex); 7730 } 7731 7732 /* 7733 * Check whether there is already a timeout scheduled for power 7734 * management. If so, don't lower the power here; that's 7735 * the timeout handler's job. 7736 */ 7737 if (un->un_pm_timeid != NULL) { 7738 timeout_id_t temp_id = un->un_pm_timeid; 7739 un->un_pm_timeid = NULL; 7740 mutex_exit(&un->un_pm_mutex); 7741 /* 7742 * Timeout is active; cancel it. 7743 * Note that it'll never be active on a device 7744 * that does not support PM therefore we don't 7745 * have to check before calling pm_idle_component. 7746 */ 7747 (void) untimeout(temp_id); 7748 (void) pm_idle_component(SD_DEVINFO(un), 0); 7749 7750 } else { 7751 mutex_exit(&un->un_pm_mutex); 7752 if ((un->un_f_pm_is_enabled == TRUE) && 7753 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 7754 DDI_SUCCESS)) { 7755 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7756 "sd_dr_detach: Lower power request failed, ignoring.\n"); 7757 /* 7758 * Fix for bug: 4297749, item # 13 7759 * The above test now includes a check to see if PM is 7760 * supported by this device before calling 7761 * pm_lower_power(). 7762 * Note, the following is not dead code. The call to 7763 * pm_lower_power above will generate a call back into 7764 * our sdpower routine which might result in a timeout 7765 * handler getting activated. Therefore the following 7766 * code is valid and necessary. 7767 */ 7768 mutex_enter(&un->un_pm_mutex); 7769 if (un->un_pm_timeid != NULL) { 7770 timeout_id_t temp_id = un->un_pm_timeid; 7771 un->un_pm_timeid = NULL; 7772 mutex_exit(&un->un_pm_mutex); 7773 (void) untimeout(temp_id); 7774 (void) pm_idle_component(SD_DEVINFO(un), 0); 7775 } else { 7776 mutex_exit(&un->un_pm_mutex); 7777 } 7778 } 7779 } 7780 7781 /* 7782 * Cleanup from the scsi_ifsetcap() calls (437868) 7783 * Relocated here from above to be after the call to 7784 * pm_lower_power, which was getting errors. 7785 */ 7786 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7787 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7788 7789 /* 7790 * Currently, tagged queuing is supported per target by the HBA. 7791 * Setting this per lun instance actually sets the capability of this 7792 * target in the HBA, which affects those luns already attached on the 7793 * same target. So during detach, we can only disable this capability 7794 * when this is the only lun left on this target. By doing 7795 * this, we assume a target has the same tagged queuing capability 7796 * for every lun. The condition can be removed when the HBA is changed 7797 * to support per-lun tagged queuing capability.
7798 */ 7799 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7800 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7801 } 7802 7803 if (un->un_f_is_fibre == FALSE) { 7804 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7805 } 7806 7807 /* 7808 * Remove any event callbacks, fibre only 7809 */ 7810 if (un->un_f_is_fibre == TRUE) { 7811 if ((un->un_insert_event != NULL) && 7812 (ddi_remove_event_handler(un->un_insert_cb_id) != 7813 DDI_SUCCESS)) { 7814 /* 7815 * Note: We are returning here after having done 7816 * substantial cleanup above. This is consistent 7817 * with the legacy implementation but this may not 7818 * be the right thing to do. 7819 */ 7820 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7821 "sd_dr_detach: Cannot cancel insert event\n"); 7822 goto err_remove_event; 7823 } 7824 un->un_insert_event = NULL; 7825 7826 if ((un->un_remove_event != NULL) && 7827 (ddi_remove_event_handler(un->un_remove_cb_id) != 7828 DDI_SUCCESS)) { 7829 /* 7830 * Note: We are returning here after having done 7831 * substantial cleanup above. This is consistent 7832 * with the legacy implementation but this may not 7833 * be the right thing to do. 7834 */ 7835 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7836 "sd_dr_detach: Cannot cancel remove event\n"); 7837 goto err_remove_event; 7838 } 7839 un->un_remove_event = NULL; 7840 } 7841 7842 /* Do not free the softstate if the callback routine is active */ 7843 sd_sync_with_callback(un); 7844 7845 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7846 cmlb_free_handle(&un->un_cmlbhandle); 7847 7848 /* 7849 * Hold the detach mutex here, to make sure that no other threads ever 7850 * can access a (partially) freed soft state structure. 7851 */ 7852 mutex_enter(&sd_detach_mutex); 7853 7854 /* 7855 * Clean up the soft state struct. 7856 * Cleanup is done in reverse order of allocs/inits. 7857 * At this point there should be no competing threads anymore. 7858 */ 7859 7860 /* Unregister and free device id. */ 7861 ddi_devid_unregister(devi); 7862 if (un->un_devid) { 7863 ddi_devid_free(un->un_devid); 7864 un->un_devid = NULL; 7865 } 7866 7867 /* 7868 * Destroy wmap cache if it exists. 7869 */ 7870 if (un->un_wm_cache != NULL) { 7871 kmem_cache_destroy(un->un_wm_cache); 7872 un->un_wm_cache = NULL; 7873 } 7874 7875 /* 7876 * kstat cleanup is done in detach for all device types (4363169). 7877 * We do not want to fail detach if the device kstats are not deleted 7878 * since there is a confusion about the devo_refcnt for the device. 7879 * We just delete the kstats and let detach complete successfully. 7880 */ 7881 if (un->un_stats != NULL) { 7882 kstat_delete(un->un_stats); 7883 un->un_stats = NULL; 7884 } 7885 if (un->un_errstats != NULL) { 7886 kstat_delete(un->un_errstats); 7887 un->un_errstats = NULL; 7888 } 7889 7890 /* Remove partition stats */ 7891 if (un->un_f_pkstats_enabled) { 7892 for (i = 0; i < NSDMAP; i++) { 7893 if (un->un_pstats[i] != NULL) { 7894 kstat_delete(un->un_pstats[i]); 7895 un->un_pstats[i] = NULL; 7896 } 7897 } 7898 } 7899 7900 /* Remove xbuf registration */ 7901 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7902 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7903 7904 /* Remove driver properties */ 7905 ddi_prop_remove_all(devi); 7906 7907 mutex_destroy(&un->un_pm_mutex); 7908 cv_destroy(&un->un_pm_busy_cv); 7909 7910 cv_destroy(&un->un_wcc_cv); 7911 7912 /* Open/close semaphore */ 7913 sema_destroy(&un->un_semoclose); 7914 7915 /* Removable media condvar. 
*/ 7916 cv_destroy(&un->un_state_cv); 7917 7918 /* Suspend/resume condvar. */ 7919 cv_destroy(&un->un_suspend_cv); 7920 cv_destroy(&un->un_disk_busy_cv); 7921 7922 sd_free_rqs(un); 7923 7924 /* Free up soft state */ 7925 devp->sd_private = NULL; 7926 7927 bzero(un, sizeof (struct sd_lun)); 7928 ddi_soft_state_free(sd_state, instance); 7929 7930 mutex_exit(&sd_detach_mutex); 7931 7932 /* This frees up the INQUIRY data associated with the device. */ 7933 scsi_unprobe(devp); 7934 7935 /* 7936 * After successfully detaching an instance, we update the information 7937 * of how many luns have been attached in the relative target and 7938 * controller for parallel SCSI. This information is used when sd tries 7939 * to set the tagged queuing capability in HBA. 7940 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 7941 * check if the device is parallel SCSI. However, we don't need to 7942 * check here because we've already checked during attach. No device 7943 * that is not parallel SCSI is in the chain. 7944 */ 7945 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7946 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 7947 } 7948 7949 return (DDI_SUCCESS); 7950 7951 err_notclosed: 7952 mutex_exit(SD_MUTEX(un)); 7953 7954 err_stillbusy: 7955 _NOTE(NO_COMPETING_THREADS_NOW); 7956 7957 err_remove_event: 7958 mutex_enter(&sd_detach_mutex); 7959 un->un_detach_count--; 7960 mutex_exit(&sd_detach_mutex); 7961 7962 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 7963 return (DDI_FAILURE); 7964 } 7965 7966 7967 /* 7968 * Function: sd_create_errstats 7969 * 7970 * Description: This routine instantiates the device error stats. 7971 * 7972 * Note: During attach the stats are instantiated first so they are 7973 * available for attach-time routines that utilize the driver 7974 * iopath to send commands to the device. The stats are initialized 7975 * separately so data obtained during some attach-time routines is 7976 * available. 
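 *
 * Usage sketch (user level, illustrative only): once installed, these
 * counters are visible through the kstat framework, for example via
 *
 *	iostat -E
 *	kstat -m sderr -n sd0,err	for instance 0 of the sd module
 *
 * matching the "<label>err" module and "<label><instance>,err" name
 * built by the snprintf() calls below.
 *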
(4362483) 7977 * 7978 * Arguments: un - driver soft state (unit) structure 7979 * instance - driver instance 7980 * 7981 * Context: Kernel thread context 7982 */ 7983 7984 static void 7985 sd_create_errstats(struct sd_lun *un, int instance) 7986 { 7987 struct sd_errstats *stp; 7988 char kstatmodule_err[KSTAT_STRLEN]; 7989 char kstatname[KSTAT_STRLEN]; 7990 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 7991 7992 ASSERT(un != NULL); 7993 7994 if (un->un_errstats != NULL) { 7995 return; 7996 } 7997 7998 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 7999 "%serr", sd_label); 8000 (void) snprintf(kstatname, sizeof (kstatname), 8001 "%s%d,err", sd_label, instance); 8002 8003 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8004 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8005 8006 if (un->un_errstats == NULL) { 8007 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8008 "sd_create_errstats: Failed kstat_create\n"); 8009 return; 8010 } 8011 8012 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8013 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8014 KSTAT_DATA_UINT32); 8015 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8016 KSTAT_DATA_UINT32); 8017 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8018 KSTAT_DATA_UINT32); 8019 kstat_named_init(&stp->sd_vid, "Vendor", 8020 KSTAT_DATA_CHAR); 8021 kstat_named_init(&stp->sd_pid, "Product", 8022 KSTAT_DATA_CHAR); 8023 kstat_named_init(&stp->sd_revision, "Revision", 8024 KSTAT_DATA_CHAR); 8025 kstat_named_init(&stp->sd_serial, "Serial No", 8026 KSTAT_DATA_CHAR); 8027 kstat_named_init(&stp->sd_capacity, "Size", 8028 KSTAT_DATA_ULONGLONG); 8029 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8030 KSTAT_DATA_UINT32); 8031 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8032 KSTAT_DATA_UINT32); 8033 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8034 KSTAT_DATA_UINT32); 8035 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8036 KSTAT_DATA_UINT32); 8037 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8038 KSTAT_DATA_UINT32); 8039 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8040 KSTAT_DATA_UINT32); 8041 8042 un->un_errstats->ks_private = un; 8043 un->un_errstats->ks_update = nulldev; 8044 8045 kstat_install(un->un_errstats); 8046 } 8047 8048 8049 /* 8050 * Function: sd_set_errstats 8051 * 8052 * Description: This routine sets the value of the vendor id, product id, 8053 * revision, serial number, and capacity device error stats. 8054 * 8055 * Note: During attach the stats are instantiated first so they are 8056 * available for attach-time routines that utilize the driver 8057 * iopath to send commands to the device. The stats are initialized 8058 * separately so data obtained during some attach-time routines is 8059 * available. 
(4362483) 8060 * 8061 * Arguments: un - driver soft state (unit) structure 8062 * 8063 * Context: Kernel thread context 8064 */ 8065 8066 static void 8067 sd_set_errstats(struct sd_lun *un) 8068 { 8069 struct sd_errstats *stp; 8070 8071 ASSERT(un != NULL); 8072 ASSERT(un->un_errstats != NULL); 8073 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8074 ASSERT(stp != NULL); 8075 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8076 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8077 (void) strncpy(stp->sd_revision.value.c, 8078 un->un_sd->sd_inq->inq_revision, 4); 8079 8080 /* 8081 * All the errstats are persistent across detach/attach, 8082 * so reset all the errstats here in case of the hot 8083 * replacement of disk drives, except for not changed 8084 * Sun qualified drives. 8085 */ 8086 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8087 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8088 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8089 stp->sd_softerrs.value.ui32 = 0; 8090 stp->sd_harderrs.value.ui32 = 0; 8091 stp->sd_transerrs.value.ui32 = 0; 8092 stp->sd_rq_media_err.value.ui32 = 0; 8093 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8094 stp->sd_rq_nodev_err.value.ui32 = 0; 8095 stp->sd_rq_recov_err.value.ui32 = 0; 8096 stp->sd_rq_illrq_err.value.ui32 = 0; 8097 stp->sd_rq_pfa_err.value.ui32 = 0; 8098 } 8099 8100 /* 8101 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8102 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8103 * (4376302)) 8104 */ 8105 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8106 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8107 sizeof (SD_INQUIRY(un)->inq_serial)); 8108 } 8109 8110 if (un->un_f_blockcount_is_valid != TRUE) { 8111 /* 8112 * Set capacity error stat to 0 for no media. This ensures 8113 * a valid capacity is displayed in response to 'iostat -E' 8114 * when no media is present in the device. 8115 */ 8116 stp->sd_capacity.value.ui64 = 0; 8117 } else { 8118 /* 8119 * Multiply un_blockcount by un->un_sys_blocksize to get 8120 * capacity. 8121 * 8122 * Note: for non-512 blocksize devices "un_blockcount" has been 8123 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8124 * (un_tgt_blocksize / un->un_sys_blocksize). 8125 */ 8126 stp->sd_capacity.value.ui64 = (uint64_t) 8127 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8128 } 8129 } 8130 8131 8132 /* 8133 * Function: sd_set_pstats 8134 * 8135 * Description: This routine instantiates and initializes the partition 8136 * stats for each partition with more than zero blocks. 8137 * (4363169) 8138 * 8139 * Arguments: un - driver soft state (unit) structure 8140 * 8141 * Context: Kernel thread context 8142 */ 8143 8144 static void 8145 sd_set_pstats(struct sd_lun *un) 8146 { 8147 char kstatname[KSTAT_STRLEN]; 8148 int instance; 8149 int i; 8150 diskaddr_t nblks = 0; 8151 char *partname = NULL; 8152 8153 ASSERT(un != NULL); 8154 8155 instance = ddi_get_instance(SD_DEVINFO(un)); 8156 8157 /* Note:x86: is this a VTOC8/VTOC16 difference? 
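 *
 * Naming sketch (illustrative): with the "%s%d,%s" format used below,
 * instance 1 and partition name "a" produce a kstat named "sd1,a"
 * ("ssd1,a" for the fibre module), class "partition", which is what
 * per-slice reporting tools such as iostat(1M) read.
 *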
*/ 8158 for (i = 0; i < NSDMAP; i++) { 8159 8160 if (cmlb_partinfo(un->un_cmlbhandle, i, 8161 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8162 continue; 8163 mutex_enter(SD_MUTEX(un)); 8164 8165 if ((un->un_pstats[i] == NULL) && 8166 (nblks != 0)) { 8167 8168 (void) snprintf(kstatname, sizeof (kstatname), 8169 "%s%d,%s", sd_label, instance, 8170 partname); 8171 8172 un->un_pstats[i] = kstat_create(sd_label, 8173 instance, kstatname, "partition", KSTAT_TYPE_IO, 8174 1, KSTAT_FLAG_PERSISTENT); 8175 if (un->un_pstats[i] != NULL) { 8176 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8177 kstat_install(un->un_pstats[i]); 8178 } 8179 } 8180 mutex_exit(SD_MUTEX(un)); 8181 } 8182 } 8183 8184 8185 #if (defined(__fibre)) 8186 /* 8187 * Function: sd_init_event_callbacks 8188 * 8189 * Description: This routine initializes the insertion and removal event 8190 * callbacks. (fibre only) 8191 * 8192 * Arguments: un - driver soft state (unit) structure 8193 * 8194 * Context: Kernel thread context 8195 */ 8196 8197 static void 8198 sd_init_event_callbacks(struct sd_lun *un) 8199 { 8200 ASSERT(un != NULL); 8201 8202 if ((un->un_insert_event == NULL) && 8203 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8204 &un->un_insert_event) == DDI_SUCCESS)) { 8205 /* 8206 * Add the callback for an insertion event 8207 */ 8208 (void) ddi_add_event_handler(SD_DEVINFO(un), 8209 un->un_insert_event, sd_event_callback, (void *)un, 8210 &(un->un_insert_cb_id)); 8211 } 8212 8213 if ((un->un_remove_event == NULL) && 8214 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8215 &un->un_remove_event) == DDI_SUCCESS)) { 8216 /* 8217 * Add the callback for a removal event 8218 */ 8219 (void) ddi_add_event_handler(SD_DEVINFO(un), 8220 un->un_remove_event, sd_event_callback, (void *)un, 8221 &(un->un_remove_cb_id)); 8222 } 8223 } 8224 8225 8226 /* 8227 * Function: sd_event_callback 8228 * 8229 * Description: This routine handles insert/remove events (photon). The 8230 * state is changed to OFFLINE which can be used to supress 8231 * error msgs. (fibre only) 8232 * 8233 * Arguments: un - driver soft state (unit) structure 8234 * 8235 * Context: Callout thread context 8236 */ 8237 /* ARGSUSED */ 8238 static void 8239 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8240 void *bus_impldata) 8241 { 8242 struct sd_lun *un = (struct sd_lun *)arg; 8243 8244 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8245 if (event == un->un_insert_event) { 8246 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8247 mutex_enter(SD_MUTEX(un)); 8248 if (un->un_state == SD_STATE_OFFLINE) { 8249 if (un->un_last_state != SD_STATE_SUSPENDED) { 8250 un->un_state = un->un_last_state; 8251 } else { 8252 /* 8253 * We have gone through SUSPEND/RESUME while 8254 * we were offline. Restore the last state 8255 */ 8256 un->un_state = un->un_save_state; 8257 } 8258 } 8259 mutex_exit(SD_MUTEX(un)); 8260 8261 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8262 } else if (event == un->un_remove_event) { 8263 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8264 mutex_enter(SD_MUTEX(un)); 8265 /* 8266 * We need to handle an event callback that occurs during 8267 * the suspend operation, since we don't prevent it. 
8268 */ 8269 if (un->un_state != SD_STATE_OFFLINE) { 8270 if (un->un_state != SD_STATE_SUSPENDED) { 8271 New_state(un, SD_STATE_OFFLINE); 8272 } else { 8273 un->un_last_state = SD_STATE_OFFLINE; 8274 } 8275 } 8276 mutex_exit(SD_MUTEX(un)); 8277 } else { 8278 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8279 "!Unknown event\n"); 8280 } 8281 8282 } 8283 #endif 8284 8285 /* 8286 * Function: sd_cache_control() 8287 * 8288 * Description: This routine is the driver entry point for setting 8289 * read and write caching by modifying the WCE (write cache 8290 * enable) and RCD (read cache disable) bits of mode 8291 * page 8 (MODEPAGE_CACHING). 8292 * 8293 * Arguments: un - driver soft state (unit) structure 8294 * rcd_flag - flag for controlling the read cache 8295 * wce_flag - flag for controlling the write cache 8296 * 8297 * Return Code: EIO 8298 * code returned by sd_send_scsi_MODE_SENSE and 8299 * sd_send_scsi_MODE_SELECT 8300 * 8301 * Context: Kernel Thread 8302 */ 8303 8304 static int 8305 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8306 { 8307 struct mode_caching *mode_caching_page; 8308 uchar_t *header; 8309 size_t buflen; 8310 int hdrlen; 8311 int bd_len; 8312 int rval = 0; 8313 struct mode_header_grp2 *mhp; 8314 8315 ASSERT(un != NULL); 8316 8317 /* 8318 * Do a test unit ready, otherwise a mode sense may not work if this 8319 * is the first command sent to the device after boot. 8320 */ 8321 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8322 8323 if (un->un_f_cfg_is_atapi == TRUE) { 8324 hdrlen = MODE_HEADER_LENGTH_GRP2; 8325 } else { 8326 hdrlen = MODE_HEADER_LENGTH; 8327 } 8328 8329 /* 8330 * Allocate memory for the retrieved mode page and its headers. Set 8331 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8332 * we get all of the mode sense data; otherwise, the mode select 8333 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8334 */ 8335 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8336 sizeof (struct mode_cache_scsi3); 8337 8338 header = kmem_zalloc(buflen, KM_SLEEP); 8339 8340 /* Get the information from the device. */ 8341 if (un->un_f_cfg_is_atapi == TRUE) { 8342 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8343 MODEPAGE_CACHING, SD_PATH_DIRECT); 8344 } else { 8345 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8346 MODEPAGE_CACHING, SD_PATH_DIRECT); 8347 } 8348 if (rval != 0) { 8349 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8350 "sd_cache_control: Mode Sense Failed\n"); 8351 kmem_free(header, buflen); 8352 return (rval); 8353 } 8354 8355 /* 8356 * Determine size of Block Descriptors in order to locate 8357 * the mode page data. ATAPI devices return 0, SCSI devices 8358 * should return MODE_BLK_DESC_LENGTH.
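 *
 * Layout of the buffer being parsed (sketch; the offsets mirror the
 * pointer arithmetic that follows):
 *
 *	header				hdrlen bytes, group 0 or group 2
 *	block descriptor(s)		bd_len bytes, may be 0
 *	MODEPAGE_CACHING page		starts at header + hdrlen + bd_len
 *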
8359 */ 8360 if (un->un_f_cfg_is_atapi == TRUE) { 8361 mhp = (struct mode_header_grp2 *)header; 8362 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8363 } else { 8364 bd_len = ((struct mode_header *)header)->bdesc_length; 8365 } 8366 8367 if (bd_len > MODE_BLK_DESC_LENGTH) { 8368 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8369 "sd_cache_control: Mode Sense returned invalid " 8370 "block descriptor length\n"); 8371 kmem_free(header, buflen); 8372 return (EIO); 8373 } 8374 8375 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8376 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8377 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8378 " caching page code mismatch %d\n", 8379 mode_caching_page->mode_page.code); 8380 kmem_free(header, buflen); 8381 return (EIO); 8382 } 8383 8384 /* Check the relevant bits on successful mode sense. */ 8385 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8386 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8387 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8388 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8389 8390 size_t sbuflen; 8391 uchar_t save_pg; 8392 8393 /* 8394 * Construct select buffer length based on the 8395 * length of the sense data returned. 8396 */ 8397 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8398 sizeof (struct mode_page) + 8399 (int)mode_caching_page->mode_page.length; 8400 8401 /* 8402 * Set the caching bits as requested. 8403 */ 8404 if (rcd_flag == SD_CACHE_ENABLE) 8405 mode_caching_page->rcd = 0; 8406 else if (rcd_flag == SD_CACHE_DISABLE) 8407 mode_caching_page->rcd = 1; 8408 8409 if (wce_flag == SD_CACHE_ENABLE) 8410 mode_caching_page->wce = 1; 8411 else if (wce_flag == SD_CACHE_DISABLE) 8412 mode_caching_page->wce = 0; 8413 8414 /* 8415 * Save the page if the mode sense says the 8416 * drive supports it. 8417 */ 8418 save_pg = mode_caching_page->mode_page.ps ? 8419 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8420 8421 /* Clear reserved bits before mode select. */ 8422 mode_caching_page->mode_page.ps = 0; 8423 8424 /* 8425 * Clear out mode header for mode select. 8426 * The rest of the retrieved page will be reused. 8427 */ 8428 bzero(header, hdrlen); 8429 8430 if (un->un_f_cfg_is_atapi == TRUE) { 8431 mhp = (struct mode_header_grp2 *)header; 8432 mhp->bdesc_length_hi = bd_len >> 8; 8433 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8434 } else { 8435 ((struct mode_header *)header)->bdesc_length = bd_len; 8436 } 8437 8438 /* Issue mode select to change the cache settings */ 8439 if (un->un_f_cfg_is_atapi == TRUE) { 8440 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8441 sbuflen, save_pg, SD_PATH_DIRECT); 8442 } else { 8443 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8444 sbuflen, save_pg, SD_PATH_DIRECT); 8445 } 8446 } 8447 8448 kmem_free(header, buflen); 8449 return (rval); 8450 } 8451 8452 8453 /* 8454 * Function: sd_get_write_cache_enabled() 8455 * 8456 * Description: This routine is the driver entry point for determining if 8457 * write caching is enabled. It examines the WCE (write cache 8458 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8459 * 8460 * Arguments: un - driver soft state (unit) structure 8461 * is_enabled - pointer to int where write cache enabled state 8462 * is returned (non-zero -> write cache enabled) 8463 * 8464 * 8465 * Return Code: EIO 8466 * code returned by sd_send_scsi_MODE_SENSE 8467 * 8468 * Context: Kernel Thread 8469 * 8470 * NOTE: If ioctl is added to disable write cache, this sequence should 8471 * be followed so that no locking is required for accesses to 8472 * un->un_f_write_cache_enabled: 8473 * do mode select to clear wce 8474 * do synchronize cache to flush cache 8475 * set un->un_f_write_cache_enabled = FALSE 8476 * 8477 * Conversely, an ioctl to enable the write cache should be done 8478 * in this order: 8479 * set un->un_f_write_cache_enabled = TRUE 8480 * do mode select to set wce 8481 */ 8482 8483 static int 8484 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8485 { 8486 struct mode_caching *mode_caching_page; 8487 uchar_t *header; 8488 size_t buflen; 8489 int hdrlen; 8490 int bd_len; 8491 int rval = 0; 8492 8493 ASSERT(un != NULL); 8494 ASSERT(is_enabled != NULL); 8495 8496 /* in case of error, flag as enabled */ 8497 *is_enabled = TRUE; 8498 8499 /* 8500 * Do a test unit ready, otherwise a mode sense may not work if this 8501 * is the first command sent to the device after boot. 8502 */ 8503 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8504 8505 if (un->un_f_cfg_is_atapi == TRUE) { 8506 hdrlen = MODE_HEADER_LENGTH_GRP2; 8507 } else { 8508 hdrlen = MODE_HEADER_LENGTH; 8509 } 8510 8511 /* 8512 * Allocate memory for the retrieved mode page and its headers. Set 8513 * a pointer to the page itself. 8514 */ 8515 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8516 header = kmem_zalloc(buflen, KM_SLEEP); 8517 8518 /* Get the information from the device. */ 8519 if (un->un_f_cfg_is_atapi == TRUE) { 8520 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8521 MODEPAGE_CACHING, SD_PATH_DIRECT); 8522 } else { 8523 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8524 MODEPAGE_CACHING, SD_PATH_DIRECT); 8525 } 8526 if (rval != 0) { 8527 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8528 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8529 kmem_free(header, buflen); 8530 return (rval); 8531 } 8532 8533 /* 8534 * Determine size of Block Descriptors in order to locate 8535 * the mode page data. ATAPI devices return 0, SCSI devices 8536 * should return MODE_BLK_DESC_LENGTH. 
8537 */ 8538 if (un->un_f_cfg_is_atapi == TRUE) { 8539 struct mode_header_grp2 *mhp; 8540 mhp = (struct mode_header_grp2 *)header; 8541 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8542 } else { 8543 bd_len = ((struct mode_header *)header)->bdesc_length; 8544 } 8545 8546 if (bd_len > MODE_BLK_DESC_LENGTH) { 8547 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8548 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8549 "block descriptor length\n"); 8550 kmem_free(header, buflen); 8551 return (EIO); 8552 } 8553 8554 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8555 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8556 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense" 8557 " caching page code mismatch %d\n", 8558 mode_caching_page->mode_page.code); 8559 kmem_free(header, buflen); 8560 return (EIO); 8561 } 8562 *is_enabled = mode_caching_page->wce; 8563 8564 kmem_free(header, buflen); 8565 return (0); 8566 } 8567 8568 8569 /* 8570 * Function: sd_make_device 8571 * 8572 * Description: Utility routine to return the Solaris device number from 8573 * the data in the device's dev_info structure. 8574 * 8575 * Return Code: The Solaris device number 8576 * 8577 * Context: Any 8578 */ 8579 8580 static dev_t 8581 sd_make_device(dev_info_t *devi) 8582 { 8583 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8584 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8585 } 8586 8587 8588 /* 8589 * Function: sd_pm_entry 8590 * 8591 * Description: Called at the start of a new command to manage power 8592 * and busy status of a device. This includes determining whether 8593 * the current power state of the device is sufficient for 8594 * performing the command or whether it must be changed. 8595 * The PM framework is notified appropriately. 8596 * Only with a return status of DDI_SUCCESS will the 8597 * component be marked busy to the framework. 8598 * 8599 * All callers of sd_pm_entry must check the return status 8600 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 8601 * of DDI_FAILURE indicates the device failed to power up. 8602 * In this case un_pm_count has been adjusted so the result 8603 * on exit is still powered down, i.e. count is less than 0. 8604 * Calling sd_pm_exit with this count value hits an ASSERT. 8605 * 8606 * Return Code: DDI_SUCCESS or DDI_FAILURE 8607 * 8608 * Context: Kernel thread context. 8609 */ 8610 8611 static int 8612 sd_pm_entry(struct sd_lun *un) 8613 { 8614 int return_status = DDI_SUCCESS; 8615 8616 ASSERT(!mutex_owned(SD_MUTEX(un))); 8617 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8618 8619 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8620 8621 if (un->un_f_pm_is_enabled == FALSE) { 8622 SD_TRACE(SD_LOG_IO_PM, un, 8623 "sd_pm_entry: exiting, PM not enabled\n"); 8624 return (return_status); 8625 } 8626 8627 /* 8628 * Just increment a counter if PM is enabled. On the transition from 8629 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8630 * the count with each IO and mark the device as idle when the count 8631 * hits 0. 8632 * 8633 * If the count is less than 0 the device is powered down. If a powered 8634 * down device is successfully powered up then the count must be 8635 * incremented to reflect the power up. Note that it'll get incremented 8636 * a second time to become busy. 8637 * 8638 * Because the following has the potential to change the device state 8639 * and must release the un_pm_mutex to do so, only one thread can be 8640 * allowed through at a time.
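 *
 * Caller-side sketch (illustrative; the real callers are in the open
 * and I/O paths of this driver):
 *
 *	if (sd_pm_entry(un) != DDI_SUCCESS)
 *		return (EIO);		device could not be powered up
 *	issue the command(s)
 *	sd_pm_exit(un);
 *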
8641 */ 8642 8643 mutex_enter(&un->un_pm_mutex); 8644 while (un->un_pm_busy == TRUE) { 8645 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8646 } 8647 un->un_pm_busy = TRUE; 8648 8649 if (un->un_pm_count < 1) { 8650 8651 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8652 8653 /* 8654 * Indicate we are now busy so the framework won't attempt to 8655 * power down the device. This call will only fail if either 8656 * we passed a bad component number or the device has no 8657 * components. Neither of these should ever happen. 8658 */ 8659 mutex_exit(&un->un_pm_mutex); 8660 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8661 ASSERT(return_status == DDI_SUCCESS); 8662 8663 mutex_enter(&un->un_pm_mutex); 8664 8665 if (un->un_pm_count < 0) { 8666 mutex_exit(&un->un_pm_mutex); 8667 8668 SD_TRACE(SD_LOG_IO_PM, un, 8669 "sd_pm_entry: power up component\n"); 8670 8671 /* 8672 * pm_raise_power will cause sdpower to be called 8673 * which brings the device power level to the 8674 * desired state, ON in this case. If successful, 8675 * un_pm_count and un_power_level will be updated 8676 * appropriately. 8677 */ 8678 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8679 SD_SPINDLE_ON); 8680 8681 mutex_enter(&un->un_pm_mutex); 8682 8683 if (return_status != DDI_SUCCESS) { 8684 /* 8685 * Power up failed. 8686 * Idle the device and adjust the count 8687 * so the result on exit is that we're 8688 * still powered down, ie. count is less than 0. 8689 */ 8690 SD_TRACE(SD_LOG_IO_PM, un, 8691 "sd_pm_entry: power up failed," 8692 " idle the component\n"); 8693 8694 (void) pm_idle_component(SD_DEVINFO(un), 0); 8695 un->un_pm_count--; 8696 } else { 8697 /* 8698 * Device is powered up, verify the 8699 * count is non-negative. 8700 * This is debug only. 8701 */ 8702 ASSERT(un->un_pm_count == 0); 8703 } 8704 } 8705 8706 if (return_status == DDI_SUCCESS) { 8707 /* 8708 * For performance, now that the device has been tagged 8709 * as busy, and it's known to be powered up, update the 8710 * chain types to use jump tables that do not include 8711 * pm. This significantly lowers the overhead and 8712 * therefore improves performance. 8713 */ 8714 8715 mutex_exit(&un->un_pm_mutex); 8716 mutex_enter(SD_MUTEX(un)); 8717 SD_TRACE(SD_LOG_IO_PM, un, 8718 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8719 un->un_uscsi_chain_type); 8720 8721 if (un->un_f_non_devbsize_supported) { 8722 un->un_buf_chain_type = 8723 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8724 } else { 8725 un->un_buf_chain_type = 8726 SD_CHAIN_INFO_DISK_NO_PM; 8727 } 8728 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8729 8730 SD_TRACE(SD_LOG_IO_PM, un, 8731 " changed uscsi_chain_type to %d\n", 8732 un->un_uscsi_chain_type); 8733 mutex_exit(SD_MUTEX(un)); 8734 mutex_enter(&un->un_pm_mutex); 8735 8736 if (un->un_pm_idle_timeid == NULL) { 8737 /* 300 ms. */ 8738 un->un_pm_idle_timeid = 8739 timeout(sd_pm_idletimeout_handler, un, 8740 (drv_usectohz((clock_t)300000))); 8741 /* 8742 * Include an extra call to busy which keeps the 8743 * device busy with-respect-to the PM layer 8744 * until the timer fires, at which time it'll 8745 * get the extra idle call. 8746 */ 8747 (void) pm_busy_component(SD_DEVINFO(un), 0); 8748 } 8749 } 8750 } 8751 un->un_pm_busy = FALSE; 8752 /* Next... 
*/ 8753 cv_signal(&un->un_pm_busy_cv); 8754 8755 un->un_pm_count++; 8756 8757 SD_TRACE(SD_LOG_IO_PM, un, 8758 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 8759 8760 mutex_exit(&un->un_pm_mutex); 8761 8762 return (return_status); 8763 } 8764 8765 8766 /* 8767 * Function: sd_pm_exit 8768 * 8769 * Description: Called at the completion of a command to manage busy 8770 * status for the device. If the device becomes idle the 8771 * PM framework is notified. 8772 * 8773 * Context: Kernel thread context 8774 */ 8775 8776 static void 8777 sd_pm_exit(struct sd_lun *un) 8778 { 8779 ASSERT(!mutex_owned(SD_MUTEX(un))); 8780 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8781 8782 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 8783 8784 /* 8785 * After attach the following flag is only read, so don't 8786 * take the penalty of acquiring a mutex for it. 8787 */ 8788 if (un->un_f_pm_is_enabled == TRUE) { 8789 8790 mutex_enter(&un->un_pm_mutex); 8791 un->un_pm_count--; 8792 8793 SD_TRACE(SD_LOG_IO_PM, un, 8794 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 8795 8796 ASSERT(un->un_pm_count >= 0); 8797 if (un->un_pm_count == 0) { 8798 mutex_exit(&un->un_pm_mutex); 8799 8800 SD_TRACE(SD_LOG_IO_PM, un, 8801 "sd_pm_exit: idle component\n"); 8802 8803 (void) pm_idle_component(SD_DEVINFO(un), 0); 8804 8805 } else { 8806 mutex_exit(&un->un_pm_mutex); 8807 } 8808 } 8809 8810 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 8811 } 8812 8813 8814 /* 8815 * Function: sdopen 8816 * 8817 * Description: Driver's open(9e) entry point function. 8818 * 8819 * Arguments: dev_i - pointer to device number 8820 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 8821 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 8822 * cred_p - user credential pointer 8823 * 8824 * Return Code: EINVAL 8825 * ENXIO 8826 * EIO 8827 * EROFS 8828 * EBUSY 8829 * 8830 * Context: Kernel thread context 8831 */ 8832 /* ARGSUSED */ 8833 static int 8834 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 8835 { 8836 struct sd_lun *un; 8837 int nodelay; 8838 int part; 8839 uint64_t partmask; 8840 int instance; 8841 dev_t dev; 8842 int rval = EIO; 8843 diskaddr_t nblks = 0; 8844 8845 /* Validate the open type */ 8846 if (otyp >= OTYPCNT) { 8847 return (EINVAL); 8848 } 8849 8850 dev = *dev_p; 8851 instance = SDUNIT(dev); 8852 mutex_enter(&sd_detach_mutex); 8853 8854 /* 8855 * Fail the open if there is no softstate for the instance, or 8856 * if another thread somewhere is trying to detach the instance. 8857 */ 8858 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 8859 (un->un_detach_count != 0)) { 8860 mutex_exit(&sd_detach_mutex); 8861 /* 8862 * The probe cache only needs to be cleared when open (9e) fails 8863 * with ENXIO (4238046). 8864 */ 8865 /* 8866 * un-conditionally clearing probe cache is ok with 8867 * separate sd/ssd binaries 8868 * x86 platform can be an issue with both parallel 8869 * and fibre in 1 binary 8870 */ 8871 sd_scsi_clear_probe_cache(); 8872 return (ENXIO); 8873 } 8874 8875 /* 8876 * The un_layer_count is to prevent another thread in specfs from 8877 * trying to detach the instance, which can happen when we are 8878 * called from a higher-layer driver instead of thru specfs. 8879 * This will not be needed when DDI provides a layered driver 8880 * interface that allows specfs to know that an instance is in 8881 * use by a layered driver & should not be detached. 8882 * 8883 * Note: the semantics for layered driver opens are exactly one 8884 * close for every open. 
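 *
 * Layered-consumer sketch (assumption: a client driver using the LDI,
 * not code from this file):
 *
 *	ldi_ident_t	li;
 *	ldi_handle_t	lh;
 *	(void) ldi_ident_from_mod(&modlinkage, &li);
 *	if (ldi_open_by_name("/dev/dsk/c0t0d0s0", FREAD | FWRITE,
 *	    kcred, &lh, li) == 0) {
 *		do I/O via ldi_read(9F) and ldi_write(9F)
 *		(void) ldi_close(lh, FREAD | FWRITE, kcred);
 *	}
 *
 * Each such open arrives here as OTYP_LYR and must be balanced by
 * exactly one close; un_layer_count tracks that pairing.
 *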
8885 */ 8886 if (otyp == OTYP_LYR) { 8887 un->un_layer_count++; 8888 } 8889 8890 /* 8891 * Keep a count of the current # of opens in progress. This is because 8892 * some layered drivers try to call us as a regular open. This can 8893 * cause problems that we cannot prevent, however by keeping this count 8894 * we can at least keep our open and detach routines from racing against 8895 * each other under such conditions. 8896 */ 8897 un->un_opens_in_progress++; 8898 mutex_exit(&sd_detach_mutex); 8899 8900 nodelay = (flag & (FNDELAY | FNONBLOCK)); 8901 part = SDPART(dev); 8902 partmask = 1 << part; 8903 8904 /* 8905 * We use a semaphore here in order to serialize 8906 * open and close requests on the device. 8907 */ 8908 sema_p(&un->un_semoclose); 8909 8910 mutex_enter(SD_MUTEX(un)); 8911 8912 /* 8913 * All device accesses go thru sdstrategy() where we check 8914 * on suspend status but there could be a scsi_poll command, 8915 * which bypasses sdstrategy(), so we need to check pm 8916 * status. 8917 */ 8918 8919 if (!nodelay) { 8920 while ((un->un_state == SD_STATE_SUSPENDED) || 8921 (un->un_state == SD_STATE_PM_CHANGING)) { 8922 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 8923 } 8924 8925 mutex_exit(SD_MUTEX(un)); 8926 if (sd_pm_entry(un) != DDI_SUCCESS) { 8927 rval = EIO; 8928 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 8929 "sdopen: sd_pm_entry failed\n"); 8930 goto open_failed_with_pm; 8931 } 8932 mutex_enter(SD_MUTEX(un)); 8933 } 8934 8935 /* check for previous exclusive open */ 8936 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 8937 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 8938 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 8939 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 8940 8941 if (un->un_exclopen & (partmask)) { 8942 goto excl_open_fail; 8943 } 8944 8945 if (flag & FEXCL) { 8946 int i; 8947 if (un->un_ocmap.lyropen[part]) { 8948 goto excl_open_fail; 8949 } 8950 for (i = 0; i < (OTYPCNT - 1); i++) { 8951 if (un->un_ocmap.regopen[i] & (partmask)) { 8952 goto excl_open_fail; 8953 } 8954 } 8955 } 8956 8957 /* 8958 * Check the write permission if this is a removable media device, 8959 * NDELAY has not been set, and writable permission is requested. 8960 * 8961 * Note: If NDELAY was set and this is write-protected media the WRITE 8962 * attempt will fail with EIO as part of the I/O processing. This is a 8963 * more permissive implementation that allows the open to succeed and 8964 * WRITE attempts to fail when appropriate. 8965 */ 8966 if (un->un_f_chk_wp_open) { 8967 if ((flag & FWRITE) && (!nodelay)) { 8968 mutex_exit(SD_MUTEX(un)); 8969 /* 8970 * Defer the check for write permission on writable 8971 * DVD drive till sdstrategy and will not fail open even 8972 * if FWRITE is set as the device can be writable 8973 * depending upon the media and the media can change 8974 * after the call to open(). 8975 */ 8976 if (un->un_f_dvdram_writable_device == FALSE) { 8977 if (ISCD(un) || sr_check_wp(dev)) { 8978 rval = EROFS; 8979 mutex_enter(SD_MUTEX(un)); 8980 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8981 "write to cd or write protected media\n"); 8982 goto open_fail; 8983 } 8984 } 8985 mutex_enter(SD_MUTEX(un)); 8986 } 8987 } 8988 8989 /* 8990 * If opening in NDELAY/NONBLOCK mode, just return. 8991 * Check if disk is ready and has a valid geometry later. 
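 * (A userland open(2) with O_NONBLOCK or O_NDELAY arrives here with
 * FNONBLOCK/FNDELAY set in 'flag', so 'nodelay' is non-zero and the
 * sd_ready_and_valid() probe below is skipped; such an open can
 * succeed even with no media present.)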
8992 */ 8993 if (!nodelay) { 8994 mutex_exit(SD_MUTEX(un)); 8995 rval = sd_ready_and_valid(un); 8996 mutex_enter(SD_MUTEX(un)); 8997 /* 8998 * Fail if device is not ready or if the number of disk 8999 * blocks is zero or negative for non CD devices. 9000 */ 9001 9002 nblks = 0; 9003 9004 if (rval == SD_READY_VALID && (!ISCD(un))) { 9005 /* if cmlb_partinfo fails, nblks remains 0 */ 9006 mutex_exit(SD_MUTEX(un)); 9007 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9008 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9009 mutex_enter(SD_MUTEX(un)); 9010 } 9011 9012 if ((rval != SD_READY_VALID) || 9013 (!ISCD(un) && nblks <= 0)) { 9014 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9015 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9016 "device not ready or invalid disk block value\n"); 9017 goto open_fail; 9018 } 9019 #if defined(__i386) || defined(__amd64) 9020 } else { 9021 uchar_t *cp; 9022 /* 9023 * x86 requires special nodelay handling, so that p0 is 9024 * always defined and accessible. 9025 * Invalidate geometry only if device is not already open. 9026 */ 9027 cp = &un->un_ocmap.chkd[0]; 9028 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9029 if (*cp != (uchar_t)0) { 9030 break; 9031 } 9032 cp++; 9033 } 9034 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9035 mutex_exit(SD_MUTEX(un)); 9036 cmlb_invalidate(un->un_cmlbhandle, 9037 (void *)SD_PATH_DIRECT); 9038 mutex_enter(SD_MUTEX(un)); 9039 } 9040 9041 #endif 9042 } 9043 9044 if (otyp == OTYP_LYR) { 9045 un->un_ocmap.lyropen[part]++; 9046 } else { 9047 un->un_ocmap.regopen[otyp] |= partmask; 9048 } 9049 9050 /* Set up open and exclusive open flags */ 9051 if (flag & FEXCL) { 9052 un->un_exclopen |= (partmask); 9053 } 9054 9055 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9056 "open of part %d type %d\n", part, otyp); 9057 9058 mutex_exit(SD_MUTEX(un)); 9059 if (!nodelay) { 9060 sd_pm_exit(un); 9061 } 9062 9063 sema_v(&un->un_semoclose); 9064 9065 mutex_enter(&sd_detach_mutex); 9066 un->un_opens_in_progress--; 9067 mutex_exit(&sd_detach_mutex); 9068 9069 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9070 return (DDI_SUCCESS); 9071 9072 excl_open_fail: 9073 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9074 rval = EBUSY; 9075 9076 open_fail: 9077 mutex_exit(SD_MUTEX(un)); 9078 9079 /* 9080 * On a failed open we must exit the pm management. 9081 */ 9082 if (!nodelay) { 9083 sd_pm_exit(un); 9084 } 9085 open_failed_with_pm: 9086 sema_v(&un->un_semoclose); 9087 9088 mutex_enter(&sd_detach_mutex); 9089 un->un_opens_in_progress--; 9090 if (otyp == OTYP_LYR) { 9091 un->un_layer_count--; 9092 } 9093 mutex_exit(&sd_detach_mutex); 9094 9095 return (rval); 9096 } 9097 9098 9099 /* 9100 * Function: sdclose 9101 * 9102 * Description: Driver's close(9e) entry point function. 
9103 * 9104 * Arguments: dev - device number 9105 * flag - file status flag, informational only 9106 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9107 * cred_p - user credential pointer 9108 * 9109 * Return Code: ENXIO 9110 * 9111 * Context: Kernel thread context 9112 */ 9113 /* ARGSUSED */ 9114 static int 9115 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9116 { 9117 struct sd_lun *un; 9118 uchar_t *cp; 9119 int part; 9120 int nodelay; 9121 int rval = 0; 9122 9123 /* Validate the open type */ 9124 if (otyp >= OTYPCNT) { 9125 return (ENXIO); 9126 } 9127 9128 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9129 return (ENXIO); 9130 } 9131 9132 part = SDPART(dev); 9133 nodelay = flag & (FNDELAY | FNONBLOCK); 9134 9135 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9136 "sdclose: close of part %d type %d\n", part, otyp); 9137 9138 /* 9139 * We use a semaphore here in order to serialize 9140 * open and close requests on the device. 9141 */ 9142 sema_p(&un->un_semoclose); 9143 9144 mutex_enter(SD_MUTEX(un)); 9145 9146 /* Don't proceed if power is being changed. */ 9147 while (un->un_state == SD_STATE_PM_CHANGING) { 9148 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9149 } 9150 9151 if (un->un_exclopen & (1 << part)) { 9152 un->un_exclopen &= ~(1 << part); 9153 } 9154 9155 /* Update the open partition map */ 9156 if (otyp == OTYP_LYR) { 9157 un->un_ocmap.lyropen[part] -= 1; 9158 } else { 9159 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9160 } 9161 9162 cp = &un->un_ocmap.chkd[0]; 9163 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9164 if (*cp != NULL) { 9165 break; 9166 } 9167 cp++; 9168 } 9169 9170 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9171 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9172 9173 /* 9174 * We avoid persistance upon the last close, and set 9175 * the throttle back to the maximum. 9176 */ 9177 un->un_throttle = un->un_saved_throttle; 9178 9179 if (un->un_state == SD_STATE_OFFLINE) { 9180 if (un->un_f_is_fibre == FALSE) { 9181 scsi_log(SD_DEVINFO(un), sd_label, 9182 CE_WARN, "offline\n"); 9183 } 9184 mutex_exit(SD_MUTEX(un)); 9185 cmlb_invalidate(un->un_cmlbhandle, 9186 (void *)SD_PATH_DIRECT); 9187 mutex_enter(SD_MUTEX(un)); 9188 9189 } else { 9190 /* 9191 * Flush any outstanding writes in NVRAM cache. 9192 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9193 * cmd, it may not work for non-Pluto devices. 9194 * SYNCHRONIZE CACHE is not required for removables, 9195 * except DVD-RAM drives. 9196 * 9197 * Also note: because SYNCHRONIZE CACHE is currently 9198 * the only command issued here that requires the 9199 * drive be powered up, only do the power up before 9200 * sending the Sync Cache command. If additional 9201 * commands are added which require a powered up 9202 * drive, the following sequence may have to change. 9203 * 9204 * And finally, note that parallel SCSI on SPARC 9205 * only issues a Sync Cache to DVD-RAM, a newly 9206 * supported device. 
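 * (As the code below shows, a failed flush other than ENOTSUP is
 * reported to the caller as EIO, while ENOTSUP -- the drive simply
 * not implementing SYNCHRONIZE CACHE -- is silently ignored.)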
9207 */ 9208 #if defined(__i386) || defined(__amd64) 9209 if (un->un_f_sync_cache_supported || 9210 un->un_f_dvdram_writable_device == TRUE) { 9211 #else 9212 if (un->un_f_dvdram_writable_device == TRUE) { 9213 #endif 9214 mutex_exit(SD_MUTEX(un)); 9215 if (sd_pm_entry(un) == DDI_SUCCESS) { 9216 rval = 9217 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9218 NULL); 9219 /* ignore error if not supported */ 9220 if (rval == ENOTSUP) { 9221 rval = 0; 9222 } else if (rval != 0) { 9223 rval = EIO; 9224 } 9225 sd_pm_exit(un); 9226 } else { 9227 rval = EIO; 9228 } 9229 mutex_enter(SD_MUTEX(un)); 9230 } 9231 9232 /* 9233 * For devices which supports DOOR_LOCK, send an ALLOW 9234 * MEDIA REMOVAL command, but don't get upset if it 9235 * fails. We need to raise the power of the drive before 9236 * we can call sd_send_scsi_DOORLOCK() 9237 */ 9238 if (un->un_f_doorlock_supported) { 9239 mutex_exit(SD_MUTEX(un)); 9240 if (sd_pm_entry(un) == DDI_SUCCESS) { 9241 rval = sd_send_scsi_DOORLOCK(un, 9242 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9243 9244 sd_pm_exit(un); 9245 if (ISCD(un) && (rval != 0) && 9246 (nodelay != 0)) { 9247 rval = ENXIO; 9248 } 9249 } else { 9250 rval = EIO; 9251 } 9252 mutex_enter(SD_MUTEX(un)); 9253 } 9254 9255 /* 9256 * If a device has removable media, invalidate all 9257 * parameters related to media, such as geometry, 9258 * blocksize, and blockcount. 9259 */ 9260 if (un->un_f_has_removable_media) { 9261 sr_ejected(un); 9262 } 9263 9264 /* 9265 * Destroy the cache (if it exists) which was 9266 * allocated for the write maps since this is 9267 * the last close for this media. 9268 */ 9269 if (un->un_wm_cache) { 9270 /* 9271 * Check if there are pending commands. 9272 * and if there are give a warning and 9273 * do not destroy the cache. 9274 */ 9275 if (un->un_ncmds_in_driver > 0) { 9276 scsi_log(SD_DEVINFO(un), 9277 sd_label, CE_WARN, 9278 "Unable to clean up memory " 9279 "because of pending I/O\n"); 9280 } else { 9281 kmem_cache_destroy( 9282 un->un_wm_cache); 9283 un->un_wm_cache = NULL; 9284 } 9285 } 9286 } 9287 } 9288 9289 mutex_exit(SD_MUTEX(un)); 9290 sema_v(&un->un_semoclose); 9291 9292 if (otyp == OTYP_LYR) { 9293 mutex_enter(&sd_detach_mutex); 9294 /* 9295 * The detach routine may run when the layer count 9296 * drops to zero. 9297 */ 9298 un->un_layer_count--; 9299 mutex_exit(&sd_detach_mutex); 9300 } 9301 9302 return (rval); 9303 } 9304 9305 9306 /* 9307 * Function: sd_ready_and_valid 9308 * 9309 * Description: Test if device is ready and has a valid geometry. 9310 * 9311 * Arguments: dev - device number 9312 * un - driver soft state (unit) structure 9313 * 9314 * Return Code: SD_READY_VALID ready and valid label 9315 * SD_NOT_READY_VALID not ready, no label 9316 * SD_RESERVED_BY_OTHERS reservation conflict 9317 * 9318 * Context: Never called at interrupt context. 9319 */ 9320 9321 static int 9322 sd_ready_and_valid(struct sd_lun *un) 9323 { 9324 struct sd_errstats *stp; 9325 uint64_t capacity; 9326 uint_t lbasize; 9327 int rval = SD_READY_VALID; 9328 char name_str[48]; 9329 int is_valid; 9330 9331 ASSERT(un != NULL); 9332 ASSERT(!mutex_owned(SD_MUTEX(un))); 9333 9334 mutex_enter(SD_MUTEX(un)); 9335 /* 9336 * If a device has removable media, we must check if media is 9337 * ready when checking if this device is ready and valid. 
9338 */ 9339 if (un->un_f_has_removable_media) { 9340 mutex_exit(SD_MUTEX(un)); 9341 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9342 rval = SD_NOT_READY_VALID; 9343 mutex_enter(SD_MUTEX(un)); 9344 goto done; 9345 } 9346 9347 is_valid = SD_IS_VALID_LABEL(un); 9348 mutex_enter(SD_MUTEX(un)); 9349 if (!is_valid || 9350 (un->un_f_blockcount_is_valid == FALSE) || 9351 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9352 9353 /* capacity has to be read every open. */ 9354 mutex_exit(SD_MUTEX(un)); 9355 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9356 &lbasize, SD_PATH_DIRECT) != 0) { 9357 cmlb_invalidate(un->un_cmlbhandle, 9358 (void *)SD_PATH_DIRECT); 9359 mutex_enter(SD_MUTEX(un)); 9360 rval = SD_NOT_READY_VALID; 9361 goto done; 9362 } else { 9363 mutex_enter(SD_MUTEX(un)); 9364 sd_update_block_info(un, lbasize, capacity); 9365 } 9366 } 9367 9368 /* 9369 * Check if the media in the device is writable or not. 9370 */ 9371 if (!is_valid && ISCD(un)) { 9372 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9373 } 9374 9375 } else { 9376 /* 9377 * Do a test unit ready to clear any unit attention from non-cd 9378 * devices. 9379 */ 9380 mutex_exit(SD_MUTEX(un)); 9381 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9382 mutex_enter(SD_MUTEX(un)); 9383 } 9384 9385 9386 /* 9387 * If this is a non 512 block device, allocate space for 9388 * the wmap cache. This is being done here since every time 9389 * a media is changed this routine will be called and the 9390 * block size is a function of media rather than device. 9391 */ 9392 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9393 if (!(un->un_wm_cache)) { 9394 (void) snprintf(name_str, sizeof (name_str), 9395 "%s%d_cache", 9396 ddi_driver_name(SD_DEVINFO(un)), 9397 ddi_get_instance(SD_DEVINFO(un))); 9398 un->un_wm_cache = kmem_cache_create( 9399 name_str, sizeof (struct sd_w_map), 9400 8, sd_wm_cache_constructor, 9401 sd_wm_cache_destructor, NULL, 9402 (void *)un, NULL, 0); 9403 if (!(un->un_wm_cache)) { 9404 rval = ENOMEM; 9405 goto done; 9406 } 9407 } 9408 } 9409 9410 if (un->un_state == SD_STATE_NORMAL) { 9411 /* 9412 * If the target is not yet ready here (defined by a TUR 9413 * failure), invalidate the geometry and print an 'offline' 9414 * message. This is a legacy message, as the state of the 9415 * target is not actually changed to SD_STATE_OFFLINE. 9416 * 9417 * If the TUR fails for EACCES (Reservation Conflict), 9418 * SD_RESERVED_BY_OTHERS will be returned to indicate 9419 * reservation conflict. If the TUR fails for other 9420 * reasons, SD_NOT_READY_VALID will be returned. 
9421 */ 9422 int err; 9423 9424 mutex_exit(SD_MUTEX(un)); 9425 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9426 mutex_enter(SD_MUTEX(un)); 9427 9428 if (err != 0) { 9429 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9430 "offline or reservation conflict\n"); 9431 mutex_exit(SD_MUTEX(un)); 9432 cmlb_invalidate(un->un_cmlbhandle, 9433 (void *)SD_PATH_DIRECT); 9434 mutex_enter(SD_MUTEX(un)); 9435 if (err == EACCES) { 9436 rval = SD_RESERVED_BY_OTHERS; 9437 } else { 9438 rval = SD_NOT_READY_VALID; 9439 } 9440 goto done; 9441 } 9442 } 9443 9444 if (un->un_f_format_in_progress == FALSE) { 9445 mutex_exit(SD_MUTEX(un)); 9446 if (cmlb_validate(un->un_cmlbhandle, 0, 9447 (void *)SD_PATH_DIRECT) != 0) { 9448 rval = SD_NOT_READY_VALID; 9449 mutex_enter(SD_MUTEX(un)); 9450 goto done; 9451 } 9452 if (un->un_f_pkstats_enabled) { 9453 sd_set_pstats(un); 9454 SD_TRACE(SD_LOG_IO_PARTITION, un, 9455 "sd_ready_and_valid: un:0x%p pstats created and " 9456 "set\n", un); 9457 } 9458 mutex_enter(SD_MUTEX(un)); 9459 } 9460 9461 /* 9462 * If this device supports DOOR_LOCK command, try and send 9463 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9464 * if it fails. For a CD, however, it is an error 9465 */ 9466 if (un->un_f_doorlock_supported) { 9467 mutex_exit(SD_MUTEX(un)); 9468 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9469 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9470 rval = SD_NOT_READY_VALID; 9471 mutex_enter(SD_MUTEX(un)); 9472 goto done; 9473 } 9474 mutex_enter(SD_MUTEX(un)); 9475 } 9476 9477 /* The state has changed, inform the media watch routines */ 9478 un->un_mediastate = DKIO_INSERTED; 9479 cv_broadcast(&un->un_state_cv); 9480 rval = SD_READY_VALID; 9481 9482 done: 9483 9484 /* 9485 * Initialize the capacity kstat value, if no media previously 9486 * (capacity kstat is 0) and a media has been inserted 9487 * (un_blockcount > 0). 9488 */ 9489 if (un->un_errstats != NULL) { 9490 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9491 if ((stp->sd_capacity.value.ui64 == 0) && 9492 (un->un_f_blockcount_is_valid == TRUE)) { 9493 stp->sd_capacity.value.ui64 = 9494 (uint64_t)((uint64_t)un->un_blockcount * 9495 un->un_sys_blocksize); 9496 } 9497 } 9498 9499 mutex_exit(SD_MUTEX(un)); 9500 return (rval); 9501 } 9502 9503 9504 /* 9505 * Function: sdmin 9506 * 9507 * Description: Routine to limit the size of a data transfer. Used in 9508 * conjunction with physio(9F). 9509 * 9510 * Arguments: bp - pointer to the indicated buf(9S) struct. 9511 * 9512 * Context: Kernel thread context. 9513 */ 9514 9515 static void 9516 sdmin(struct buf *bp) 9517 { 9518 struct sd_lun *un; 9519 int instance; 9520 9521 instance = SDUNIT(bp->b_edev); 9522 9523 un = ddi_get_soft_state(sd_state, instance); 9524 ASSERT(un != NULL); 9525 9526 if (bp->b_bcount > un->un_max_xfer_size) { 9527 bp->b_bcount = un->un_max_xfer_size; 9528 } 9529 } 9530 9531 9532 /* 9533 * Function: sdread 9534 * 9535 * Description: Driver's read(9e) entry point function. 9536 * 9537 * Arguments: dev - device number 9538 * uio - structure pointer describing where data is to be stored 9539 * in user's space 9540 * cred_p - user credential pointer 9541 * 9542 * Return Code: ENXIO 9543 * EIO 9544 * EINVAL 9545 * value returned by physio 9546 * 9547 * Context: Kernel thread context. 
9548 */ 9549 /* ARGSUSED */ 9550 static int 9551 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9552 { 9553 struct sd_lun *un = NULL; 9554 int secmask; 9555 int err; 9556 9557 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9558 return (ENXIO); 9559 } 9560 9561 ASSERT(!mutex_owned(SD_MUTEX(un))); 9562 9563 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9564 mutex_enter(SD_MUTEX(un)); 9565 /* 9566 * Because the call to sd_ready_and_valid will issue I/O we 9567 * must wait here if either the device is suspended or 9568 * if it's power level is changing. 9569 */ 9570 while ((un->un_state == SD_STATE_SUSPENDED) || 9571 (un->un_state == SD_STATE_PM_CHANGING)) { 9572 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9573 } 9574 un->un_ncmds_in_driver++; 9575 mutex_exit(SD_MUTEX(un)); 9576 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9577 mutex_enter(SD_MUTEX(un)); 9578 un->un_ncmds_in_driver--; 9579 ASSERT(un->un_ncmds_in_driver >= 0); 9580 mutex_exit(SD_MUTEX(un)); 9581 return (EIO); 9582 } 9583 mutex_enter(SD_MUTEX(un)); 9584 un->un_ncmds_in_driver--; 9585 ASSERT(un->un_ncmds_in_driver >= 0); 9586 mutex_exit(SD_MUTEX(un)); 9587 } 9588 9589 /* 9590 * Read requests are restricted to multiples of the system block size. 9591 */ 9592 secmask = un->un_sys_blocksize - 1; 9593 9594 if (uio->uio_loffset & ((offset_t)(secmask))) { 9595 SD_ERROR(SD_LOG_READ_WRITE, un, 9596 "sdread: file offset not modulo %d\n", 9597 un->un_sys_blocksize); 9598 err = EINVAL; 9599 } else if (uio->uio_iov->iov_len & (secmask)) { 9600 SD_ERROR(SD_LOG_READ_WRITE, un, 9601 "sdread: transfer length not modulo %d\n", 9602 un->un_sys_blocksize); 9603 err = EINVAL; 9604 } else { 9605 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9606 } 9607 return (err); 9608 } 9609 9610 9611 /* 9612 * Function: sdwrite 9613 * 9614 * Description: Driver's write(9e) entry point function. 9615 * 9616 * Arguments: dev - device number 9617 * uio - structure pointer describing where data is stored in 9618 * user's space 9619 * cred_p - user credential pointer 9620 * 9621 * Return Code: ENXIO 9622 * EIO 9623 * EINVAL 9624 * value returned by physio 9625 * 9626 * Context: Kernel thread context. 9627 */ 9628 /* ARGSUSED */ 9629 static int 9630 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9631 { 9632 struct sd_lun *un = NULL; 9633 int secmask; 9634 int err; 9635 9636 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9637 return (ENXIO); 9638 } 9639 9640 ASSERT(!mutex_owned(SD_MUTEX(un))); 9641 9642 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9643 mutex_enter(SD_MUTEX(un)); 9644 /* 9645 * Because the call to sd_ready_and_valid will issue I/O we 9646 * must wait here if either the device is suspended or 9647 * if it's power level is changing. 9648 */ 9649 while ((un->un_state == SD_STATE_SUSPENDED) || 9650 (un->un_state == SD_STATE_PM_CHANGING)) { 9651 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9652 } 9653 un->un_ncmds_in_driver++; 9654 mutex_exit(SD_MUTEX(un)); 9655 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9656 mutex_enter(SD_MUTEX(un)); 9657 un->un_ncmds_in_driver--; 9658 ASSERT(un->un_ncmds_in_driver >= 0); 9659 mutex_exit(SD_MUTEX(un)); 9660 return (EIO); 9661 } 9662 mutex_enter(SD_MUTEX(un)); 9663 un->un_ncmds_in_driver--; 9664 ASSERT(un->un_ncmds_in_driver >= 0); 9665 mutex_exit(SD_MUTEX(un)); 9666 } 9667 9668 /* 9669 * Write requests are restricted to multiples of the system block size. 
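 * (For example, with a 512-byte system block size secmask is 0x1FF;
 * a write of 4096 bytes at offset 1024 passes both checks below,
 * while a write at offset 100, or one whose iov_len is 100 bytes,
 * fails with EINVAL before any I/O is issued.)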
9670 */ 9671 secmask = un->un_sys_blocksize - 1; 9672 9673 if (uio->uio_loffset & ((offset_t)(secmask))) { 9674 SD_ERROR(SD_LOG_READ_WRITE, un, 9675 "sdwrite: file offset not modulo %d\n", 9676 un->un_sys_blocksize); 9677 err = EINVAL; 9678 } else if (uio->uio_iov->iov_len & (secmask)) { 9679 SD_ERROR(SD_LOG_READ_WRITE, un, 9680 "sdwrite: transfer length not modulo %d\n", 9681 un->un_sys_blocksize); 9682 err = EINVAL; 9683 } else { 9684 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9685 } 9686 return (err); 9687 } 9688 9689 9690 /* 9691 * Function: sdaread 9692 * 9693 * Description: Driver's aread(9e) entry point function. 9694 * 9695 * Arguments: dev - device number 9696 * aio - structure pointer describing where data is to be stored 9697 * cred_p - user credential pointer 9698 * 9699 * Return Code: ENXIO 9700 * EIO 9701 * EINVAL 9702 * value returned by aphysio 9703 * 9704 * Context: Kernel thread context. 9705 */ 9706 /* ARGSUSED */ 9707 static int 9708 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9709 { 9710 struct sd_lun *un = NULL; 9711 struct uio *uio = aio->aio_uio; 9712 int secmask; 9713 int err; 9714 9715 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9716 return (ENXIO); 9717 } 9718 9719 ASSERT(!mutex_owned(SD_MUTEX(un))); 9720 9721 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9722 mutex_enter(SD_MUTEX(un)); 9723 /* 9724 * Because the call to sd_ready_and_valid will issue I/O we 9725 * must wait here if either the device is suspended or 9726 * if it's power level is changing. 9727 */ 9728 while ((un->un_state == SD_STATE_SUSPENDED) || 9729 (un->un_state == SD_STATE_PM_CHANGING)) { 9730 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9731 } 9732 un->un_ncmds_in_driver++; 9733 mutex_exit(SD_MUTEX(un)); 9734 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9735 mutex_enter(SD_MUTEX(un)); 9736 un->un_ncmds_in_driver--; 9737 ASSERT(un->un_ncmds_in_driver >= 0); 9738 mutex_exit(SD_MUTEX(un)); 9739 return (EIO); 9740 } 9741 mutex_enter(SD_MUTEX(un)); 9742 un->un_ncmds_in_driver--; 9743 ASSERT(un->un_ncmds_in_driver >= 0); 9744 mutex_exit(SD_MUTEX(un)); 9745 } 9746 9747 /* 9748 * Read requests are restricted to multiples of the system block size. 9749 */ 9750 secmask = un->un_sys_blocksize - 1; 9751 9752 if (uio->uio_loffset & ((offset_t)(secmask))) { 9753 SD_ERROR(SD_LOG_READ_WRITE, un, 9754 "sdaread: file offset not modulo %d\n", 9755 un->un_sys_blocksize); 9756 err = EINVAL; 9757 } else if (uio->uio_iov->iov_len & (secmask)) { 9758 SD_ERROR(SD_LOG_READ_WRITE, un, 9759 "sdaread: transfer length not modulo %d\n", 9760 un->un_sys_blocksize); 9761 err = EINVAL; 9762 } else { 9763 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 9764 } 9765 return (err); 9766 } 9767 9768 9769 /* 9770 * Function: sdawrite 9771 * 9772 * Description: Driver's awrite(9e) entry point function. 9773 * 9774 * Arguments: dev - device number 9775 * aio - structure pointer describing where data is stored 9776 * cred_p - user credential pointer 9777 * 9778 * Return Code: ENXIO 9779 * EIO 9780 * EINVAL 9781 * value returned by aphysio 9782 * 9783 * Context: Kernel thread context. 
9784 */ 9785 /* ARGSUSED */ 9786 static int 9787 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9788 { 9789 struct sd_lun *un = NULL; 9790 struct uio *uio = aio->aio_uio; 9791 int secmask; 9792 int err; 9793 9794 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9795 return (ENXIO); 9796 } 9797 9798 ASSERT(!mutex_owned(SD_MUTEX(un))); 9799 9800 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9801 mutex_enter(SD_MUTEX(un)); 9802 /* 9803 * Because the call to sd_ready_and_valid will issue I/O we 9804 * must wait here if either the device is suspended or 9805 * if it's power level is changing. 9806 */ 9807 while ((un->un_state == SD_STATE_SUSPENDED) || 9808 (un->un_state == SD_STATE_PM_CHANGING)) { 9809 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9810 } 9811 un->un_ncmds_in_driver++; 9812 mutex_exit(SD_MUTEX(un)); 9813 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9814 mutex_enter(SD_MUTEX(un)); 9815 un->un_ncmds_in_driver--; 9816 ASSERT(un->un_ncmds_in_driver >= 0); 9817 mutex_exit(SD_MUTEX(un)); 9818 return (EIO); 9819 } 9820 mutex_enter(SD_MUTEX(un)); 9821 un->un_ncmds_in_driver--; 9822 ASSERT(un->un_ncmds_in_driver >= 0); 9823 mutex_exit(SD_MUTEX(un)); 9824 } 9825 9826 /* 9827 * Write requests are restricted to multiples of the system block size. 9828 */ 9829 secmask = un->un_sys_blocksize - 1; 9830 9831 if (uio->uio_loffset & ((offset_t)(secmask))) { 9832 SD_ERROR(SD_LOG_READ_WRITE, un, 9833 "sdawrite: file offset not modulo %d\n", 9834 un->un_sys_blocksize); 9835 err = EINVAL; 9836 } else if (uio->uio_iov->iov_len & (secmask)) { 9837 SD_ERROR(SD_LOG_READ_WRITE, un, 9838 "sdawrite: transfer length not modulo %d\n", 9839 un->un_sys_blocksize); 9840 err = EINVAL; 9841 } else { 9842 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 9843 } 9844 return (err); 9845 } 9846 9847 9848 9849 9850 9851 /* 9852 * Driver IO processing follows the following sequence: 9853 * 9854 * sdioctl(9E) sdstrategy(9E) biodone(9F) 9855 * | | ^ 9856 * v v | 9857 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 9858 * | | | | 9859 * v | | | 9860 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 9861 * | | ^ ^ 9862 * v v | | 9863 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 9864 * | | | | 9865 * +---+ | +------------+ +-------+ 9866 * | | | | 9867 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9868 * | v | | 9869 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 9870 * | | ^ | 9871 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9872 * | v | | 9873 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 9874 * | | ^ | 9875 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9876 * | v | | 9877 * | sd_checksum_iostart() sd_checksum_iodone() | 9878 * | | ^ | 9879 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 9880 * | v | | 9881 * | sd_pm_iostart() sd_pm_iodone() | 9882 * | | ^ | 9883 * | | | | 9884 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 9885 * | ^ 9886 * v | 9887 * sd_core_iostart() | 9888 * | | 9889 * | +------>(*destroypkt)() 9890 * +-> sd_start_cmds() <-+ | | 9891 * | | | v 9892 * | | | scsi_destroy_pkt(9F) 9893 * | | | 9894 * +->(*initpkt)() +- sdintr() 9895 * | | | | 9896 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 9897 * | +-> scsi_setup_cdb(9F) | 9898 * | | 9899 * +--> scsi_transport(9F) | 9900 * | | 9901 * +----> SCSA ---->+ 9902 * 9903 * 9904 * This code is based upon the following presumptions: 9905 * 9906 * - iostart and iodone functions operate on buf(9S) structures. 
These 9907 * functions perform the necessary operations on the buf(9S) and pass 9908 * them along to the next function in the chain by using the macros 9909 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 9910 * (for iodone side functions). 9911 * 9912 * - The iostart side functions may sleep. The iodone side functions 9913 * are called under interrupt context and may NOT sleep. Therefore 9914 * iodone side functions also may not call iostart side functions. 9915 * (NOTE: iostart side functions should NOT sleep for memory, as 9916 * this could result in deadlock.) 9917 * 9918 * - An iostart side function may call its corresponding iodone side 9919 * function directly (if necessary). 9920 * 9921 * - In the event of an error, an iostart side function can return a buf(9S) 9922 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 9923 * b_error in the usual way of course). 9924 * 9925 * - The taskq mechanism may be used by the iodone side functions to dispatch 9926 * requests to the iostart side functions. The iostart side functions in 9927 * this case would be called under the context of a taskq thread, so it's 9928 * OK for them to block/sleep/spin in this case. 9929 * 9930 * - iostart side functions may allocate "shadow" buf(9S) structs and 9931 * pass them along to the next function in the chain. The corresponding 9932 * iodone side functions must coalesce the "shadow" bufs and return 9933 * the "original" buf to the next higher layer. 9934 * 9935 * - The b_private field of the buf(9S) struct holds a pointer to 9936 * an sd_xbuf struct, which contains information needed to 9937 * construct the scsi_pkt for the command. 9938 * 9939 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 9940 * layer must acquire & release the SD_MUTEX(un) as needed. 9941 */ 9942 9943 9944 /* 9945 * Create taskq for all targets in the system. This is created at 9946 * _init(9E) and destroyed at _fini(9E). 9947 * 9948 * Note: here we set the minalloc to a reasonably high number to ensure that 9949 * we will have an adequate supply of task entries available at interrupt time. 9950 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 9951 * sd_create_taskq(). Since we do not want to sleep for allocations at 9952 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 9953 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 9954 * requests any one instant in time. 9955 */ 9956 #define SD_TASKQ_NUMTHREADS 8 9957 #define SD_TASKQ_MINALLOC 256 9958 #define SD_TASKQ_MAXALLOC 256 9959 9960 static taskq_t *sd_tq = NULL; 9961 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 9962 9963 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 9964 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 9965 9966 /* 9967 * The following task queue is being created for the write part of 9968 * read-modify-write of non-512 block size devices. 9969 * Limit the number of threads to 1 for now. This number has been chosen 9970 * considering the fact that it applies only to dvd ram drives/MO drives 9971 * currently. Performance for which is not main criteria at this stage. 
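 * For illustration (a sketch, not the exact call site): the iodone
 * side of the read-modify-write path hands the deferred write phase
 * to this queue with something like
 *	(void) taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task,
 *	    un, KM_NOSLEEP);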
9972 * Note: It needs to be explored if we can use a single taskq in future 9973 */ 9974 #define SD_WMR_TASKQ_NUMTHREADS 1 9975 static taskq_t *sd_wmr_tq = NULL; 9976 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 9977 9978 /* 9979 * Function: sd_taskq_create 9980 * 9981 * Description: Create taskq thread(s) and preallocate task entries 9982 * 9983 * Return Code: Returns a pointer to the allocated taskq_t. 9984 * 9985 * Context: Can sleep. Requires blockable context. 9986 * 9987 * Notes: - The taskq() facility currently is NOT part of the DDI. 9988 * (definitely NOT recommeded for 3rd-party drivers!) :-) 9989 * - taskq_create() will block for memory, also it will panic 9990 * if it cannot create the requested number of threads. 9991 * - Currently taskq_create() creates threads that cannot be 9992 * swapped. 9993 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 9994 * supply of taskq entries at interrupt time (ie, so that we 9995 * do not have to sleep for memory) 9996 */ 9997 9998 static void 9999 sd_taskq_create(void) 10000 { 10001 char taskq_name[TASKQ_NAMELEN]; 10002 10003 ASSERT(sd_tq == NULL); 10004 ASSERT(sd_wmr_tq == NULL); 10005 10006 (void) snprintf(taskq_name, sizeof (taskq_name), 10007 "%s_drv_taskq", sd_label); 10008 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10009 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10010 TASKQ_PREPOPULATE)); 10011 10012 (void) snprintf(taskq_name, sizeof (taskq_name), 10013 "%s_rmw_taskq", sd_label); 10014 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10015 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10016 TASKQ_PREPOPULATE)); 10017 } 10018 10019 10020 /* 10021 * Function: sd_taskq_delete 10022 * 10023 * Description: Complementary cleanup routine for sd_taskq_create(). 10024 * 10025 * Context: Kernel thread context. 10026 */ 10027 10028 static void 10029 sd_taskq_delete(void) 10030 { 10031 ASSERT(sd_tq != NULL); 10032 ASSERT(sd_wmr_tq != NULL); 10033 taskq_destroy(sd_tq); 10034 taskq_destroy(sd_wmr_tq); 10035 sd_tq = NULL; 10036 sd_wmr_tq = NULL; 10037 } 10038 10039 10040 /* 10041 * Function: sdstrategy 10042 * 10043 * Description: Driver's strategy (9E) entry point function. 10044 * 10045 * Arguments: bp - pointer to buf(9S) 10046 * 10047 * Return Code: Always returns zero 10048 * 10049 * Context: Kernel thread context. 10050 */ 10051 10052 static int 10053 sdstrategy(struct buf *bp) 10054 { 10055 struct sd_lun *un; 10056 10057 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10058 if (un == NULL) { 10059 bioerror(bp, EIO); 10060 bp->b_resid = bp->b_bcount; 10061 biodone(bp); 10062 return (0); 10063 } 10064 /* As was done in the past, fail new cmds. if state is dumping. */ 10065 if (un->un_state == SD_STATE_DUMPING) { 10066 bioerror(bp, ENXIO); 10067 bp->b_resid = bp->b_bcount; 10068 biodone(bp); 10069 return (0); 10070 } 10071 10072 ASSERT(!mutex_owned(SD_MUTEX(un))); 10073 10074 /* 10075 * Commands may sneak in while we released the mutex in 10076 * DDI_SUSPEND, we should block new commands. However, old 10077 * commands that are still in the driver at this point should 10078 * still be allowed to drain. 10079 */ 10080 mutex_enter(SD_MUTEX(un)); 10081 /* 10082 * Must wait here if either the device is suspended or 10083 * if it's power level is changing. 
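 * New commands block here on un_suspend_cv until the resume or
 * power-change completion path wakes them; only then is
 * un_ncmds_in_driver incremented and the command allowed to proceed.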
10084 */ 10085 while ((un->un_state == SD_STATE_SUSPENDED) || 10086 (un->un_state == SD_STATE_PM_CHANGING)) { 10087 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10088 } 10089 10090 un->un_ncmds_in_driver++; 10091 10092 /* 10093 * atapi: Since we are running the CD for now in PIO mode we need to 10094 * call bp_mapin here to avoid bp_mapin called interrupt context under 10095 * the HBA's init_pkt routine. 10096 */ 10097 if (un->un_f_cfg_is_atapi == TRUE) { 10098 mutex_exit(SD_MUTEX(un)); 10099 bp_mapin(bp); 10100 mutex_enter(SD_MUTEX(un)); 10101 } 10102 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10103 un->un_ncmds_in_driver); 10104 10105 mutex_exit(SD_MUTEX(un)); 10106 10107 /* 10108 * This will (eventually) allocate the sd_xbuf area and 10109 * call sd_xbuf_strategy(). We just want to return the 10110 * result of ddi_xbuf_qstrategy so that we have an opt- 10111 * imized tail call which saves us a stack frame. 10112 */ 10113 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10114 } 10115 10116 10117 /* 10118 * Function: sd_xbuf_strategy 10119 * 10120 * Description: Function for initiating IO operations via the 10121 * ddi_xbuf_qstrategy() mechanism. 10122 * 10123 * Context: Kernel thread context. 10124 */ 10125 10126 static void 10127 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10128 { 10129 struct sd_lun *un = arg; 10130 10131 ASSERT(bp != NULL); 10132 ASSERT(xp != NULL); 10133 ASSERT(un != NULL); 10134 ASSERT(!mutex_owned(SD_MUTEX(un))); 10135 10136 /* 10137 * Initialize the fields in the xbuf and save a pointer to the 10138 * xbuf in bp->b_private. 10139 */ 10140 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10141 10142 /* Send the buf down the iostart chain */ 10143 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10144 } 10145 10146 10147 /* 10148 * Function: sd_xbuf_init 10149 * 10150 * Description: Prepare the given sd_xbuf struct for use. 10151 * 10152 * Arguments: un - ptr to softstate 10153 * bp - ptr to associated buf(9S) 10154 * xp - ptr to associated sd_xbuf 10155 * chain_type - IO chain type to use: 10156 * SD_CHAIN_NULL 10157 * SD_CHAIN_BUFIO 10158 * SD_CHAIN_USCSI 10159 * SD_CHAIN_DIRECT 10160 * SD_CHAIN_DIRECT_PRIORITY 10161 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10162 * initialization; may be NULL if none. 10163 * 10164 * Context: Kernel thread context 10165 */ 10166 10167 static void 10168 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10169 uchar_t chain_type, void *pktinfop) 10170 { 10171 int index; 10172 10173 ASSERT(un != NULL); 10174 ASSERT(bp != NULL); 10175 ASSERT(xp != NULL); 10176 10177 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10178 bp, chain_type); 10179 10180 xp->xb_un = un; 10181 xp->xb_pktp = NULL; 10182 xp->xb_pktinfo = pktinfop; 10183 xp->xb_private = bp->b_private; 10184 xp->xb_blkno = (daddr_t)bp->b_blkno; 10185 10186 /* 10187 * Set up the iostart and iodone chain indexes in the xbuf, based 10188 * upon the specified chain type to use. 10189 */ 10190 switch (chain_type) { 10191 case SD_CHAIN_NULL: 10192 /* 10193 * Fall thru to just use the values for the buf type, even 10194 * tho for the NULL chain these values will never be used. 
10195 */ 10196 /* FALLTHRU */ 10197 case SD_CHAIN_BUFIO: 10198 index = un->un_buf_chain_type; 10199 break; 10200 case SD_CHAIN_USCSI: 10201 index = un->un_uscsi_chain_type; 10202 break; 10203 case SD_CHAIN_DIRECT: 10204 index = un->un_direct_chain_type; 10205 break; 10206 case SD_CHAIN_DIRECT_PRIORITY: 10207 index = un->un_priority_chain_type; 10208 break; 10209 default: 10210 /* We're really broken if we ever get here... */ 10211 panic("sd_xbuf_init: illegal chain type!"); 10212 /*NOTREACHED*/ 10213 } 10214 10215 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10216 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10217 10218 /* 10219 * It might be a bit easier to simply bzero the entire xbuf above, 10220 * but it turns out that since we init a fair number of members anyway, 10221 * we save a fair number cycles by doing explicit assignment of zero. 10222 */ 10223 xp->xb_pkt_flags = 0; 10224 xp->xb_dma_resid = 0; 10225 xp->xb_retry_count = 0; 10226 xp->xb_victim_retry_count = 0; 10227 xp->xb_ua_retry_count = 0; 10228 xp->xb_sense_bp = NULL; 10229 xp->xb_sense_status = 0; 10230 xp->xb_sense_state = 0; 10231 xp->xb_sense_resid = 0; 10232 10233 bp->b_private = xp; 10234 bp->b_flags &= ~(B_DONE | B_ERROR); 10235 bp->b_resid = 0; 10236 bp->av_forw = NULL; 10237 bp->av_back = NULL; 10238 bioerror(bp, 0); 10239 10240 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10241 } 10242 10243 10244 /* 10245 * Function: sd_uscsi_strategy 10246 * 10247 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10248 * 10249 * Arguments: bp - buf struct ptr 10250 * 10251 * Return Code: Always returns 0 10252 * 10253 * Context: Kernel thread context 10254 */ 10255 10256 static int 10257 sd_uscsi_strategy(struct buf *bp) 10258 { 10259 struct sd_lun *un; 10260 struct sd_uscsi_info *uip; 10261 struct sd_xbuf *xp; 10262 uchar_t chain_type; 10263 10264 ASSERT(bp != NULL); 10265 10266 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10267 if (un == NULL) { 10268 bioerror(bp, EIO); 10269 bp->b_resid = bp->b_bcount; 10270 biodone(bp); 10271 return (0); 10272 } 10273 10274 ASSERT(!mutex_owned(SD_MUTEX(un))); 10275 10276 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10277 10278 mutex_enter(SD_MUTEX(un)); 10279 /* 10280 * atapi: Since we are running the CD for now in PIO mode we need to 10281 * call bp_mapin here to avoid bp_mapin called interrupt context under 10282 * the HBA's init_pkt routine. 
10283 */ 10284 if (un->un_f_cfg_is_atapi == TRUE) { 10285 mutex_exit(SD_MUTEX(un)); 10286 bp_mapin(bp); 10287 mutex_enter(SD_MUTEX(un)); 10288 } 10289 un->un_ncmds_in_driver++; 10290 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10291 un->un_ncmds_in_driver); 10292 mutex_exit(SD_MUTEX(un)); 10293 10294 /* 10295 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10296 */ 10297 ASSERT(bp->b_private != NULL); 10298 uip = (struct sd_uscsi_info *)bp->b_private; 10299 10300 switch (uip->ui_flags) { 10301 case SD_PATH_DIRECT: 10302 chain_type = SD_CHAIN_DIRECT; 10303 break; 10304 case SD_PATH_DIRECT_PRIORITY: 10305 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10306 break; 10307 default: 10308 chain_type = SD_CHAIN_USCSI; 10309 break; 10310 } 10311 10312 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 10313 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10314 10315 /* Use the index obtained within xbuf_init */ 10316 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10317 10318 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10319 10320 return (0); 10321 } 10322 10323 /* 10324 * Function: sd_send_scsi_cmd 10325 * 10326 * Description: Runs a USCSI command for user (when called thru sdioctl), 10327 * or for the driver 10328 * 10329 * Arguments: dev - the dev_t for the device 10330 * incmd - ptr to a valid uscsi_cmd struct 10331 * flag - bit flag, indicating open settings, 32/64 bit type 10332 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10333 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10334 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10335 * to use the USCSI "direct" chain and bypass the normal 10336 * command waitq. 10337 * 10338 * Return Code: 0 - successful completion of the given command 10339 * EIO - scsi_uscsi_handle_command() failed 10340 * ENXIO - soft state not found for specified dev 10341 * EINVAL 10342 * EFAULT - copyin/copyout error 10343 * return code of scsi_uscsi_handle_command(): 10344 * EIO 10345 * ENXIO 10346 * EACCES 10347 * 10348 * Context: Waits for command to complete. Can sleep. 
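 * Example (a hypothetical in-kernel caller, shown only as a sketch;
 * sdioctl() reaches this routine via the USCSICMD ioctl with a
 * user-supplied uscsi_cmd):
 *
 *	union scsi_cdb cdb;
 *	struct uscsi_cmd ucmd;
 *	int err;
 *
 *	bzero(&cdb, sizeof (cdb));
 *	bzero(&ucmd, sizeof (ucmd));
 *	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *	ucmd.uscsi_cdb = (caddr_t)&cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_flags = USCSI_SILENT;
 *	ucmd.uscsi_timeout = 60;
 *	err = sd_send_scsi_cmd(dev, &ucmd, FKIOCTL,
 *	    UIO_SYSSPACE, SD_PATH_DIRECT);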
10349 */ 10350 10351 static int 10352 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10353 enum uio_seg dataspace, int path_flag) 10354 { 10355 struct sd_uscsi_info *uip; 10356 struct uscsi_cmd *uscmd; 10357 struct sd_lun *un; 10358 int format = 0; 10359 int rval; 10360 10361 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10362 if (un == NULL) { 10363 return (ENXIO); 10364 } 10365 10366 ASSERT(!mutex_owned(SD_MUTEX(un))); 10367 10368 #ifdef SDDEBUG 10369 switch (dataspace) { 10370 case UIO_USERSPACE: 10371 SD_TRACE(SD_LOG_IO, un, 10372 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10373 break; 10374 case UIO_SYSSPACE: 10375 SD_TRACE(SD_LOG_IO, un, 10376 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10377 break; 10378 default: 10379 SD_TRACE(SD_LOG_IO, un, 10380 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10381 break; 10382 } 10383 #endif 10384 10385 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10386 SD_ADDRESS(un), &uscmd); 10387 if (rval != 0) { 10388 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 10389 "scsi_uscsi_alloc_and_copyin failed\n", un); 10390 return (rval); 10391 } 10392 10393 if ((uscmd->uscsi_cdb != NULL) && 10394 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10395 mutex_enter(SD_MUTEX(un)); 10396 un->un_f_format_in_progress = TRUE; 10397 mutex_exit(SD_MUTEX(un)); 10398 format = 1; 10399 } 10400 10401 /* 10402 * Allocate an sd_uscsi_info struct and fill it with the info 10403 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10404 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10405 * since we allocate the buf here in this function, we do not 10406 * need to preserve the prior contents of b_private. 10407 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10408 */ 10409 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10410 uip->ui_flags = path_flag; 10411 uip->ui_cmdp = uscmd; 10412 10413 /* 10414 * Commands sent with priority are intended for error recovery 10415 * situations, and do not have retries performed. 10416 */ 10417 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10418 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10419 } 10420 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10421 10422 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10423 sd_uscsi_strategy, NULL, uip); 10424 10425 #ifdef SDDEBUG 10426 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10427 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10428 uscmd->uscsi_status, uscmd->uscsi_resid); 10429 if (uscmd->uscsi_bufaddr != NULL) { 10430 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10431 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10432 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10433 if (dataspace == UIO_SYSSPACE) { 10434 SD_DUMP_MEMORY(un, SD_LOG_IO, 10435 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10436 uscmd->uscsi_buflen, SD_LOG_HEX); 10437 } 10438 } 10439 #endif 10440 10441 if (format == 1) { 10442 mutex_enter(SD_MUTEX(un)); 10443 un->un_f_format_in_progress = FALSE; 10444 mutex_exit(SD_MUTEX(un)); 10445 } 10446 10447 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10448 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10449 10450 return (rval); 10451 } 10452 10453 10454 /* 10455 * Function: sd_buf_iodone 10456 * 10457 * Description: Frees the sd_xbuf & returns the buf to its originator. 10458 * 10459 * Context: May be called from interrupt context. 
10460 */ 10461 /* ARGSUSED */ 10462 static void 10463 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10464 { 10465 struct sd_xbuf *xp; 10466 10467 ASSERT(un != NULL); 10468 ASSERT(bp != NULL); 10469 ASSERT(!mutex_owned(SD_MUTEX(un))); 10470 10471 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10472 10473 xp = SD_GET_XBUF(bp); 10474 ASSERT(xp != NULL); 10475 10476 mutex_enter(SD_MUTEX(un)); 10477 10478 /* 10479 * Grab time when the cmd completed. 10480 * This is used for determining if the system has been 10481 * idle long enough to make it idle to the PM framework. 10482 * This is for lowering the overhead, and therefore improving 10483 * performance per I/O operation. 10484 */ 10485 un->un_pm_idle_time = ddi_get_time(); 10486 10487 un->un_ncmds_in_driver--; 10488 ASSERT(un->un_ncmds_in_driver >= 0); 10489 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10490 un->un_ncmds_in_driver); 10491 10492 mutex_exit(SD_MUTEX(un)); 10493 10494 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10495 biodone(bp); /* bp is gone after this */ 10496 10497 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10498 } 10499 10500 10501 /* 10502 * Function: sd_uscsi_iodone 10503 * 10504 * Description: Frees the sd_xbuf & returns the buf to its originator. 10505 * 10506 * Context: May be called from interrupt context. 10507 */ 10508 /* ARGSUSED */ 10509 static void 10510 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10511 { 10512 struct sd_xbuf *xp; 10513 10514 ASSERT(un != NULL); 10515 ASSERT(bp != NULL); 10516 10517 xp = SD_GET_XBUF(bp); 10518 ASSERT(xp != NULL); 10519 ASSERT(!mutex_owned(SD_MUTEX(un))); 10520 10521 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10522 10523 bp->b_private = xp->xb_private; 10524 10525 mutex_enter(SD_MUTEX(un)); 10526 10527 /* 10528 * Grab time when the cmd completed. 10529 * This is used for determining if the system has been 10530 * idle long enough to make it idle to the PM framework. 10531 * This is for lowering the overhead, and therefore improving 10532 * performance per I/O operation. 10533 */ 10534 un->un_pm_idle_time = ddi_get_time(); 10535 10536 un->un_ncmds_in_driver--; 10537 ASSERT(un->un_ncmds_in_driver >= 0); 10538 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10539 un->un_ncmds_in_driver); 10540 10541 mutex_exit(SD_MUTEX(un)); 10542 10543 kmem_free(xp, sizeof (struct sd_xbuf)); 10544 biodone(bp); 10545 10546 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10547 } 10548 10549 10550 /* 10551 * Function: sd_mapblockaddr_iostart 10552 * 10553 * Description: Verify request lies within the partition limits for 10554 * the indicated minor device. Issue "overrun" buf if 10555 * request would exceed partition range. Converts 10556 * partition-relative block address to absolute. 10557 * 10558 * Context: Can sleep 10559 * 10560 * Issues: This follows what the old code did, in terms of accessing 10561 * some of the partition info in the unit struct without holding 10562 * the mutext. This is a general issue, if the partition info 10563 * can be altered while IO is in progress... as soon as we send 10564 * a buf, its partitioning can be invalid before it gets to the 10565 * device. Probably the right fix is to move partitioning out 10566 * of the driver entirely. 
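 * Example of the mapping performed below (illustrative numbers):
 * if cmlb_partinfo() reports the partition starting at absolute
 * block 1050 with nblocks = 100, a buf whose xb_blkno is 10 is
 * converted to absolute block 1060; an 8-block request starting at
 * relative block 96 overruns the partition and is clipped to the
 * 4 remaining blocks via the "overrun" buf described above.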
10567 */ 10568 10569 static void 10570 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10571 { 10572 diskaddr_t nblocks; /* #blocks in the given partition */ 10573 daddr_t blocknum; /* Block number specified by the buf */ 10574 size_t requested_nblocks; 10575 size_t available_nblocks; 10576 int partition; 10577 diskaddr_t partition_offset; 10578 struct sd_xbuf *xp; 10579 10580 10581 ASSERT(un != NULL); 10582 ASSERT(bp != NULL); 10583 ASSERT(!mutex_owned(SD_MUTEX(un))); 10584 10585 SD_TRACE(SD_LOG_IO_PARTITION, un, 10586 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10587 10588 xp = SD_GET_XBUF(bp); 10589 ASSERT(xp != NULL); 10590 10591 /* 10592 * If the geometry is not indicated as valid, attempt to access 10593 * the unit & verify the geometry/label. This can be the case for 10594 * removable-media devices, of if the device was opened in 10595 * NDELAY/NONBLOCK mode. 10596 */ 10597 if (!SD_IS_VALID_LABEL(un) && 10598 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10599 /* 10600 * For removable devices it is possible to start an I/O 10601 * without a media by opening the device in nodelay mode. 10602 * Also for writable CDs there can be many scenarios where 10603 * there is no geometry yet but volume manager is trying to 10604 * issue a read() just because it can see TOC on the CD. So 10605 * do not print a message for removables. 10606 */ 10607 if (!un->un_f_has_removable_media) { 10608 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10609 "i/o to invalid geometry\n"); 10610 } 10611 bioerror(bp, EIO); 10612 bp->b_resid = bp->b_bcount; 10613 SD_BEGIN_IODONE(index, un, bp); 10614 return; 10615 } 10616 10617 partition = SDPART(bp->b_edev); 10618 10619 nblocks = 0; 10620 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10621 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10622 10623 /* 10624 * blocknum is the starting block number of the request. At this 10625 * point it is still relative to the start of the minor device. 10626 */ 10627 blocknum = xp->xb_blkno; 10628 10629 /* 10630 * Legacy: If the starting block number is one past the last block 10631 * in the partition, do not set B_ERROR in the buf. 10632 */ 10633 if (blocknum == nblocks) { 10634 goto error_exit; 10635 } 10636 10637 /* 10638 * Confirm that the first block of the request lies within the 10639 * partition limits. Also the requested number of bytes must be 10640 * a multiple of the system block size. 10641 */ 10642 if ((blocknum < 0) || (blocknum >= nblocks) || 10643 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10644 bp->b_flags |= B_ERROR; 10645 goto error_exit; 10646 } 10647 10648 /* 10649 * If the requsted # blocks exceeds the available # blocks, that 10650 * is an overrun of the partition. 10651 */ 10652 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10653 available_nblocks = (size_t)(nblocks - blocknum); 10654 ASSERT(nblocks >= blocknum); 10655 10656 if (requested_nblocks > available_nblocks) { 10657 /* 10658 * Allocate an "overrun" buf to allow the request to proceed 10659 * for the amount of space available in the partition. The 10660 * amount not transferred will be added into the b_resid 10661 * when the operation is complete. The overrun buf 10662 * replaces the original buf here, and the original buf 10663 * is saved inside the overrun buf, for later use. 
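 * To make the arithmetic below concrete (illustrative numbers, with
 * a 512-byte system block size): requested_nblocks = 8 and
 * available_nblocks = 5 give resid = 3 * 512 = 1536 and
 * count = b_bcount - 1536, i.e. the clone carries the 5 blocks that
 * still fit in the partition and the remaining 1536 bytes are
 * reported in b_resid on completion.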
10664 */ 10665 size_t resid = SD_SYSBLOCKS2BYTES(un, 10666 (offset_t)(requested_nblocks - available_nblocks)); 10667 size_t count = bp->b_bcount - resid; 10668 /* 10669 * Note: count is an unsigned entity thus it'll NEVER 10670 * be less than 0 so ASSERT the original values are 10671 * correct. 10672 */ 10673 ASSERT(bp->b_bcount >= resid); 10674 10675 bp = sd_bioclone_alloc(bp, count, blocknum, 10676 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10677 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10678 ASSERT(xp != NULL); 10679 } 10680 10681 /* At this point there should be no residual for this buf. */ 10682 ASSERT(bp->b_resid == 0); 10683 10684 /* Convert the block number to an absolute address. */ 10685 xp->xb_blkno += partition_offset; 10686 10687 SD_NEXT_IOSTART(index, un, bp); 10688 10689 SD_TRACE(SD_LOG_IO_PARTITION, un, 10690 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10691 10692 return; 10693 10694 error_exit: 10695 bp->b_resid = bp->b_bcount; 10696 SD_BEGIN_IODONE(index, un, bp); 10697 SD_TRACE(SD_LOG_IO_PARTITION, un, 10698 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10699 } 10700 10701 10702 /* 10703 * Function: sd_mapblockaddr_iodone 10704 * 10705 * Description: Completion-side processing for partition management. 10706 * 10707 * Context: May be called under interrupt context 10708 */ 10709 10710 static void 10711 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10712 { 10713 /* int partition; */ /* Not used, see below. */ 10714 ASSERT(un != NULL); 10715 ASSERT(bp != NULL); 10716 ASSERT(!mutex_owned(SD_MUTEX(un))); 10717 10718 SD_TRACE(SD_LOG_IO_PARTITION, un, 10719 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10720 10721 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10722 /* 10723 * We have an "overrun" buf to deal with... 10724 */ 10725 struct sd_xbuf *xp; 10726 struct buf *obp; /* ptr to the original buf */ 10727 10728 xp = SD_GET_XBUF(bp); 10729 ASSERT(xp != NULL); 10730 10731 /* Retrieve the pointer to the original buf */ 10732 obp = (struct buf *)xp->xb_private; 10733 ASSERT(obp != NULL); 10734 10735 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10736 bioerror(obp, bp->b_error); 10737 10738 sd_bioclone_free(bp); 10739 10740 /* 10741 * Get back the original buf. 10742 * Note that since the restoration of xb_blkno below 10743 * was removed, the sd_xbuf is not needed. 10744 */ 10745 bp = obp; 10746 /* 10747 * xp = SD_GET_XBUF(bp); 10748 * ASSERT(xp != NULL); 10749 */ 10750 } 10751 10752 /* 10753 * Convert sd->xb_blkno back to a minor-device relative value. 10754 * Note: this has been commented out, as it is not needed in the 10755 * current implementation of the driver (ie, since this function 10756 * is at the top of the layering chains, so the info will be 10757 * discarded) and it is in the "hot" IO path. 10758 * 10759 * partition = getminor(bp->b_edev) & SDPART_MASK; 10760 * xp->xb_blkno -= un->un_offset[partition]; 10761 */ 10762 10763 SD_NEXT_IODONE(index, un, bp); 10764 10765 SD_TRACE(SD_LOG_IO_PARTITION, un, 10766 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 10767 } 10768 10769 10770 /* 10771 * Function: sd_mapblocksize_iostart 10772 * 10773 * Description: Convert between system block size (un->un_sys_blocksize) 10774 * and target block size (un->un_tgt_blocksize). 10775 * 10776 * Context: Can sleep to allocate resources. 
10777 * 10778 * Assumptions: A higher layer has already performed any partition validation, 10779 * and converted the xp->xb_blkno to an absolute value relative 10780 * to the start of the device. 10781 * 10782 * It is also assumed that the higher layer has implemented 10783 * an "overrun" mechanism for the case where the request would 10784 * read/write beyond the end of a partition. In this case we 10785 * assume (and ASSERT) that bp->b_resid == 0. 10786 * 10787 * Note: The implementation for this routine assumes the target 10788 * block size remains constant between allocation and transport. 10789 */ 10790 10791 static void 10792 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 10793 { 10794 struct sd_mapblocksize_info *bsp; 10795 struct sd_xbuf *xp; 10796 offset_t first_byte; 10797 daddr_t start_block, end_block; 10798 daddr_t request_bytes; 10799 ushort_t is_aligned = FALSE; 10800 10801 ASSERT(un != NULL); 10802 ASSERT(bp != NULL); 10803 ASSERT(!mutex_owned(SD_MUTEX(un))); 10804 ASSERT(bp->b_resid == 0); 10805 10806 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10807 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 10808 10809 /* 10810 * For a non-writable CD, a write request is an error 10811 */ 10812 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 10813 (un->un_f_mmc_writable_media == FALSE)) { 10814 bioerror(bp, EIO); 10815 bp->b_resid = bp->b_bcount; 10816 SD_BEGIN_IODONE(index, un, bp); 10817 return; 10818 } 10819 10820 /* 10821 * We do not need a shadow buf if the device is using 10822 * un->un_sys_blocksize as its block size or if bcount == 0. 10823 * In this case there is no layer-private data block allocated. 10824 */ 10825 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10826 (bp->b_bcount == 0)) { 10827 goto done; 10828 } 10829 10830 #if defined(__i386) || defined(__amd64) 10831 /* We do not support non-block-aligned transfers for ROD devices */ 10832 ASSERT(!ISROD(un)); 10833 #endif 10834 10835 xp = SD_GET_XBUF(bp); 10836 ASSERT(xp != NULL); 10837 10838 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10839 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 10840 un->un_tgt_blocksize, un->un_sys_blocksize); 10841 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10842 "request start block:0x%x\n", xp->xb_blkno); 10843 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10844 "request len:0x%x\n", bp->b_bcount); 10845 10846 /* 10847 * Allocate the layer-private data area for the mapblocksize layer. 10848 * Layers are allowed to use the xp_private member of the sd_xbuf 10849 * struct to store the pointer to their layer-private data block, but 10850 * each layer also has the responsibility of restoring the prior 10851 * contents of xb_private before returning the buf/xbuf to the 10852 * higher layer that sent it. 10853 * 10854 * Here we save the prior contents of xp->xb_private into the 10855 * bsp->mbs_oprivate field of our layer-private data area. This value 10856 * is restored by sd_mapblocksize_iodone() just prior to freeing up 10857 * the layer-private area and returning the buf/xbuf to the layer 10858 * that sent it. 10859 * 10860 * Note that here we use kmem_zalloc for the allocation as there are 10861 * parts of the mapblocksize code that expect certain fields to be 10862 * zero unless explicitly set to a required value. 
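 * To make the block-size conversion done a few lines below concrete
 * (illustrative numbers): with un_sys_blocksize = 512 and
 * un_tgt_blocksize = 2048, a 1024-byte request at system block 3
 * gives first_byte = 1536, start_block = 0, end_block = 2 and
 * request_bytes = 4096; first_byte is not a multiple of the target
 * block size, so the request is unaligned and a shadow buf (and a
 * read-modify-write for a WRITE) is required.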
10863 */ 10864 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10865 bsp->mbs_oprivate = xp->xb_private; 10866 xp->xb_private = bsp; 10867 10868 /* 10869 * This treats the data on the disk (target) as an array of bytes. 10870 * first_byte is the byte offset, from the beginning of the device, 10871 * to the location of the request. This is converted from a 10872 * un->un_sys_blocksize block address to a byte offset, and then back 10873 * to a block address based upon a un->un_tgt_blocksize block size. 10874 * 10875 * xp->xb_blkno should be absolute upon entry into this function, 10876 * but, but it is based upon partitions that use the "system" 10877 * block size. It must be adjusted to reflect the block size of 10878 * the target. 10879 * 10880 * Note that end_block is actually the block that follows the last 10881 * block of the request, but that's what is needed for the computation. 10882 */ 10883 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 10884 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 10885 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 10886 un->un_tgt_blocksize; 10887 10888 /* request_bytes is rounded up to a multiple of the target block size */ 10889 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 10890 10891 /* 10892 * See if the starting address of the request and the request 10893 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 10894 * then we do not need to allocate a shadow buf to handle the request. 10895 */ 10896 if (((first_byte % un->un_tgt_blocksize) == 0) && 10897 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 10898 is_aligned = TRUE; 10899 } 10900 10901 if ((bp->b_flags & B_READ) == 0) { 10902 /* 10903 * Lock the range for a write operation. An aligned request is 10904 * considered a simple write; otherwise the request must be a 10905 * read-modify-write. 10906 */ 10907 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 10908 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 10909 } 10910 10911 /* 10912 * Alloc a shadow buf if the request is not aligned. Also, this is 10913 * where the READ command is generated for a read-modify-write. (The 10914 * write phase is deferred until after the read completes.) 10915 */ 10916 if (is_aligned == FALSE) { 10917 10918 struct sd_mapblocksize_info *shadow_bsp; 10919 struct sd_xbuf *shadow_xp; 10920 struct buf *shadow_bp; 10921 10922 /* 10923 * Allocate the shadow buf and it associated xbuf. Note that 10924 * after this call the xb_blkno value in both the original 10925 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 10926 * same: absolute relative to the start of the device, and 10927 * adjusted for the target block size. The b_blkno in the 10928 * shadow buf will also be set to this value. We should never 10929 * change b_blkno in the original bp however. 10930 * 10931 * Note also that the shadow buf will always need to be a 10932 * READ command, regardless of whether the incoming command 10933 * is a READ or a WRITE. 10934 */ 10935 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 10936 xp->xb_blkno, 10937 (int (*)(struct buf *)) sd_mapblocksize_iodone); 10938 10939 shadow_xp = SD_GET_XBUF(shadow_bp); 10940 10941 /* 10942 * Allocate the layer-private data for the shadow buf. 10943 * (No need to preserve xb_private in the shadow xbuf.) 
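 *
 * As a concrete example of the mapping (and of the copy_offset
 * computed just below), assume un_sys_blocksize == 512,
 * un_tgt_blocksize == 2048, and a 1024-byte request at system
 * block 5:
 *
 *	first_byte    = 5 * 512                     = 2560
 *	start_block   = 2560 / 2048                 = 1
 *	end_block     = (2560 + 1024 + 2047) / 2048 = 2
 *	request_bytes = (2 - 1) * 2048              = 2048
 *	copy_offset   = 2560 - (1 * 2048)           = 512
 *
 * One 2048-byte target block is read into the shadow buf, and the
 * caller's 1024 bytes start 512 bytes into it.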
10944 */ 10945 shadow_xp->xb_private = shadow_bsp = 10946 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10947 10948 /* 10949 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 10950 * to figure out where the start of the user data is (based upon 10951 * the system block size) in the data returned by the READ 10952 * command (which will be based upon the target blocksize). Note 10953 * that this is only really used if the request is unaligned. 10954 */ 10955 bsp->mbs_copy_offset = (ssize_t)(first_byte - 10956 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 10957 ASSERT((bsp->mbs_copy_offset >= 0) && 10958 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 10959 10960 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 10961 10962 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 10963 10964 /* Transfer the wmap (if any) to the shadow buf */ 10965 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 10966 bsp->mbs_wmp = NULL; 10967 10968 /* 10969 * The shadow buf goes on from here in place of the 10970 * original buf. 10971 */ 10972 shadow_bsp->mbs_orig_bp = bp; 10973 bp = shadow_bp; 10974 } 10975 10976 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10977 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 10978 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10979 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 10980 request_bytes); 10981 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10982 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 10983 10984 done: 10985 SD_NEXT_IOSTART(index, un, bp); 10986 10987 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10988 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 10989 } 10990 10991 10992 /* 10993 * Function: sd_mapblocksize_iodone 10994 * 10995 * Description: Completion side processing for block-size mapping. 10996 * 10997 * Context: May be called under interrupt context 10998 */ 10999 11000 static void 11001 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11002 { 11003 struct sd_mapblocksize_info *bsp; 11004 struct sd_xbuf *xp; 11005 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11006 struct buf *orig_bp; /* ptr to the original buf */ 11007 offset_t shadow_end; 11008 offset_t request_end; 11009 offset_t shadow_start; 11010 ssize_t copy_offset; 11011 size_t copy_length; 11012 size_t shortfall; 11013 uint_t is_write; /* TRUE if this bp is a WRITE */ 11014 uint_t has_wmap; /* TRUE is this bp has a wmap */ 11015 11016 ASSERT(un != NULL); 11017 ASSERT(bp != NULL); 11018 11019 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11020 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11021 11022 /* 11023 * There is no shadow buf or layer-private data if the target is 11024 * using un->un_sys_blocksize as its block size or if bcount == 0. 11025 */ 11026 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11027 (bp->b_bcount == 0)) { 11028 goto exit; 11029 } 11030 11031 xp = SD_GET_XBUF(bp); 11032 ASSERT(xp != NULL); 11033 11034 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11035 bsp = xp->xb_private; 11036 11037 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11038 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11039 11040 if (is_write) { 11041 /* 11042 * For a WRITE request we must free up the block range that 11043 * we have locked up. This holds regardless of whether this is 11044 * an aligned write request or a read-modify-write request. 
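 *
 * Note that for a read-modify-write the wmap was handed off to the
 * shadow buf by sd_mapblocksize_iostart() (and the original buf's
 * mbs_wmp was set to NULL), so it is the completion of the shadow's
 * WRITE phase that reaches this point and releases the range.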
11045 */ 11046 sd_range_unlock(un, bsp->mbs_wmp); 11047 bsp->mbs_wmp = NULL; 11048 } 11049 11050 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11051 /* 11052 * An aligned read or write command will have no shadow buf; 11053 * there is not much else to do with it. 11054 */ 11055 goto done; 11056 } 11057 11058 orig_bp = bsp->mbs_orig_bp; 11059 ASSERT(orig_bp != NULL); 11060 orig_xp = SD_GET_XBUF(orig_bp); 11061 ASSERT(orig_xp != NULL); 11062 ASSERT(!mutex_owned(SD_MUTEX(un))); 11063 11064 if (!is_write && has_wmap) { 11065 /* 11066 * A READ with a wmap means this is the READ phase of a 11067 * read-modify-write. If an error occurred on the READ then 11068 * we do not proceed with the WRITE phase or copy any data. 11069 * Just release the write maps and return with an error. 11070 */ 11071 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11072 orig_bp->b_resid = orig_bp->b_bcount; 11073 bioerror(orig_bp, bp->b_error); 11074 sd_range_unlock(un, bsp->mbs_wmp); 11075 goto freebuf_done; 11076 } 11077 } 11078 11079 /* 11080 * Here is where we set up to copy the data from the shadow buf 11081 * into the space associated with the original buf. 11082 * 11083 * To deal with the conversion between block sizes, these 11084 * computations treat the data as an array of bytes, with the 11085 * first byte (byte 0) corresponding to the first byte in the 11086 * first block on the disk. 11087 */ 11088 11089 /* 11090 * shadow_start and shadow_len indicate the location and size of 11091 * the data returned with the shadow IO request. 11092 */ 11093 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11094 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11095 11096 /* 11097 * copy_offset gives the offset (in bytes) from the start of the first 11098 * block of the READ request to the beginning of the data. We retrieve 11099 * this value from xb_pktp in the ORIGINAL xbuf, as it has been saved 11100 * there by sd_mapblockize_iostart(). copy_length gives the amount of 11101 * data to be copied (in bytes). 11102 */ 11103 copy_offset = bsp->mbs_copy_offset; 11104 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11105 copy_length = orig_bp->b_bcount; 11106 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11107 11108 /* 11109 * Set up the resid and error fields of orig_bp as appropriate. 11110 */ 11111 if (shadow_end >= request_end) { 11112 /* We got all the requested data; set resid to zero */ 11113 orig_bp->b_resid = 0; 11114 } else { 11115 /* 11116 * We failed to get enough data to fully satisfy the original 11117 * request. Just copy back whatever data we got and set 11118 * up the residual and error code as required. 11119 * 11120 * 'shortfall' is the amount by which the data received with the 11121 * shadow buf has "fallen short" of the requested amount. 11122 */ 11123 shortfall = (size_t)(request_end - shadow_end); 11124 11125 if (shortfall > orig_bp->b_bcount) { 11126 /* 11127 * We did not get enough data to even partially 11128 * fulfill the original request. The residual is 11129 * equal to the amount requested. 11130 */ 11131 orig_bp->b_resid = orig_bp->b_bcount; 11132 } else { 11133 /* 11134 * We did not get all the data that we requested 11135 * from the device, but we will try to return what 11136 * portion we did get. 
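 *
 * For example, if the original request was for 4096 bytes with
 * shadow_start == 0 and copy_offset == 512 (so request_end == 4608),
 * and the shadow transfer returned only 2048 bytes (shadow_end ==
 * 2048), then shortfall == 2560: b_resid is set to 2560 and only
 * 4096 - 2560 == 1536 bytes are copied back to the caller.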
11137 */ 11138 orig_bp->b_resid = shortfall; 11139 } 11140 ASSERT(copy_length >= orig_bp->b_resid); 11141 copy_length -= orig_bp->b_resid; 11142 } 11143 11144 /* Propagate the error code from the shadow buf to the original buf */ 11145 bioerror(orig_bp, bp->b_error); 11146 11147 if (is_write) { 11148 goto freebuf_done; /* No data copying for a WRITE */ 11149 } 11150 11151 if (has_wmap) { 11152 /* 11153 * This is a READ command from the READ phase of a 11154 * read-modify-write request. We have to copy the data given 11155 * by the user OVER the data returned by the READ command, 11156 * then convert the command from a READ to a WRITE and send 11157 * it back to the target. 11158 */ 11159 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11160 copy_length); 11161 11162 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11163 11164 /* 11165 * Dispatch the WRITE command to the taskq thread, which 11166 * will in turn send the command to the target. When the 11167 * WRITE command completes, we (sd_mapblocksize_iodone()) 11168 * will get called again as part of the iodone chain 11169 * processing for it. Note that we will still be dealing 11170 * with the shadow buf at that point. 11171 */ 11172 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11173 KM_NOSLEEP) != 0) { 11174 /* 11175 * Dispatch was successful so we are done. Return 11176 * without going any higher up the iodone chain. Do 11177 * not free up any layer-private data until after the 11178 * WRITE completes. 11179 */ 11180 return; 11181 } 11182 11183 /* 11184 * Dispatch of the WRITE command failed; set up the error 11185 * condition and send this IO back up the iodone chain. 11186 */ 11187 bioerror(orig_bp, EIO); 11188 orig_bp->b_resid = orig_bp->b_bcount; 11189 11190 } else { 11191 /* 11192 * This is a regular READ request (ie, not a RMW). Copy the 11193 * data from the shadow buf into the original buf. The 11194 * copy_offset compensates for any "misalignment" between the 11195 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11196 * original buf (with its un->un_sys_blocksize blocks). 11197 */ 11198 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11199 copy_length); 11200 } 11201 11202 freebuf_done: 11203 11204 /* 11205 * At this point we still have both the shadow buf AND the original 11206 * buf to deal with, as well as the layer-private data area in each. 11207 * Local variables are as follows: 11208 * 11209 * bp -- points to shadow buf 11210 * xp -- points to xbuf of shadow buf 11211 * bsp -- points to layer-private data area of shadow buf 11212 * orig_bp -- points to original buf 11213 * 11214 * First free the shadow buf and its associated xbuf, then free the 11215 * layer-private data area from the shadow buf. There is no need to 11216 * restore xb_private in the shadow xbuf. 11217 */ 11218 sd_shadow_buf_free(bp); 11219 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11220 11221 /* 11222 * Now update the local variables to point to the original buf, xbuf, 11223 * and layer-private area. 11224 */ 11225 bp = orig_bp; 11226 xp = SD_GET_XBUF(bp); 11227 ASSERT(xp != NULL); 11228 ASSERT(xp == orig_xp); 11229 bsp = xp->xb_private; 11230 ASSERT(bsp != NULL); 11231 11232 done: 11233 /* 11234 * Restore xb_private to whatever it was set to by the next higher 11235 * layer in the chain, then free the layer-private data area. 
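 * (This undoes the save of the caller's xb_private that was done
 * near the top of sd_mapblocksize_iostart().)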
11236 */ 11237 xp->xb_private = bsp->mbs_oprivate; 11238 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11239 11240 exit: 11241 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11242 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11243 11244 SD_NEXT_IODONE(index, un, bp); 11245 } 11246 11247 11248 /* 11249 * Function: sd_checksum_iostart 11250 * 11251 * Description: A stub function for a layer that's currently not used. 11252 * For now just a placeholder. 11253 * 11254 * Context: Kernel thread context 11255 */ 11256 11257 static void 11258 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11259 { 11260 ASSERT(un != NULL); 11261 ASSERT(bp != NULL); 11262 ASSERT(!mutex_owned(SD_MUTEX(un))); 11263 SD_NEXT_IOSTART(index, un, bp); 11264 } 11265 11266 11267 /* 11268 * Function: sd_checksum_iodone 11269 * 11270 * Description: A stub function for a layer that's currently not used. 11271 * For now just a placeholder. 11272 * 11273 * Context: May be called under interrupt context 11274 */ 11275 11276 static void 11277 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11278 { 11279 ASSERT(un != NULL); 11280 ASSERT(bp != NULL); 11281 ASSERT(!mutex_owned(SD_MUTEX(un))); 11282 SD_NEXT_IODONE(index, un, bp); 11283 } 11284 11285 11286 /* 11287 * Function: sd_checksum_uscsi_iostart 11288 * 11289 * Description: A stub function for a layer that's currently not used. 11290 * For now just a placeholder. 11291 * 11292 * Context: Kernel thread context 11293 */ 11294 11295 static void 11296 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11297 { 11298 ASSERT(un != NULL); 11299 ASSERT(bp != NULL); 11300 ASSERT(!mutex_owned(SD_MUTEX(un))); 11301 SD_NEXT_IOSTART(index, un, bp); 11302 } 11303 11304 11305 /* 11306 * Function: sd_checksum_uscsi_iodone 11307 * 11308 * Description: A stub function for a layer that's currently not used. 11309 * For now just a placeholder. 11310 * 11311 * Context: May be called under interrupt context 11312 */ 11313 11314 static void 11315 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11316 { 11317 ASSERT(un != NULL); 11318 ASSERT(bp != NULL); 11319 ASSERT(!mutex_owned(SD_MUTEX(un))); 11320 SD_NEXT_IODONE(index, un, bp); 11321 } 11322 11323 11324 /* 11325 * Function: sd_pm_iostart 11326 * 11327 * Description: iostart-side routine for Power mangement. 11328 * 11329 * Context: Kernel thread context 11330 */ 11331 11332 static void 11333 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11334 { 11335 ASSERT(un != NULL); 11336 ASSERT(bp != NULL); 11337 ASSERT(!mutex_owned(SD_MUTEX(un))); 11338 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11339 11340 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11341 11342 if (sd_pm_entry(un) != DDI_SUCCESS) { 11343 /* 11344 * Set up to return the failed buf back up the 'iodone' 11345 * side of the calling chain. 11346 */ 11347 bioerror(bp, EIO); 11348 bp->b_resid = bp->b_bcount; 11349 11350 SD_BEGIN_IODONE(index, un, bp); 11351 11352 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11353 return; 11354 } 11355 11356 SD_NEXT_IOSTART(index, un, bp); 11357 11358 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11359 } 11360 11361 11362 /* 11363 * Function: sd_pm_iodone 11364 * 11365 * Description: iodone-side routine for power mangement. 
11366 * 11367 * Context: may be called from interrupt context 11368 */ 11369 11370 static void 11371 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11372 { 11373 ASSERT(un != NULL); 11374 ASSERT(bp != NULL); 11375 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11376 11377 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11378 11379 /* 11380 * After attach the following flag is only read, so don't 11381 * take the penalty of acquiring a mutex for it. 11382 */ 11383 if (un->un_f_pm_is_enabled == TRUE) { 11384 sd_pm_exit(un); 11385 } 11386 11387 SD_NEXT_IODONE(index, un, bp); 11388 11389 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11390 } 11391 11392 11393 /* 11394 * Function: sd_core_iostart 11395 * 11396 * Description: Primary driver function for enqueuing buf(9S) structs from 11397 * the system and initiating IO to the target device 11398 * 11399 * Context: Kernel thread context. Can sleep. 11400 * 11401 * Assumptions: - The given xp->xb_blkno is absolute 11402 * (ie, relative to the start of the device). 11403 * - The IO is to be done using the native blocksize of 11404 * the device, as specified in un->un_tgt_blocksize. 11405 */ 11406 /* ARGSUSED */ 11407 static void 11408 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11409 { 11410 struct sd_xbuf *xp; 11411 11412 ASSERT(un != NULL); 11413 ASSERT(bp != NULL); 11414 ASSERT(!mutex_owned(SD_MUTEX(un))); 11415 ASSERT(bp->b_resid == 0); 11416 11417 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11418 11419 xp = SD_GET_XBUF(bp); 11420 ASSERT(xp != NULL); 11421 11422 mutex_enter(SD_MUTEX(un)); 11423 11424 /* 11425 * If we are currently in the failfast state, fail any new IO 11426 * that has B_FAILFAST set, then return. 11427 */ 11428 if ((bp->b_flags & B_FAILFAST) && 11429 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11430 mutex_exit(SD_MUTEX(un)); 11431 bioerror(bp, EIO); 11432 bp->b_resid = bp->b_bcount; 11433 SD_BEGIN_IODONE(index, un, bp); 11434 return; 11435 } 11436 11437 if (SD_IS_DIRECT_PRIORITY(xp)) { 11438 /* 11439 * Priority command -- transport it immediately. 11440 * 11441 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11442 * because all direct priority commands should be associated 11443 * with error recovery actions which we don't want to retry. 11444 */ 11445 sd_start_cmds(un, bp); 11446 } else { 11447 /* 11448 * Normal command -- add it to the wait queue, then start 11449 * transporting commands from the wait queue. 11450 */ 11451 sd_add_buf_to_waitq(un, bp); 11452 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11453 sd_start_cmds(un, NULL); 11454 } 11455 11456 mutex_exit(SD_MUTEX(un)); 11457 11458 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11459 } 11460 11461 11462 /* 11463 * Function: sd_init_cdb_limits 11464 * 11465 * Description: This is to handle scsi_pkt initialization differences 11466 * between the driver platforms. 11467 * 11468 * Legacy behaviors: 11469 * 11470 * If the block number or the sector count exceeds the 11471 * capabilities of a Group 0 command, shift over to a 11472 * Group 1 command. We don't blindly use Group 1 11473 * commands because a) some drives (CDC Wren IVs) get a 11474 * bit confused, and b) there is probably a fair amount 11475 * of speed difference for a target to receive and decode 11476 * a 10 byte command instead of a 6 byte command. 11477 * 11478 * The xfer time difference of 6 vs 10 byte CDBs is 11479 * still significant so this code is still worthwhile. 
11480 * 10 byte CDBs are very inefficient with the fas HBA driver 11481 * and older disks. Each CDB byte took 1 usec with some 11482 * popular disks. 11483 * 11484 * Context: Must be called at attach time 11485 */ 11486 11487 static void 11488 sd_init_cdb_limits(struct sd_lun *un) 11489 { 11490 int hba_cdb_limit; 11491 11492 /* 11493 * Use CDB_GROUP1 commands for most devices except for 11494 * parallel SCSI fixed drives in which case we get better 11495 * performance using CDB_GROUP0 commands (where applicable). 11496 */ 11497 un->un_mincdb = SD_CDB_GROUP1; 11498 #if !defined(__fibre) 11499 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11500 !un->un_f_has_removable_media) { 11501 un->un_mincdb = SD_CDB_GROUP0; 11502 } 11503 #endif 11504 11505 /* 11506 * Try to read the max-cdb-length supported by HBA. 11507 */ 11508 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11509 if (0 >= un->un_max_hba_cdb) { 11510 un->un_max_hba_cdb = CDB_GROUP4; 11511 hba_cdb_limit = SD_CDB_GROUP4; 11512 } else if (0 < un->un_max_hba_cdb && 11513 un->un_max_hba_cdb < CDB_GROUP1) { 11514 hba_cdb_limit = SD_CDB_GROUP0; 11515 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11516 un->un_max_hba_cdb < CDB_GROUP5) { 11517 hba_cdb_limit = SD_CDB_GROUP1; 11518 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11519 un->un_max_hba_cdb < CDB_GROUP4) { 11520 hba_cdb_limit = SD_CDB_GROUP5; 11521 } else { 11522 hba_cdb_limit = SD_CDB_GROUP4; 11523 } 11524 11525 /* 11526 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11527 * commands for fixed disks unless we are building for a 32 bit 11528 * kernel. 11529 */ 11530 #ifdef _LP64 11531 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11532 min(hba_cdb_limit, SD_CDB_GROUP4); 11533 #else 11534 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11535 min(hba_cdb_limit, SD_CDB_GROUP1); 11536 #endif 11537 11538 /* 11539 * x86 systems require the PKT_DMA_PARTIAL flag 11540 */ 11541 #if defined(__x86) 11542 un->un_pkt_flags = PKT_DMA_PARTIAL; 11543 #else 11544 un->un_pkt_flags = 0; 11545 #endif 11546 11547 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11548 ? sizeof (struct scsi_arq_status) : 1); 11549 un->un_cmd_timeout = (ushort_t)sd_io_time; 11550 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 11551 } 11552 11553 11554 /* 11555 * Function: sd_initpkt_for_buf 11556 * 11557 * Description: Allocate and initialize for transport a scsi_pkt struct, 11558 * based upon the info specified in the given buf struct. 11559 * 11560 * Assumes the xb_blkno in the request is absolute (ie, 11561 * relative to the start of the device (NOT partition!). 11562 * Also assumes that the request is using the native block 11563 * size of the device (as returned by the READ CAPACITY 11564 * command). 11565 * 11566 * Return Code: SD_PKT_ALLOC_SUCCESS 11567 * SD_PKT_ALLOC_FAILURE 11568 * SD_PKT_ALLOC_FAILURE_NO_DMA 11569 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11570 * 11571 * Context: Kernel thread and may be called from software interrupt context 11572 * as part of a sdrunout callback. 
This function may not block or 11573 * call routines that block 11574 */ 11575 11576 static int 11577 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11578 { 11579 struct sd_xbuf *xp; 11580 struct scsi_pkt *pktp = NULL; 11581 struct sd_lun *un; 11582 size_t blockcount; 11583 daddr_t startblock; 11584 int rval; 11585 int cmd_flags; 11586 11587 ASSERT(bp != NULL); 11588 ASSERT(pktpp != NULL); 11589 xp = SD_GET_XBUF(bp); 11590 ASSERT(xp != NULL); 11591 un = SD_GET_UN(bp); 11592 ASSERT(un != NULL); 11593 ASSERT(mutex_owned(SD_MUTEX(un))); 11594 ASSERT(bp->b_resid == 0); 11595 11596 SD_TRACE(SD_LOG_IO_CORE, un, 11597 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11598 11599 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11600 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11601 /* 11602 * Already have a scsi_pkt -- just need DMA resources. 11603 * We must recompute the CDB in case the mapping returns 11604 * a nonzero pkt_resid. 11605 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11606 * that is being retried, the unmap/remap of the DMA resouces 11607 * will result in the entire transfer starting over again 11608 * from the very first block. 11609 */ 11610 ASSERT(xp->xb_pktp != NULL); 11611 pktp = xp->xb_pktp; 11612 } else { 11613 pktp = NULL; 11614 } 11615 #endif /* __i386 || __amd64 */ 11616 11617 startblock = xp->xb_blkno; /* Absolute block num. */ 11618 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11619 11620 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11621 11622 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11623 11624 #else 11625 11626 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11627 11628 #endif 11629 11630 /* 11631 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11632 * call scsi_init_pkt, and build the CDB. 11633 */ 11634 rval = sd_setup_rw_pkt(un, &pktp, bp, 11635 cmd_flags, sdrunout, (caddr_t)un, 11636 startblock, blockcount); 11637 11638 if (rval == 0) { 11639 /* 11640 * Success. 11641 * 11642 * If partial DMA is being used and required for this transfer. 11643 * set it up here. 11644 */ 11645 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11646 (pktp->pkt_resid != 0)) { 11647 11648 /* 11649 * Save the CDB length and pkt_resid for the 11650 * next xfer 11651 */ 11652 xp->xb_dma_resid = pktp->pkt_resid; 11653 11654 /* rezero resid */ 11655 pktp->pkt_resid = 0; 11656 11657 } else { 11658 xp->xb_dma_resid = 0; 11659 } 11660 11661 pktp->pkt_flags = un->un_tagflags; 11662 pktp->pkt_time = un->un_cmd_timeout; 11663 pktp->pkt_comp = sdintr; 11664 11665 pktp->pkt_private = bp; 11666 *pktpp = pktp; 11667 11668 SD_TRACE(SD_LOG_IO_CORE, un, 11669 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11670 11671 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11672 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11673 #endif 11674 11675 return (SD_PKT_ALLOC_SUCCESS); 11676 11677 } 11678 11679 /* 11680 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11681 * from sd_setup_rw_pkt. 11682 */ 11683 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11684 11685 if (rval == SD_PKT_ALLOC_FAILURE) { 11686 *pktpp = NULL; 11687 /* 11688 * Set the driver state to RWAIT to indicate the driver 11689 * is waiting on resource allocations. The driver will not 11690 * suspend, pm_suspend, or detatch while the state is RWAIT. 11691 */ 11692 New_state(un, SD_STATE_RWAIT); 11693 11694 SD_ERROR(SD_LOG_IO_CORE, un, 11695 "sd_initpkt_for_buf: No pktp. 
exit bp:0x%p\n", bp); 11696 11697 if ((bp->b_flags & B_ERROR) != 0) { 11698 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11699 } 11700 return (SD_PKT_ALLOC_FAILURE); 11701 } else { 11702 /* 11703 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11704 * 11705 * This should never happen. Maybe someone messed with the 11706 * kernel's minphys? 11707 */ 11708 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11709 "Request rejected: too large for CDB: " 11710 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11711 SD_ERROR(SD_LOG_IO_CORE, un, 11712 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11713 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11714 11715 } 11716 } 11717 11718 11719 /* 11720 * Function: sd_destroypkt_for_buf 11721 * 11722 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11723 * 11724 * Context: Kernel thread or interrupt context 11725 */ 11726 11727 static void 11728 sd_destroypkt_for_buf(struct buf *bp) 11729 { 11730 ASSERT(bp != NULL); 11731 ASSERT(SD_GET_UN(bp) != NULL); 11732 11733 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11734 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11735 11736 ASSERT(SD_GET_PKTP(bp) != NULL); 11737 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11738 11739 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11740 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11741 } 11742 11743 /* 11744 * Function: sd_setup_rw_pkt 11745 * 11746 * Description: Determines appropriate CDB group for the requested LBA 11747 * and transfer length, calls scsi_init_pkt, and builds 11748 * the CDB. Do not use for partial DMA transfers except 11749 * for the initial transfer since the CDB size must 11750 * remain constant. 11751 * 11752 * Context: Kernel thread and may be called from software interrupt 11753 * context as part of a sdrunout callback. This function may not 11754 * block or call routines that block 11755 */ 11756 11757 11758 int 11759 sd_setup_rw_pkt(struct sd_lun *un, 11760 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11761 int (*callback)(caddr_t), caddr_t callback_arg, 11762 diskaddr_t lba, uint32_t blockcount) 11763 { 11764 struct scsi_pkt *return_pktp; 11765 union scsi_cdb *cdbp; 11766 struct sd_cdbinfo *cp = NULL; 11767 int i; 11768 11769 /* 11770 * See which size CDB to use, based upon the request. 11771 */ 11772 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11773 11774 /* 11775 * Check lba and block count against sd_cdbtab limits. 11776 * In the partial DMA case, we have to use the same size 11777 * CDB for all the transfers. Check lba + blockcount 11778 * against the max LBA so we know that segment of the 11779 * transfer can use the CDB we select. 11780 */ 11781 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11782 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11783 11784 /* 11785 * The command will fit into the CDB type 11786 * specified by sd_cdbtab[i]. 11787 */ 11788 cp = sd_cdbtab + i; 11789 11790 /* 11791 * Call scsi_init_pkt so we can fill in the 11792 * CDB. 11793 */ 11794 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 11795 bp, cp->sc_grpcode, un->un_status_len, 0, 11796 flags, callback, callback_arg); 11797 11798 if (return_pktp != NULL) { 11799 11800 /* 11801 * Return new value of pkt 11802 */ 11803 *pktpp = return_pktp; 11804 11805 /* 11806 * To be safe, zero the CDB insuring there is 11807 * no leftover data from a previous command. 
11808 */ 11809 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 11810 11811 /* 11812 * Handle partial DMA mapping 11813 */ 11814 if (return_pktp->pkt_resid != 0) { 11815 11816 /* 11817 * Not going to xfer as many blocks as 11818 * originally expected 11819 */ 11820 blockcount -= 11821 SD_BYTES2TGTBLOCKS(un, 11822 return_pktp->pkt_resid); 11823 } 11824 11825 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 11826 11827 /* 11828 * Set command byte based on the CDB 11829 * type we matched. 11830 */ 11831 cdbp->scc_cmd = cp->sc_grpmask | 11832 ((bp->b_flags & B_READ) ? 11833 SCMD_READ : SCMD_WRITE); 11834 11835 SD_FILL_SCSI1_LUN(un, return_pktp); 11836 11837 /* 11838 * Fill in LBA and length 11839 */ 11840 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 11841 (cp->sc_grpcode == CDB_GROUP4) || 11842 (cp->sc_grpcode == CDB_GROUP0) || 11843 (cp->sc_grpcode == CDB_GROUP5)); 11844 11845 if (cp->sc_grpcode == CDB_GROUP1) { 11846 FORMG1ADDR(cdbp, lba); 11847 FORMG1COUNT(cdbp, blockcount); 11848 return (0); 11849 } else if (cp->sc_grpcode == CDB_GROUP4) { 11850 FORMG4LONGADDR(cdbp, lba); 11851 FORMG4COUNT(cdbp, blockcount); 11852 return (0); 11853 } else if (cp->sc_grpcode == CDB_GROUP0) { 11854 FORMG0ADDR(cdbp, lba); 11855 FORMG0COUNT(cdbp, blockcount); 11856 return (0); 11857 } else if (cp->sc_grpcode == CDB_GROUP5) { 11858 FORMG5ADDR(cdbp, lba); 11859 FORMG5COUNT(cdbp, blockcount); 11860 return (0); 11861 } 11862 11863 /* 11864 * It should be impossible to not match one 11865 * of the CDB types above, so we should never 11866 * reach this point. Set the CDB command byte 11867 * to test-unit-ready to avoid writing 11868 * to somewhere we don't intend. 11869 */ 11870 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 11871 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11872 } else { 11873 /* 11874 * Couldn't get scsi_pkt 11875 */ 11876 return (SD_PKT_ALLOC_FAILURE); 11877 } 11878 } 11879 } 11880 11881 /* 11882 * None of the available CDB types were suitable. This really 11883 * should never happen: on a 64 bit system we support 11884 * READ16/WRITE16 which will hold an entire 64 bit disk address 11885 * and on a 32 bit system we will refuse to bind to a device 11886 * larger than 2TB so addresses will never be larger than 32 bits. 11887 */ 11888 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11889 } 11890 11891 #if defined(__i386) || defined(__amd64) 11892 /* 11893 * Function: sd_setup_next_rw_pkt 11894 * 11895 * Description: Setup packet for partial DMA transfers, except for the 11896 * initial transfer. sd_setup_rw_pkt should be used for 11897 * the initial transfer. 11898 * 11899 * Context: Kernel thread and may be called from interrupt context. 11900 */ 11901 11902 int 11903 sd_setup_next_rw_pkt(struct sd_lun *un, 11904 struct scsi_pkt *pktp, struct buf *bp, 11905 diskaddr_t lba, uint32_t blockcount) 11906 { 11907 uchar_t com; 11908 union scsi_cdb *cdbp; 11909 uchar_t cdb_group_id; 11910 11911 ASSERT(pktp != NULL); 11912 ASSERT(pktp->pkt_cdbp != NULL); 11913 11914 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 11915 com = cdbp->scc_cmd; 11916 cdb_group_id = CDB_GROUPID(com); 11917 11918 ASSERT((cdb_group_id == CDB_GROUPID_0) || 11919 (cdb_group_id == CDB_GROUPID_1) || 11920 (cdb_group_id == CDB_GROUPID_4) || 11921 (cdb_group_id == CDB_GROUPID_5)); 11922 11923 /* 11924 * Move pkt to the next portion of the xfer. 11925 * func is NULL_FUNC so we do not have to release 11926 * the disk mutex here. 11927 */ 11928 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 11929 NULL_FUNC, NULL) == pktp) { 11930 /* Success. 
Handle partial DMA */ 11931 if (pktp->pkt_resid != 0) { 11932 blockcount -= 11933 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 11934 } 11935 11936 cdbp->scc_cmd = com; 11937 SD_FILL_SCSI1_LUN(un, pktp); 11938 if (cdb_group_id == CDB_GROUPID_1) { 11939 FORMG1ADDR(cdbp, lba); 11940 FORMG1COUNT(cdbp, blockcount); 11941 return (0); 11942 } else if (cdb_group_id == CDB_GROUPID_4) { 11943 FORMG4LONGADDR(cdbp, lba); 11944 FORMG4COUNT(cdbp, blockcount); 11945 return (0); 11946 } else if (cdb_group_id == CDB_GROUPID_0) { 11947 FORMG0ADDR(cdbp, lba); 11948 FORMG0COUNT(cdbp, blockcount); 11949 return (0); 11950 } else if (cdb_group_id == CDB_GROUPID_5) { 11951 FORMG5ADDR(cdbp, lba); 11952 FORMG5COUNT(cdbp, blockcount); 11953 return (0); 11954 } 11955 11956 /* Unreachable */ 11957 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11958 } 11959 11960 /* 11961 * Error setting up next portion of cmd transfer. 11962 * Something is definitely very wrong and this 11963 * should not happen. 11964 */ 11965 return (SD_PKT_ALLOC_FAILURE); 11966 } 11967 #endif /* defined(__i386) || defined(__amd64) */ 11968 11969 /* 11970 * Function: sd_initpkt_for_uscsi 11971 * 11972 * Description: Allocate and initialize for transport a scsi_pkt struct, 11973 * based upon the info specified in the given uscsi_cmd struct. 11974 * 11975 * Return Code: SD_PKT_ALLOC_SUCCESS 11976 * SD_PKT_ALLOC_FAILURE 11977 * SD_PKT_ALLOC_FAILURE_NO_DMA 11978 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11979 * 11980 * Context: Kernel thread and may be called from software interrupt context 11981 * as part of a sdrunout callback. This function may not block or 11982 * call routines that block 11983 */ 11984 11985 static int 11986 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 11987 { 11988 struct uscsi_cmd *uscmd; 11989 struct sd_xbuf *xp; 11990 struct scsi_pkt *pktp; 11991 struct sd_lun *un; 11992 uint32_t flags = 0; 11993 11994 ASSERT(bp != NULL); 11995 ASSERT(pktpp != NULL); 11996 xp = SD_GET_XBUF(bp); 11997 ASSERT(xp != NULL); 11998 un = SD_GET_UN(bp); 11999 ASSERT(un != NULL); 12000 ASSERT(mutex_owned(SD_MUTEX(un))); 12001 12002 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12003 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12004 ASSERT(uscmd != NULL); 12005 12006 SD_TRACE(SD_LOG_IO_CORE, un, 12007 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 12008 12009 /* 12010 * Allocate the scsi_pkt for the command. 12011 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 12012 * during scsi_init_pkt time and will continue to use the 12013 * same path as long as the same scsi_pkt is used without 12014 * intervening scsi_dma_free(). Since uscsi command does 12015 * not call scsi_dmafree() before retry failed command, it 12016 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 12017 * set such that scsi_vhci can use other available path for 12018 * retry. Besides, ucsci command does not allow DMA breakup, 12019 * so there is no need to set PKT_DMA_PARTIAL flag. 12020 */ 12021 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12022 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12023 sizeof (struct scsi_arq_status), 0, 12024 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 12025 sdrunout, (caddr_t)un); 12026 12027 if (pktp == NULL) { 12028 *pktpp = NULL; 12029 /* 12030 * Set the driver state to RWAIT to indicate the driver 12031 * is waiting on resource allocations. The driver will not 12032 * suspend, pm_suspend, or detatch while the state is RWAIT. 
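 * The state is returned to SD_STATE_NORMAL by sd_start_cmds() when
 * it is next able to start a command.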
12033 */ 12034 New_state(un, SD_STATE_RWAIT); 12035 12036 SD_ERROR(SD_LOG_IO_CORE, un, 12037 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 12038 12039 if ((bp->b_flags & B_ERROR) != 0) { 12040 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12041 } 12042 return (SD_PKT_ALLOC_FAILURE); 12043 } 12044 12045 /* 12046 * We do not do DMA breakup for USCSI commands, so return failure 12047 * here if all the needed DMA resources were not allocated. 12048 */ 12049 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 12050 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 12051 scsi_destroy_pkt(pktp); 12052 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 12053 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 12054 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 12055 } 12056 12057 /* Init the cdb from the given uscsi struct */ 12058 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 12059 uscmd->uscsi_cdb[0], 0, 0, 0); 12060 12061 SD_FILL_SCSI1_LUN(un, pktp); 12062 12063 /* 12064 * Set up the optional USCSI flags. See the uscsi (7I) man page 12065 * for listing of the supported flags. 12066 */ 12067 12068 if (uscmd->uscsi_flags & USCSI_SILENT) { 12069 flags |= FLAG_SILENT; 12070 } 12071 12072 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12073 flags |= FLAG_DIAGNOSE; 12074 } 12075 12076 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12077 flags |= FLAG_ISOLATE; 12078 } 12079 12080 if (un->un_f_is_fibre == FALSE) { 12081 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12082 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12083 } 12084 } 12085 12086 /* 12087 * Set the pkt flags here so we save time later. 12088 * Note: These flags are NOT in the uscsi man page!!! 12089 */ 12090 if (uscmd->uscsi_flags & USCSI_HEAD) { 12091 flags |= FLAG_HEAD; 12092 } 12093 12094 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12095 flags |= FLAG_NOINTR; 12096 } 12097 12098 /* 12099 * For tagged queueing, things get a bit complicated. 12100 * Check first for head of queue and last for ordered queue. 12101 * If neither head nor order, use the default driver tag flags. 12102 */ 12103 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12104 if (uscmd->uscsi_flags & USCSI_HTAG) { 12105 flags |= FLAG_HTAG; 12106 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12107 flags |= FLAG_OTAG; 12108 } else { 12109 flags |= un->un_tagflags & FLAG_TAGMASK; 12110 } 12111 } 12112 12113 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12114 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12115 } 12116 12117 pktp->pkt_flags = flags; 12118 12119 /* Copy the caller's CDB into the pkt... */ 12120 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12121 12122 if (uscmd->uscsi_timeout == 0) { 12123 pktp->pkt_time = un->un_uscsi_timeout; 12124 } else { 12125 pktp->pkt_time = uscmd->uscsi_timeout; 12126 } 12127 12128 /* need it later to identify USCSI request in sdintr */ 12129 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12130 12131 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12132 12133 pktp->pkt_private = bp; 12134 pktp->pkt_comp = sdintr; 12135 *pktpp = pktp; 12136 12137 SD_TRACE(SD_LOG_IO_CORE, un, 12138 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12139 12140 return (SD_PKT_ALLOC_SUCCESS); 12141 } 12142 12143 12144 /* 12145 * Function: sd_destroypkt_for_uscsi 12146 * 12147 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12148 * IOs.. Also saves relevant info into the associated uscsi_cmd 12149 * struct. 
12150 * 12151 * Context: May be called under interrupt context 12152 */ 12153 12154 static void 12155 sd_destroypkt_for_uscsi(struct buf *bp) 12156 { 12157 struct uscsi_cmd *uscmd; 12158 struct sd_xbuf *xp; 12159 struct scsi_pkt *pktp; 12160 struct sd_lun *un; 12161 12162 ASSERT(bp != NULL); 12163 xp = SD_GET_XBUF(bp); 12164 ASSERT(xp != NULL); 12165 un = SD_GET_UN(bp); 12166 ASSERT(un != NULL); 12167 ASSERT(!mutex_owned(SD_MUTEX(un))); 12168 pktp = SD_GET_PKTP(bp); 12169 ASSERT(pktp != NULL); 12170 12171 SD_TRACE(SD_LOG_IO_CORE, un, 12172 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12173 12174 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12175 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12176 ASSERT(uscmd != NULL); 12177 12178 /* Save the status and the residual into the uscsi_cmd struct */ 12179 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12180 uscmd->uscsi_resid = bp->b_resid; 12181 12182 /* 12183 * If enabled, copy any saved sense data into the area specified 12184 * by the uscsi command. 12185 */ 12186 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12187 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12188 /* 12189 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12190 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12191 */ 12192 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12193 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12194 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH); 12195 } 12196 12197 /* We are done with the scsi_pkt; free it now */ 12198 ASSERT(SD_GET_PKTP(bp) != NULL); 12199 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12200 12201 SD_TRACE(SD_LOG_IO_CORE, un, 12202 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12203 } 12204 12205 12206 /* 12207 * Function: sd_bioclone_alloc 12208 * 12209 * Description: Allocate a buf(9S) and init it as per the given buf 12210 * and the various arguments. The associated sd_xbuf 12211 * struct is (nearly) duplicated. The struct buf *bp 12212 * argument is saved in new_xp->xb_private. 12213 * 12214 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12215 * datalen - size of data area for the shadow bp 12216 * blkno - starting LBA 12217 * func - function pointer for b_iodone in the shadow buf. (May 12218 * be NULL if none.) 12219 * 12220 * Return Code: Pointer to allocates buf(9S) struct 12221 * 12222 * Context: Can sleep. 12223 */ 12224 12225 static struct buf * 12226 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12227 daddr_t blkno, int (*func)(struct buf *)) 12228 { 12229 struct sd_lun *un; 12230 struct sd_xbuf *xp; 12231 struct sd_xbuf *new_xp; 12232 struct buf *new_bp; 12233 12234 ASSERT(bp != NULL); 12235 xp = SD_GET_XBUF(bp); 12236 ASSERT(xp != NULL); 12237 un = SD_GET_UN(bp); 12238 ASSERT(un != NULL); 12239 ASSERT(!mutex_owned(SD_MUTEX(un))); 12240 12241 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12242 NULL, KM_SLEEP); 12243 12244 new_bp->b_lblkno = blkno; 12245 12246 /* 12247 * Allocate an xbuf for the shadow bp and copy the contents of the 12248 * original xbuf into it. 12249 */ 12250 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12251 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12252 12253 /* 12254 * The given bp is automatically saved in the xb_private member 12255 * of the new xbuf. Callers are allowed to depend on this. 
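 * For example, sd_mapblockaddr_iodone() uses this to recover the
 * original request from its "overrun" shadow buf:
 *
 *	obp = (struct buf *)xp->xb_private;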
12256 */ 12257 new_xp->xb_private = bp; 12258 12259 new_bp->b_private = new_xp; 12260 12261 return (new_bp); 12262 } 12263 12264 /* 12265 * Function: sd_shadow_buf_alloc 12266 * 12267 * Description: Allocate a buf(9S) and init it as per the given buf 12268 * and the various arguments. The associated sd_xbuf 12269 * struct is (nearly) duplicated. The struct buf *bp 12270 * argument is saved in new_xp->xb_private. 12271 * 12272 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12273 * datalen - size of data area for the shadow bp 12274 * bflags - B_READ or B_WRITE (pseudo flag) 12275 * blkno - starting LBA 12276 * func - function pointer for b_iodone in the shadow buf. (May 12277 * be NULL if none.) 12278 * 12279 * Return Code: Pointer to allocates buf(9S) struct 12280 * 12281 * Context: Can sleep. 12282 */ 12283 12284 static struct buf * 12285 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12286 daddr_t blkno, int (*func)(struct buf *)) 12287 { 12288 struct sd_lun *un; 12289 struct sd_xbuf *xp; 12290 struct sd_xbuf *new_xp; 12291 struct buf *new_bp; 12292 12293 ASSERT(bp != NULL); 12294 xp = SD_GET_XBUF(bp); 12295 ASSERT(xp != NULL); 12296 un = SD_GET_UN(bp); 12297 ASSERT(un != NULL); 12298 ASSERT(!mutex_owned(SD_MUTEX(un))); 12299 12300 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12301 bp_mapin(bp); 12302 } 12303 12304 bflags &= (B_READ | B_WRITE); 12305 #if defined(__i386) || defined(__amd64) 12306 new_bp = getrbuf(KM_SLEEP); 12307 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12308 new_bp->b_bcount = datalen; 12309 new_bp->b_flags = bflags | 12310 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12311 #else 12312 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12313 datalen, bflags, SLEEP_FUNC, NULL); 12314 #endif 12315 new_bp->av_forw = NULL; 12316 new_bp->av_back = NULL; 12317 new_bp->b_dev = bp->b_dev; 12318 new_bp->b_blkno = blkno; 12319 new_bp->b_iodone = func; 12320 new_bp->b_edev = bp->b_edev; 12321 new_bp->b_resid = 0; 12322 12323 /* We need to preserve the B_FAILFAST flag */ 12324 if (bp->b_flags & B_FAILFAST) { 12325 new_bp->b_flags |= B_FAILFAST; 12326 } 12327 12328 /* 12329 * Allocate an xbuf for the shadow bp and copy the contents of the 12330 * original xbuf into it. 12331 */ 12332 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12333 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12334 12335 /* Need later to copy data between the shadow buf & original buf! */ 12336 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12337 12338 /* 12339 * The given bp is automatically saved in the xb_private member 12340 * of the new xbuf. Callers are allowed to depend on this. 12341 */ 12342 new_xp->xb_private = bp; 12343 12344 new_bp->b_private = new_xp; 12345 12346 return (new_bp); 12347 } 12348 12349 /* 12350 * Function: sd_bioclone_free 12351 * 12352 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12353 * in the larger than partition operation. 12354 * 12355 * Context: May be called under interrupt context 12356 */ 12357 12358 static void 12359 sd_bioclone_free(struct buf *bp) 12360 { 12361 struct sd_xbuf *xp; 12362 12363 ASSERT(bp != NULL); 12364 xp = SD_GET_XBUF(bp); 12365 ASSERT(xp != NULL); 12366 12367 /* 12368 * Call bp_mapout() before freeing the buf, in case a lower 12369 * layer or HBA had done a bp_mapin(). we must do this here 12370 * as we are the "originator" of the shadow buf. 
12371 */ 12372 bp_mapout(bp); 12373 12374 /* 12375 * Null out b_iodone before freeing the bp, to ensure that the driver 12376 * never gets confused by a stale value in this field. (Just a little 12377 * extra defensiveness here.) 12378 */ 12379 bp->b_iodone = NULL; 12380 12381 freerbuf(bp); 12382 12383 kmem_free(xp, sizeof (struct sd_xbuf)); 12384 } 12385 12386 /* 12387 * Function: sd_shadow_buf_free 12388 * 12389 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12390 * 12391 * Context: May be called under interrupt context 12392 */ 12393 12394 static void 12395 sd_shadow_buf_free(struct buf *bp) 12396 { 12397 struct sd_xbuf *xp; 12398 12399 ASSERT(bp != NULL); 12400 xp = SD_GET_XBUF(bp); 12401 ASSERT(xp != NULL); 12402 12403 #if defined(__sparc) 12404 /* 12405 * Call bp_mapout() before freeing the buf, in case a lower 12406 * layer or HBA had done a bp_mapin(). we must do this here 12407 * as we are the "originator" of the shadow buf. 12408 */ 12409 bp_mapout(bp); 12410 #endif 12411 12412 /* 12413 * Null out b_iodone before freeing the bp, to ensure that the driver 12414 * never gets confused by a stale value in this field. (Just a little 12415 * extra defensiveness here.) 12416 */ 12417 bp->b_iodone = NULL; 12418 12419 #if defined(__i386) || defined(__amd64) 12420 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12421 freerbuf(bp); 12422 #else 12423 scsi_free_consistent_buf(bp); 12424 #endif 12425 12426 kmem_free(xp, sizeof (struct sd_xbuf)); 12427 } 12428 12429 12430 /* 12431 * Function: sd_print_transport_rejected_message 12432 * 12433 * Description: This implements the ludicrously complex rules for printing 12434 * a "transport rejected" message. This is to address the 12435 * specific problem of having a flood of this error message 12436 * produced when a failover occurs. 12437 * 12438 * Context: Any. 12439 */ 12440 12441 static void 12442 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12443 int code) 12444 { 12445 ASSERT(un != NULL); 12446 ASSERT(mutex_owned(SD_MUTEX(un))); 12447 ASSERT(xp != NULL); 12448 12449 /* 12450 * Print the "transport rejected" message under the following 12451 * conditions: 12452 * 12453 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12454 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12455 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12456 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12457 * scsi_transport(9F) (which indicates that the target might have 12458 * gone off-line). This uses the un->un_tran_fatal_count 12459 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12460 * received, and reset to zero whenver a TRAN_ACCEPT is returned 12461 * from scsi_transport(). 12462 * 12463 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12464 * the preceeding cases in order for the message to be printed. 
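 *
 * For example, with SD_LOGMASK_DIAG clear and FLAG_SILENT not set,
 * a failover that causes 100 consecutive TRAN_FATAL_ERROR returns
 * from scsi_transport() produces a single "transport rejected fatal
 * error" message (the one logged when un_tran_fatal_count is 1); a
 * later TRAN_ACCEPT resets the count, so a subsequent fatal error
 * would again be logged once.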
12465 */ 12466 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12467 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12468 (code != TRAN_FATAL_ERROR) || 12469 (un->un_tran_fatal_count == 1)) { 12470 switch (code) { 12471 case TRAN_BADPKT: 12472 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12473 "transport rejected bad packet\n"); 12474 break; 12475 case TRAN_FATAL_ERROR: 12476 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12477 "transport rejected fatal error\n"); 12478 break; 12479 default: 12480 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12481 "transport rejected (%d)\n", code); 12482 break; 12483 } 12484 } 12485 } 12486 } 12487 12488 12489 /* 12490 * Function: sd_add_buf_to_waitq 12491 * 12492 * Description: Add the given buf(9S) struct to the wait queue for the 12493 * instance. If sorting is enabled, then the buf is added 12494 * to the queue via an elevator sort algorithm (a la 12495 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12496 * If sorting is not enabled, then the buf is just added 12497 * to the end of the wait queue. 12498 * 12499 * Return Code: void 12500 * 12501 * Context: Does not sleep/block, therefore technically can be called 12502 * from any context. However if sorting is enabled then the 12503 * execution time is indeterminate, and may take long if 12504 * the wait queue grows large. 12505 */ 12506 12507 static void 12508 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12509 { 12510 struct buf *ap; 12511 12512 ASSERT(bp != NULL); 12513 ASSERT(un != NULL); 12514 ASSERT(mutex_owned(SD_MUTEX(un))); 12515 12516 /* If the queue is empty, add the buf as the only entry & return. */ 12517 if (un->un_waitq_headp == NULL) { 12518 ASSERT(un->un_waitq_tailp == NULL); 12519 un->un_waitq_headp = un->un_waitq_tailp = bp; 12520 bp->av_forw = NULL; 12521 return; 12522 } 12523 12524 ASSERT(un->un_waitq_tailp != NULL); 12525 12526 /* 12527 * If sorting is disabled, just add the buf to the tail end of 12528 * the wait queue and return. 12529 */ 12530 if (un->un_f_disksort_disabled) { 12531 un->un_waitq_tailp->av_forw = bp; 12532 un->un_waitq_tailp = bp; 12533 bp->av_forw = NULL; 12534 return; 12535 } 12536 12537 /* 12538 * Sort thru the list of requests currently on the wait queue 12539 * and add the new buf request at the appropriate position. 12540 * 12541 * The un->un_waitq_headp is an activity chain pointer on which 12542 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12543 * first queue holds those requests which are positioned after 12544 * the current SD_GET_BLKNO() (in the first request); the second holds 12545 * requests which came in after their SD_GET_BLKNO() number was passed. 12546 * Thus we implement a one way scan, retracting after reaching 12547 * the end of the drive to the first request on the second 12548 * queue, at which time it becomes the first queue. 12549 * A one-way scan is natural because of the way UNIX read-ahead 12550 * blocks are allocated. 12551 * 12552 * If we lie after the first request, then we must locate the 12553 * second request list and add ourselves to it. 12554 */ 12555 ap = un->un_waitq_headp; 12556 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12557 while (ap->av_forw != NULL) { 12558 /* 12559 * Look for an "inversion" in the (normally 12560 * ascending) block numbers. This indicates 12561 * the start of the second request list. 12562 */ 12563 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12564 /* 12565 * Search the second request list for the 12566 * first request at a larger block number. 
12567 * We go before that; however if there is 12568 * no such request, we go at the end. 12569 */ 12570 do { 12571 if (SD_GET_BLKNO(bp) < 12572 SD_GET_BLKNO(ap->av_forw)) { 12573 goto insert; 12574 } 12575 ap = ap->av_forw; 12576 } while (ap->av_forw != NULL); 12577 goto insert; /* after last */ 12578 } 12579 ap = ap->av_forw; 12580 } 12581 12582 /* 12583 * No inversions... we will go after the last, and 12584 * be the first request in the second request list. 12585 */ 12586 goto insert; 12587 } 12588 12589 /* 12590 * Request is at/after the current request... 12591 * sort in the first request list. 12592 */ 12593 while (ap->av_forw != NULL) { 12594 /* 12595 * We want to go after the current request (1) if 12596 * there is an inversion after it (i.e. it is the end 12597 * of the first request list), or (2) if the next 12598 * request is a larger block no. than our request. 12599 */ 12600 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12601 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12602 goto insert; 12603 } 12604 ap = ap->av_forw; 12605 } 12606 12607 /* 12608 * Neither a second list nor a larger request, therefore 12609 * we go at the end of the first list (which is the same 12610 * as the end of the whole schebang). 12611 */ 12612 insert: 12613 bp->av_forw = ap->av_forw; 12614 ap->av_forw = bp; 12615 12616 /* 12617 * If we inserted onto the tail end of the waitq, make sure the 12618 * tail pointer is updated. 12619 */ 12620 if (ap == un->un_waitq_tailp) { 12621 un->un_waitq_tailp = bp; 12622 } 12623 } 12624 12625 12626 /* 12627 * Function: sd_start_cmds 12628 * 12629 * Description: Remove and transport cmds from the driver queues. 12630 * 12631 * Arguments: un - pointer to the unit (soft state) struct for the target. 12632 * 12633 * immed_bp - ptr to a buf to be transported immediately. Only 12634 * the immed_bp is transported; bufs on the waitq are not 12635 * processed and the un_retry_bp is not checked. If immed_bp is 12636 * NULL, then normal queue processing is performed. 12637 * 12638 * Context: May be called from kernel thread context, interrupt context, 12639 * or runout callback context. This function may not block or 12640 * call routines that block. 12641 */ 12642 12643 static void 12644 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12645 { 12646 struct sd_xbuf *xp; 12647 struct buf *bp; 12648 void (*statp)(kstat_io_t *); 12649 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12650 void (*saved_statp)(kstat_io_t *); 12651 #endif 12652 int rval; 12653 12654 ASSERT(un != NULL); 12655 ASSERT(mutex_owned(SD_MUTEX(un))); 12656 ASSERT(un->un_ncmds_in_transport >= 0); 12657 ASSERT(un->un_throttle >= 0); 12658 12659 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12660 12661 do { 12662 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12663 saved_statp = NULL; 12664 #endif 12665 12666 /* 12667 * If we are syncing or dumping, fail the command to 12668 * avoid recursively calling back into scsi_transport(). 12669 * The dump I/O itself uses a separate code path so this 12670 * only prevents non-dump I/O from being sent while dumping. 12671 * File system sync takes place before dumping begins. 12672 * During panic, filesystem I/O is allowed provided 12673 * un_in_callback is <= 1. This is to prevent recursion 12674 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12675 * sd_start_cmds and so on. See panic.c for more information 12676 * about the states the system can be in during panic. 
12677 */ 12678 if ((un->un_state == SD_STATE_DUMPING) || 12679 (ddi_in_panic() && (un->un_in_callback > 1))) { 12680 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12681 "sd_start_cmds: panicking\n"); 12682 goto exit; 12683 } 12684 12685 if ((bp = immed_bp) != NULL) { 12686 /* 12687 * We have a bp that must be transported immediately. 12688 * It's OK to transport the immed_bp here without doing 12689 * the throttle limit check because the immed_bp is 12690 * always used in a retry/recovery case. This means 12691 * that we know we are not at the throttle limit by 12692 * virtue of the fact that to get here we must have 12693 * already gotten a command back via sdintr(). This also 12694 * relies on (1) the command on un_retry_bp preventing 12695 * further commands from the waitq from being issued; 12696 * and (2) the code in sd_retry_command checking the 12697 * throttle limit before issuing a delayed or immediate 12698 * retry. This holds even if the throttle limit is 12699 * currently ratcheted down from its maximum value. 12700 */ 12701 statp = kstat_runq_enter; 12702 if (bp == un->un_retry_bp) { 12703 ASSERT((un->un_retry_statp == NULL) || 12704 (un->un_retry_statp == kstat_waitq_enter) || 12705 (un->un_retry_statp == 12706 kstat_runq_back_to_waitq)); 12707 /* 12708 * If the waitq kstat was incremented when 12709 * sd_set_retry_bp() queued this bp for a retry, 12710 * then we must set up statp so that the waitq 12711 * count will get decremented correctly below. 12712 * Also we must clear un->un_retry_statp to 12713 * ensure that we do not act on a stale value 12714 * in this field. 12715 */ 12716 if ((un->un_retry_statp == kstat_waitq_enter) || 12717 (un->un_retry_statp == 12718 kstat_runq_back_to_waitq)) { 12719 statp = kstat_waitq_to_runq; 12720 } 12721 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12722 saved_statp = un->un_retry_statp; 12723 #endif 12724 un->un_retry_statp = NULL; 12725 12726 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12727 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12728 "un_throttle:%d un_ncmds_in_transport:%d\n", 12729 un, un->un_retry_bp, un->un_throttle, 12730 un->un_ncmds_in_transport); 12731 } else { 12732 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12733 "processing priority bp:0x%p\n", bp); 12734 } 12735 12736 } else if ((bp = un->un_waitq_headp) != NULL) { 12737 /* 12738 * A command on the waitq is ready to go, but do not 12739 * send it if: 12740 * 12741 * (1) the throttle limit has been reached, or 12742 * (2) a retry is pending, or 12743 * (3) a START_STOP_UNIT callback pending, or 12744 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12745 * command is pending. 12746 * 12747 * For all of these conditions, IO processing will 12748 * restart after the condition is cleared. 12749 */ 12750 if (un->un_ncmds_in_transport >= un->un_throttle) { 12751 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12752 "sd_start_cmds: exiting, " 12753 "throttle limit reached!\n"); 12754 goto exit; 12755 } 12756 if (un->un_retry_bp != NULL) { 12757 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12758 "sd_start_cmds: exiting, retry pending!\n"); 12759 goto exit; 12760 } 12761 if (un->un_startstop_timeid != NULL) { 12762 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12763 "sd_start_cmds: exiting, " 12764 "START_STOP pending!\n"); 12765 goto exit; 12766 } 12767 if (un->un_direct_priority_timeid != NULL) { 12768 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12769 "sd_start_cmds: exiting, " 12770 "SD_PATH_DIRECT_PRIORITY cmd. 
pending!\n"); 12771 goto exit; 12772 } 12773 12774 /* Dequeue the command */ 12775 un->un_waitq_headp = bp->av_forw; 12776 if (un->un_waitq_headp == NULL) { 12777 un->un_waitq_tailp = NULL; 12778 } 12779 bp->av_forw = NULL; 12780 statp = kstat_waitq_to_runq; 12781 SD_TRACE(SD_LOG_IO_CORE, un, 12782 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 12783 12784 } else { 12785 /* No work to do so bail out now */ 12786 SD_TRACE(SD_LOG_IO_CORE, un, 12787 "sd_start_cmds: no more work, exiting!\n"); 12788 goto exit; 12789 } 12790 12791 /* 12792 * Reset the state to normal. This is the mechanism by which 12793 * the state transitions from either SD_STATE_RWAIT or 12794 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 12795 * If state is SD_STATE_PM_CHANGING then this command is 12796 * part of the device power control and the state must 12797 * not be put back to normal. Doing so would would 12798 * allow new commands to proceed when they shouldn't, 12799 * the device may be going off. 12800 */ 12801 if ((un->un_state != SD_STATE_SUSPENDED) && 12802 (un->un_state != SD_STATE_PM_CHANGING)) { 12803 New_state(un, SD_STATE_NORMAL); 12804 } 12805 12806 xp = SD_GET_XBUF(bp); 12807 ASSERT(xp != NULL); 12808 12809 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12810 /* 12811 * Allocate the scsi_pkt if we need one, or attach DMA 12812 * resources if we have a scsi_pkt that needs them. The 12813 * latter should only occur for commands that are being 12814 * retried. 12815 */ 12816 if ((xp->xb_pktp == NULL) || 12817 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 12818 #else 12819 if (xp->xb_pktp == NULL) { 12820 #endif 12821 /* 12822 * There is no scsi_pkt allocated for this buf. Call 12823 * the initpkt function to allocate & init one. 12824 * 12825 * The scsi_init_pkt runout callback functionality is 12826 * implemented as follows: 12827 * 12828 * 1) The initpkt function always calls 12829 * scsi_init_pkt(9F) with sdrunout specified as the 12830 * callback routine. 12831 * 2) A successful packet allocation is initialized and 12832 * the I/O is transported. 12833 * 3) The I/O associated with an allocation resource 12834 * failure is left on its queue to be retried via 12835 * runout or the next I/O. 12836 * 4) The I/O associated with a DMA error is removed 12837 * from the queue and failed with EIO. Processing of 12838 * the transport queues is also halted to be 12839 * restarted via runout or the next I/O. 12840 * 5) The I/O associated with a CDB size or packet 12841 * size error is removed from the queue and failed 12842 * with EIO. Processing of the transport queues is 12843 * continued. 12844 * 12845 * Note: there is no interface for canceling a runout 12846 * callback. To prevent the driver from detaching or 12847 * suspending while a runout is pending the driver 12848 * state is set to SD_STATE_RWAIT 12849 * 12850 * Note: using the scsi_init_pkt callback facility can 12851 * result in an I/O request persisting at the head of 12852 * the list which cannot be satisfied even after 12853 * multiple retries. In the future the driver may 12854 * implement some kind of maximum runout count before 12855 * failing an I/O. 12856 * 12857 * Note: the use of funcp below may seem superfluous, 12858 * but it helps warlock figure out the correct 12859 * initpkt function calls (see [s]sd.wlcmd). 
12860 */ 12861 struct scsi_pkt *pktp; 12862 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 12863 12864 ASSERT(bp != un->un_rqs_bp); 12865 12866 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 12867 switch ((*funcp)(bp, &pktp)) { 12868 case SD_PKT_ALLOC_SUCCESS: 12869 xp->xb_pktp = pktp; 12870 SD_TRACE(SD_LOG_IO_CORE, un, 12871 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 12872 pktp); 12873 goto got_pkt; 12874 12875 case SD_PKT_ALLOC_FAILURE: 12876 /* 12877 * Temporary (hopefully) resource depletion. 12878 * Since retries and RQS commands always have a 12879 * scsi_pkt allocated, these cases should never 12880 * get here. So the only cases this needs to 12881 * handle is a bp from the waitq (which we put 12882 * back onto the waitq for sdrunout), or a bp 12883 * sent as an immed_bp (which we just fail). 12884 */ 12885 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12886 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 12887 12888 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12889 12890 if (bp == immed_bp) { 12891 /* 12892 * If SD_XB_DMA_FREED is clear, then 12893 * this is a failure to allocate a 12894 * scsi_pkt, and we must fail the 12895 * command. 12896 */ 12897 if ((xp->xb_pkt_flags & 12898 SD_XB_DMA_FREED) == 0) { 12899 break; 12900 } 12901 12902 /* 12903 * If this immediate command is NOT our 12904 * un_retry_bp, then we must fail it. 12905 */ 12906 if (bp != un->un_retry_bp) { 12907 break; 12908 } 12909 12910 /* 12911 * We get here if this cmd is our 12912 * un_retry_bp that was DMAFREED, but 12913 * scsi_init_pkt() failed to reallocate 12914 * DMA resources when we attempted to 12915 * retry it. This can happen when an 12916 * mpxio failover is in progress, but 12917 * we don't want to just fail the 12918 * command in this case. 12919 * 12920 * Use timeout(9F) to restart it after 12921 * a 100ms delay. We don't want to 12922 * let sdrunout() restart it, because 12923 * sdrunout() is just supposed to start 12924 * commands that are sitting on the 12925 * wait queue. The un_retry_bp stays 12926 * set until the command completes, but 12927 * sdrunout can be called many times 12928 * before that happens. Since sdrunout 12929 * cannot tell if the un_retry_bp is 12930 * already in the transport, it could 12931 * end up calling scsi_transport() for 12932 * the un_retry_bp multiple times. 12933 * 12934 * Also: don't schedule the callback 12935 * if some other callback is already 12936 * pending. 12937 */ 12938 if (un->un_retry_statp == NULL) { 12939 /* 12940 * restore the kstat pointer to 12941 * keep kstat counts coherent 12942 * when we do retry the command. 12943 */ 12944 un->un_retry_statp = 12945 saved_statp; 12946 } 12947 12948 if ((un->un_startstop_timeid == NULL) && 12949 (un->un_retry_timeid == NULL) && 12950 (un->un_direct_priority_timeid == 12951 NULL)) { 12952 12953 un->un_retry_timeid = 12954 timeout( 12955 sd_start_retry_command, 12956 un, SD_RESTART_TIMEOUT); 12957 } 12958 goto exit; 12959 } 12960 12961 #else 12962 if (bp == immed_bp) { 12963 break; /* Just fail the command */ 12964 } 12965 #endif 12966 12967 /* Add the buf back to the head of the waitq */ 12968 bp->av_forw = un->un_waitq_headp; 12969 un->un_waitq_headp = bp; 12970 if (un->un_waitq_tailp == NULL) { 12971 un->un_waitq_tailp = bp; 12972 } 12973 goto exit; 12974 12975 case SD_PKT_ALLOC_FAILURE_NO_DMA: 12976 /* 12977 * HBA DMA resource failure. Fail the command 12978 * and continue processing of the queues. 
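 * (Contrast with SD_PKT_ALLOC_FAILURE above, where a waitq bp is put
 * back at the head of the wait queue so that sdrunout() or the next
 * I/O can retry the allocation.)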
12979 */ 12980 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12981 "sd_start_cmds: " 12982 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 12983 break; 12984 12985 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 12986 /* 12987 * Note:x86: Partial DMA mapping not supported 12988 * for USCSI commands, and all the needed DMA 12989 * resources were not allocated. 12990 */ 12991 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12992 "sd_start_cmds: " 12993 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 12994 break; 12995 12996 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 12997 /* 12998 * Note:x86: Request cannot fit into CDB based 12999 * on lba and len. 13000 */ 13001 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13002 "sd_start_cmds: " 13003 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13004 break; 13005 13006 default: 13007 /* Should NEVER get here! */ 13008 panic("scsi_initpkt error"); 13009 /*NOTREACHED*/ 13010 } 13011 13012 /* 13013 * Fatal error in allocating a scsi_pkt for this buf. 13014 * Update kstats & return the buf with an error code. 13015 * We must use sd_return_failed_command_no_restart() to 13016 * avoid a recursive call back into sd_start_cmds(). 13017 * However this also means that we must keep processing 13018 * the waitq here in order to avoid stalling. 13019 */ 13020 if (statp == kstat_waitq_to_runq) { 13021 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13022 } 13023 sd_return_failed_command_no_restart(un, bp, EIO); 13024 if (bp == immed_bp) { 13025 /* immed_bp is gone by now, so clear this */ 13026 immed_bp = NULL; 13027 } 13028 continue; 13029 } 13030 got_pkt: 13031 if (bp == immed_bp) { 13032 /* goto the head of the class.... */ 13033 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13034 } 13035 13036 un->un_ncmds_in_transport++; 13037 SD_UPDATE_KSTATS(un, statp, bp); 13038 13039 /* 13040 * Call scsi_transport() to send the command to the target. 13041 * According to SCSA architecture, we must drop the mutex here 13042 * before calling scsi_transport() in order to avoid deadlock. 13043 * Note that the scsi_pkt's completion routine can be executed 13044 * (from interrupt context) even before the call to 13045 * scsi_transport() returns. 13046 */ 13047 SD_TRACE(SD_LOG_IO_CORE, un, 13048 "sd_start_cmds: calling scsi_transport()\n"); 13049 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13050 13051 mutex_exit(SD_MUTEX(un)); 13052 rval = scsi_transport(xp->xb_pktp); 13053 mutex_enter(SD_MUTEX(un)); 13054 13055 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13056 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13057 13058 switch (rval) { 13059 case TRAN_ACCEPT: 13060 /* Clear this with every pkt accepted by the HBA */ 13061 un->un_tran_fatal_count = 0; 13062 break; /* Success; try the next cmd (if any) */ 13063 13064 case TRAN_BUSY: 13065 un->un_ncmds_in_transport--; 13066 ASSERT(un->un_ncmds_in_transport >= 0); 13067 13068 /* 13069 * Don't retry request sense, the sense data 13070 * is lost when another request is sent. 13071 * Free up the rqs buf and retry 13072 * the original failed cmd. Update kstat. 13073 */ 13074 if (bp == un->un_rqs_bp) { 13075 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13076 bp = sd_mark_rqs_idle(un, xp); 13077 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13078 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13079 kstat_waitq_enter); 13080 goto exit; 13081 } 13082 13083 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13084 /* 13085 * Free the DMA resources for the scsi_pkt. 
This will 13086 * allow mpxio to select another path the next time 13087 * we call scsi_transport() with this scsi_pkt. 13088 * See sdintr() for the rationalization behind this. 13089 */ 13090 if ((un->un_f_is_fibre == TRUE) && 13091 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13092 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13093 scsi_dmafree(xp->xb_pktp); 13094 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13095 } 13096 #endif 13097 13098 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13099 /* 13100 * Commands that are SD_PATH_DIRECT_PRIORITY 13101 * are for error recovery situations. These do 13102 * not use the normal command waitq, so if they 13103 * get a TRAN_BUSY we cannot put them back onto 13104 * the waitq for later retry. One possible 13105 * problem is that there could already be some 13106 * other command on un_retry_bp that is waiting 13107 * for this one to complete, so we would be 13108 * deadlocked if we put this command back onto 13109 * the waitq for later retry (since un_retry_bp 13110 * must complete before the driver gets back to 13111 * commands on the waitq). 13112 * 13113 * To avoid deadlock we must schedule a callback 13114 * that will restart this command after a set 13115 * interval. This should keep retrying for as 13116 * long as the underlying transport keeps 13117 * returning TRAN_BUSY (just like for other 13118 * commands). Use the same timeout interval as 13119 * for the ordinary TRAN_BUSY retry. 13120 */ 13121 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13122 "sd_start_cmds: scsi_transport() returned " 13123 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13124 13125 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13126 un->un_direct_priority_timeid = 13127 timeout(sd_start_direct_priority_command, 13128 bp, SD_BSY_TIMEOUT / 500); 13129 13130 goto exit; 13131 } 13132 13133 /* 13134 * For TRAN_BUSY, we want to reduce the throttle value, 13135 * unless we are retrying a command. 13136 */ 13137 if (bp != un->un_retry_bp) { 13138 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13139 } 13140 13141 /* 13142 * Set up the bp to be tried again 10 ms later. 13143 * Note:x86: Is there a timeout value in the sd_lun 13144 * for this condition? 13145 */ 13146 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13147 kstat_runq_back_to_waitq); 13148 goto exit; 13149 13150 case TRAN_FATAL_ERROR: 13151 un->un_tran_fatal_count++; 13152 /* FALLTHRU */ 13153 13154 case TRAN_BADPKT: 13155 default: 13156 un->un_ncmds_in_transport--; 13157 ASSERT(un->un_ncmds_in_transport >= 0); 13158 13159 /* 13160 * If this is our REQUEST SENSE command with a 13161 * transport error, we must get back the pointers 13162 * to the original buf, and mark the REQUEST 13163 * SENSE command as "available". 13164 */ 13165 if (bp == un->un_rqs_bp) { 13166 bp = sd_mark_rqs_idle(un, xp); 13167 xp = SD_GET_XBUF(bp); 13168 } else { 13169 /* 13170 * Legacy behavior: do not update transport 13171 * error count for request sense commands. 13172 */ 13173 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13174 } 13175 13176 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13177 sd_print_transport_rejected_message(un, xp, rval); 13178 13179 /* 13180 * We must use sd_return_failed_command_no_restart() to 13181 * avoid a recursive call back into sd_start_cmds(). 13182 * However this also means that we must keep processing 13183 * the waitq here in order to avoid stalling. 
13184 */ 13185 sd_return_failed_command_no_restart(un, bp, EIO); 13186 13187 /* 13188 * Notify any threads waiting in sd_ddi_suspend() that 13189 * a command completion has occurred. 13190 */ 13191 if (un->un_state == SD_STATE_SUSPENDED) { 13192 cv_broadcast(&un->un_disk_busy_cv); 13193 } 13194 13195 if (bp == immed_bp) { 13196 /* immed_bp is gone by now, so clear this */ 13197 immed_bp = NULL; 13198 } 13199 break; 13200 } 13201 13202 } while (immed_bp == NULL); 13203 13204 exit: 13205 ASSERT(mutex_owned(SD_MUTEX(un))); 13206 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13207 } 13208 13209 13210 /* 13211 * Function: sd_return_command 13212 * 13213 * Description: Returns a command to its originator (with or without an 13214 * error). Also starts commands waiting to be transported 13215 * to the target. 13216 * 13217 * Context: May be called from interrupt, kernel, or timeout context 13218 */ 13219 13220 static void 13221 sd_return_command(struct sd_lun *un, struct buf *bp) 13222 { 13223 struct sd_xbuf *xp; 13224 #if defined(__i386) || defined(__amd64) 13225 struct scsi_pkt *pktp; 13226 #endif 13227 13228 ASSERT(bp != NULL); 13229 ASSERT(un != NULL); 13230 ASSERT(mutex_owned(SD_MUTEX(un))); 13231 ASSERT(bp != un->un_rqs_bp); 13232 xp = SD_GET_XBUF(bp); 13233 ASSERT(xp != NULL); 13234 13235 #if defined(__i386) || defined(__amd64) 13236 pktp = SD_GET_PKTP(bp); 13237 #endif 13238 13239 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13240 13241 #if defined(__i386) || defined(__amd64) 13242 /* 13243 * Note:x86: check for the "sdrestart failed" case. 13244 */ 13245 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13246 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13247 (xp->xb_pktp->pkt_resid == 0)) { 13248 13249 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13250 /* 13251 * Successfully set up next portion of cmd 13252 * transfer, try sending it 13253 */ 13254 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13255 NULL, NULL, 0, (clock_t)0, NULL); 13256 sd_start_cmds(un, NULL); 13257 return; /* Note:x86: need a return here? */ 13258 } 13259 } 13260 #endif 13261 13262 /* 13263 * If this is the failfast bp, clear it from un_failfast_bp. This 13264 * can happen if upon being re-tried the failfast bp either 13265 * succeeded or encountered another error (possibly even a different 13266 * error than the one that precipitated the failfast state, but in 13267 * that case it would have had to exhaust retries as well). Regardless, 13268 * this should not occur whenever the instance is in the active 13269 * failfast state. 13270 */ 13271 if (bp == un->un_failfast_bp) { 13272 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13273 un->un_failfast_bp = NULL; 13274 } 13275 13276 /* 13277 * Clear the failfast state upon successful completion of ANY cmd. 13278 */ 13279 if (bp->b_error == 0) { 13280 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13281 } 13282 13283 /* 13284 * This is used if the command was retried one or more times. Show that 13285 * we are done with it, and allow processing of the waitq to resume. 
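 * Clearing un_retry_bp below is what allows sd_start_cmds() to pull
 * the next command off the wait queue again; see the "retry pending"
 * check in that routine.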
13286 */ 13287 if (bp == un->un_retry_bp) { 13288 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13289 "sd_return_command: un:0x%p: " 13290 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13291 un->un_retry_bp = NULL; 13292 un->un_retry_statp = NULL; 13293 } 13294 13295 SD_UPDATE_RDWR_STATS(un, bp); 13296 SD_UPDATE_PARTITION_STATS(un, bp); 13297 13298 switch (un->un_state) { 13299 case SD_STATE_SUSPENDED: 13300 /* 13301 * Notify any threads waiting in sd_ddi_suspend() that 13302 * a command completion has occurred. 13303 */ 13304 cv_broadcast(&un->un_disk_busy_cv); 13305 break; 13306 default: 13307 sd_start_cmds(un, NULL); 13308 break; 13309 } 13310 13311 /* Return this command up the iodone chain to its originator. */ 13312 mutex_exit(SD_MUTEX(un)); 13313 13314 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13315 xp->xb_pktp = NULL; 13316 13317 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13318 13319 ASSERT(!mutex_owned(SD_MUTEX(un))); 13320 mutex_enter(SD_MUTEX(un)); 13321 13322 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13323 } 13324 13325 13326 /* 13327 * Function: sd_return_failed_command 13328 * 13329 * Description: Command completion when an error occurred. 13330 * 13331 * Context: May be called from interrupt context 13332 */ 13333 13334 static void 13335 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 13336 { 13337 ASSERT(bp != NULL); 13338 ASSERT(un != NULL); 13339 ASSERT(mutex_owned(SD_MUTEX(un))); 13340 13341 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13342 "sd_return_failed_command: entry\n"); 13343 13344 /* 13345 * b_resid could already be nonzero due to a partial data 13346 * transfer, so do not change it here. 13347 */ 13348 SD_BIOERROR(bp, errcode); 13349 13350 sd_return_command(un, bp); 13351 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13352 "sd_return_failed_command: exit\n"); 13353 } 13354 13355 13356 /* 13357 * Function: sd_return_failed_command_no_restart 13358 * 13359 * Description: Same as sd_return_failed_command, but ensures that no 13360 * call back into sd_start_cmds will be issued. 13361 * 13362 * Context: May be called from interrupt context 13363 */ 13364 13365 static void 13366 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 13367 int errcode) 13368 { 13369 struct sd_xbuf *xp; 13370 13371 ASSERT(bp != NULL); 13372 ASSERT(un != NULL); 13373 ASSERT(mutex_owned(SD_MUTEX(un))); 13374 xp = SD_GET_XBUF(bp); 13375 ASSERT(xp != NULL); 13376 ASSERT(errcode != 0); 13377 13378 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13379 "sd_return_failed_command_no_restart: entry\n"); 13380 13381 /* 13382 * b_resid could already be nonzero due to a partial data 13383 * transfer, so do not change it here. 13384 */ 13385 SD_BIOERROR(bp, errcode); 13386 13387 /* 13388 * If this is the failfast bp, clear it. This can happen if the 13389 * failfast bp encounterd a fatal error when we attempted to 13390 * re-try it (such as a scsi_transport(9F) failure). However 13391 * we should NOT be in an active failfast state if the failfast 13392 * bp is not NULL. 13393 */ 13394 if (bp == un->un_failfast_bp) { 13395 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13396 un->un_failfast_bp = NULL; 13397 } 13398 13399 if (bp == un->un_retry_bp) { 13400 /* 13401 * This command was retried one or more times. Show that we are 13402 * done with it, and allow processing of the waitq to resume. 
13403 */
13404 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13405 "sd_return_failed_command_no_restart: "
13406 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
13407 un->un_retry_bp = NULL;
13408 un->un_retry_statp = NULL;
13409 }
13410
13411 SD_UPDATE_RDWR_STATS(un, bp);
13412 SD_UPDATE_PARTITION_STATS(un, bp);
13413
13414 mutex_exit(SD_MUTEX(un));
13415
13416 if (xp->xb_pktp != NULL) {
13417 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
13418 xp->xb_pktp = NULL;
13419 }
13420
13421 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
13422
13423 mutex_enter(SD_MUTEX(un));
13424
13425 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13426 "sd_return_failed_command_no_restart: exit\n");
13427 }
13428
13429
13430 /*
13431 * Function: sd_retry_command
13432 *
13433 * Description: Queue up a command for retry, or (optionally) fail it
13434 * if retry counts are exhausted.
13435 *
13436 * Arguments: un - Pointer to the sd_lun struct for the target.
13437 *
13438 * bp - Pointer to the buf for the command to be retried.
13439 *
13440 * retry_check_flag - Flag to see which (if any) of the retry
13441 * counts should be decremented/checked. If the indicated
13442 * retry count is exhausted, then the command will not be
13443 * retried; it will be failed instead. This should use a
13444 * value equal to one of the following:
13445 *
13446 * SD_RETRIES_NOCHECK
13447 * SD_RETRIES_STANDARD
13448 * SD_RETRIES_VICTIM
13449 *
13450 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
13451 * if the check should be made to see if FLAG_ISOLATE is set
13452 * in the pkt. If FLAG_ISOLATE is set, then the command is
13453 * not retried, it is simply failed.
13454 *
13455 * user_funcp - Ptr to function to call before dispatching the
13456 * command. May be NULL if no action needs to be performed.
13457 * (Primarily intended for printing messages.)
13458 *
13459 * user_arg - Optional argument to be passed along to
13460 * the user_funcp call.
13461 *
13462 * failure_code - errno return code to set in the bp if the
13463 * command is going to be failed.
13464 *
13465 * retry_delay - Retry delay interval in (clock_t) units. May
13466 * be zero, which indicates that the retry should be issued
13467 * immediately (i.e., without an intervening delay).
13468 *
13469 * statp - Ptr to kstat function to be updated if the command
13470 * is queued for a delayed retry. May be NULL if no kstat
13471 * update is desired.
13472 *
13473 * Context: May be called from interrupt context.
13474 */
13475
13476 static void
13477 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
13478 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
13479 code), void *user_arg, int failure_code, clock_t retry_delay,
13480 void (*statp)(kstat_io_t *))
13481 {
13482 struct sd_xbuf *xp;
13483 struct scsi_pkt *pktp;
13484
13485 ASSERT(un != NULL);
13486 ASSERT(mutex_owned(SD_MUTEX(un)));
13487 ASSERT(bp != NULL);
13488 xp = SD_GET_XBUF(bp);
13489 ASSERT(xp != NULL);
13490 pktp = SD_GET_PKTP(bp);
13491 ASSERT(pktp != NULL);
13492
13493 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
13494 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
13495
13496 /*
13497 * If we are syncing or dumping, fail the command to avoid
13498 * recursively calling back into scsi_transport().
13499 */
13500 if (ddi_in_panic()) {
13501 goto fail_command_no_log;
13502 }
13503
13504 /*
13505 * We should never be retrying a command with FLAG_DIAGNOSE set, so
13506 * log an error and fail the command.
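 * (FLAG_DIAGNOSE indicates a uscsi or internal driver command that
 * should not be retried; see the similar checks in sdintr() and
 * sd_send_request_sense_command().)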
13507 */ 13508 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 13509 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 13510 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 13511 sd_dump_memory(un, SD_LOG_IO, "CDB", 13512 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 13513 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 13514 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 13515 goto fail_command; 13516 } 13517 13518 /* 13519 * If we are suspended, then put the command onto head of the 13520 * wait queue since we don't want to start more commands, and 13521 * clear the un_retry_bp. Next time when we are resumed, will 13522 * handle the command in the wait queue. 13523 */ 13524 switch (un->un_state) { 13525 case SD_STATE_SUSPENDED: 13526 case SD_STATE_DUMPING: 13527 bp->av_forw = un->un_waitq_headp; 13528 un->un_waitq_headp = bp; 13529 if (un->un_waitq_tailp == NULL) { 13530 un->un_waitq_tailp = bp; 13531 } 13532 if (bp == un->un_retry_bp) { 13533 un->un_retry_bp = NULL; 13534 un->un_retry_statp = NULL; 13535 } 13536 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13537 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 13538 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 13539 return; 13540 default: 13541 break; 13542 } 13543 13544 /* 13545 * If the caller wants us to check FLAG_ISOLATE, then see if that 13546 * is set; if it is then we do not want to retry the command. 13547 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 13548 */ 13549 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 13550 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 13551 goto fail_command; 13552 } 13553 } 13554 13555 13556 /* 13557 * If SD_RETRIES_FAILFAST is set, it indicates that either a 13558 * command timeout or a selection timeout has occurred. This means 13559 * that we were unable to establish an kind of communication with 13560 * the target, and subsequent retries and/or commands are likely 13561 * to encounter similar results and take a long time to complete. 13562 * 13563 * If this is a failfast error condition, we need to update the 13564 * failfast state, even if this bp does not have B_FAILFAST set. 13565 */ 13566 if (retry_check_flag & SD_RETRIES_FAILFAST) { 13567 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 13568 ASSERT(un->un_failfast_bp == NULL); 13569 /* 13570 * If we are already in the active failfast state, and 13571 * another failfast error condition has been detected, 13572 * then fail this command if it has B_FAILFAST set. 13573 * If B_FAILFAST is clear, then maintain the legacy 13574 * behavior of retrying heroically, even tho this will 13575 * take a lot more time to fail the command. 13576 */ 13577 if (bp->b_flags & B_FAILFAST) { 13578 goto fail_command; 13579 } 13580 } else { 13581 /* 13582 * We're not in the active failfast state, but we 13583 * have a failfast error condition, so we must begin 13584 * transition to the next state. We do this regardless 13585 * of whether or not this bp has B_FAILFAST set. 13586 */ 13587 if (un->un_failfast_bp == NULL) { 13588 /* 13589 * This is the first bp to meet a failfast 13590 * condition so save it on un_failfast_bp & 13591 * do normal retry processing. Do not enter 13592 * active failfast state yet. This marks 13593 * entry into the "failfast pending" state. 
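 *
 * In short: un_failfast_bp == NULL means no failfast condition is
 * pending, and saving this bp here enters the "pending" state. A
 * second failfast error on the *same* bp (next case below) moves the
 * instance to SD_FAILFAST_ACTIVE and flushes the queues, while a
 * failfast error on a *different* bp leaves the pending state as-is.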
13594 */ 13595 un->un_failfast_bp = bp; 13596 13597 } else if (un->un_failfast_bp == bp) { 13598 /* 13599 * This is the second time *this* bp has 13600 * encountered a failfast error condition, 13601 * so enter active failfast state & flush 13602 * queues as appropriate. 13603 */ 13604 un->un_failfast_state = SD_FAILFAST_ACTIVE; 13605 un->un_failfast_bp = NULL; 13606 sd_failfast_flushq(un); 13607 13608 /* 13609 * Fail this bp now if B_FAILFAST set; 13610 * otherwise continue with retries. (It would 13611 * be pretty ironic if this bp succeeded on a 13612 * subsequent retry after we just flushed all 13613 * the queues). 13614 */ 13615 if (bp->b_flags & B_FAILFAST) { 13616 goto fail_command; 13617 } 13618 13619 #if !defined(lint) && !defined(__lint) 13620 } else { 13621 /* 13622 * If neither of the preceeding conditionals 13623 * was true, it means that there is some 13624 * *other* bp that has met an inital failfast 13625 * condition and is currently either being 13626 * retried or is waiting to be retried. In 13627 * that case we should perform normal retry 13628 * processing on *this* bp, since there is a 13629 * chance that the current failfast condition 13630 * is transient and recoverable. If that does 13631 * not turn out to be the case, then retries 13632 * will be cleared when the wait queue is 13633 * flushed anyway. 13634 */ 13635 #endif 13636 } 13637 } 13638 } else { 13639 /* 13640 * SD_RETRIES_FAILFAST is clear, which indicates that we 13641 * likely were able to at least establish some level of 13642 * communication with the target and subsequent commands 13643 * and/or retries are likely to get through to the target, 13644 * In this case we want to be aggressive about clearing 13645 * the failfast state. Note that this does not affect 13646 * the "failfast pending" condition. 13647 */ 13648 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13649 } 13650 13651 13652 /* 13653 * Check the specified retry count to see if we can still do 13654 * any retries with this pkt before we should fail it. 13655 */ 13656 switch (retry_check_flag & SD_RETRIES_MASK) { 13657 case SD_RETRIES_VICTIM: 13658 /* 13659 * Check the victim retry count. If exhausted, then fall 13660 * thru & check against the standard retry count. 13661 */ 13662 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 13663 /* Increment count & proceed with the retry */ 13664 xp->xb_victim_retry_count++; 13665 break; 13666 } 13667 /* Victim retries exhausted, fall back to std. retries... */ 13668 /* FALLTHRU */ 13669 13670 case SD_RETRIES_STANDARD: 13671 if (xp->xb_retry_count >= un->un_retry_count) { 13672 /* Retries exhausted, fail the command */ 13673 SD_TRACE(SD_LOG_IO_CORE, un, 13674 "sd_retry_command: retries exhausted!\n"); 13675 /* 13676 * update b_resid for failed SCMD_READ & SCMD_WRITE 13677 * commands with nonzero pkt_resid. 
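 * For example (illustrative byte counts): a 64KB SCMD_READ that is
 * failed here with a pkt_resid of 16KB has that residual reflected
 * in b_resid, so the originator can tell that only 48KB of the
 * transfer actually completed.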
13678 */ 13679 if ((pktp->pkt_reason == CMD_CMPLT) && 13680 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 13681 (pktp->pkt_resid != 0)) { 13682 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 13683 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 13684 SD_UPDATE_B_RESID(bp, pktp); 13685 } 13686 } 13687 goto fail_command; 13688 } 13689 xp->xb_retry_count++; 13690 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13691 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13692 break; 13693 13694 case SD_RETRIES_UA: 13695 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 13696 /* Retries exhausted, fail the command */ 13697 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13698 "Unit Attention retries exhausted. " 13699 "Check the target.\n"); 13700 goto fail_command; 13701 } 13702 xp->xb_ua_retry_count++; 13703 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13704 "sd_retry_command: retry count:%d\n", 13705 xp->xb_ua_retry_count); 13706 break; 13707 13708 case SD_RETRIES_BUSY: 13709 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13710 /* Retries exhausted, fail the command */ 13711 SD_TRACE(SD_LOG_IO_CORE, un, 13712 "sd_retry_command: retries exhausted!\n"); 13713 goto fail_command; 13714 } 13715 xp->xb_retry_count++; 13716 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13717 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13718 break; 13719 13720 case SD_RETRIES_NOCHECK: 13721 default: 13722 /* No retry count to check. Just proceed with the retry */ 13723 break; 13724 } 13725 13726 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13727 13728 /* 13729 * If we were given a zero timeout, we must attempt to retry the 13730 * command immediately (ie, without a delay). 13731 */ 13732 if (retry_delay == 0) { 13733 /* 13734 * Check some limiting conditions to see if we can actually 13735 * do the immediate retry. If we cannot, then we must 13736 * fall back to queueing up a delayed retry. 13737 */ 13738 if (un->un_ncmds_in_transport >= un->un_throttle) { 13739 /* 13740 * We are at the throttle limit for the target, 13741 * fall back to delayed retry. 13742 */ 13743 retry_delay = SD_BSY_TIMEOUT; 13744 statp = kstat_waitq_enter; 13745 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13746 "sd_retry_command: immed. retry hit " 13747 "throttle!\n"); 13748 } else { 13749 /* 13750 * We're clear to proceed with the immediate retry. 13751 * First call the user-provided function (if any) 13752 */ 13753 if (user_funcp != NULL) { 13754 (*user_funcp)(un, bp, user_arg, 13755 SD_IMMEDIATE_RETRY_ISSUED); 13756 #ifdef __lock_lint 13757 sd_print_incomplete_msg(un, bp, user_arg, 13758 SD_IMMEDIATE_RETRY_ISSUED); 13759 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13760 SD_IMMEDIATE_RETRY_ISSUED); 13761 sd_print_sense_failed_msg(un, bp, user_arg, 13762 SD_IMMEDIATE_RETRY_ISSUED); 13763 #endif 13764 } 13765 13766 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13767 "sd_retry_command: issuing immediate retry\n"); 13768 13769 /* 13770 * Call sd_start_cmds() to transport the command to 13771 * the target. 13772 */ 13773 sd_start_cmds(un, bp); 13774 13775 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13776 "sd_retry_command exit\n"); 13777 return; 13778 } 13779 } 13780 13781 /* 13782 * Set up to retry the command after a delay. 
13783 * First call the user-provided function (if any) 13784 */ 13785 if (user_funcp != NULL) { 13786 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 13787 } 13788 13789 sd_set_retry_bp(un, bp, retry_delay, statp); 13790 13791 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13792 return; 13793 13794 fail_command: 13795 13796 if (user_funcp != NULL) { 13797 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 13798 } 13799 13800 fail_command_no_log: 13801 13802 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13803 "sd_retry_command: returning failed command\n"); 13804 13805 sd_return_failed_command(un, bp, failure_code); 13806 13807 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13808 } 13809 13810 13811 /* 13812 * Function: sd_set_retry_bp 13813 * 13814 * Description: Set up the given bp for retry. 13815 * 13816 * Arguments: un - ptr to associated softstate 13817 * bp - ptr to buf(9S) for the command 13818 * retry_delay - time interval before issuing retry (may be 0) 13819 * statp - optional pointer to kstat function 13820 * 13821 * Context: May be called under interrupt context 13822 */ 13823 13824 static void 13825 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 13826 void (*statp)(kstat_io_t *)) 13827 { 13828 ASSERT(un != NULL); 13829 ASSERT(mutex_owned(SD_MUTEX(un))); 13830 ASSERT(bp != NULL); 13831 13832 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13833 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 13834 13835 /* 13836 * Indicate that the command is being retried. This will not allow any 13837 * other commands on the wait queue to be transported to the target 13838 * until this command has been completed (success or failure). The 13839 * "retry command" is not transported to the target until the given 13840 * time delay expires, unless the user specified a 0 retry_delay. 13841 * 13842 * Note: the timeout(9F) callback routine is what actually calls 13843 * sd_start_cmds() to transport the command, with the exception of a 13844 * zero retry_delay. The only current implementor of a zero retry delay 13845 * is the case where a START_STOP_UNIT is sent to spin-up a device. 13846 */ 13847 if (un->un_retry_bp == NULL) { 13848 ASSERT(un->un_retry_statp == NULL); 13849 un->un_retry_bp = bp; 13850 13851 /* 13852 * If the user has not specified a delay the command should 13853 * be queued and no timeout should be scheduled. 13854 */ 13855 if (retry_delay == 0) { 13856 /* 13857 * Save the kstat pointer that will be used in the 13858 * call to SD_UPDATE_KSTATS() below, so that 13859 * sd_start_cmds() can correctly decrement the waitq 13860 * count when it is time to transport this command. 13861 */ 13862 un->un_retry_statp = statp; 13863 goto done; 13864 } 13865 } 13866 13867 if (un->un_retry_bp == bp) { 13868 /* 13869 * Save the kstat pointer that will be used in the call to 13870 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 13871 * correctly decrement the waitq count when it is time to 13872 * transport this command. 13873 */ 13874 un->un_retry_statp = statp; 13875 13876 /* 13877 * Schedule a timeout if: 13878 * 1) The user has specified a delay. 13879 * 2) There is not a START_STOP_UNIT callback pending. 13880 * 13881 * If no delay has been specified, then it is up to the caller 13882 * to ensure that IO processing continues without stalling. 13883 * Effectively, this means that the caller will issue the 13884 * required call to sd_start_cmds(). 
The START_STOP_UNIT 13885 * callback does this after the START STOP UNIT command has 13886 * completed. In either of these cases we should not schedule 13887 * a timeout callback here. Also don't schedule the timeout if 13888 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 13889 */ 13890 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 13891 (un->un_direct_priority_timeid == NULL)) { 13892 un->un_retry_timeid = 13893 timeout(sd_start_retry_command, un, retry_delay); 13894 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13895 "sd_set_retry_bp: setting timeout: un: 0x%p" 13896 " bp:0x%p un_retry_timeid:0x%p\n", 13897 un, bp, un->un_retry_timeid); 13898 } 13899 } else { 13900 /* 13901 * We only get in here if there is already another command 13902 * waiting to be retried. In this case, we just put the 13903 * given command onto the wait queue, so it can be transported 13904 * after the current retry command has completed. 13905 * 13906 * Also we have to make sure that if the command at the head 13907 * of the wait queue is the un_failfast_bp, that we do not 13908 * put ahead of it any other commands that are to be retried. 13909 */ 13910 if ((un->un_failfast_bp != NULL) && 13911 (un->un_failfast_bp == un->un_waitq_headp)) { 13912 /* 13913 * Enqueue this command AFTER the first command on 13914 * the wait queue (which is also un_failfast_bp). 13915 */ 13916 bp->av_forw = un->un_waitq_headp->av_forw; 13917 un->un_waitq_headp->av_forw = bp; 13918 if (un->un_waitq_headp == un->un_waitq_tailp) { 13919 un->un_waitq_tailp = bp; 13920 } 13921 } else { 13922 /* Enqueue this command at the head of the waitq. */ 13923 bp->av_forw = un->un_waitq_headp; 13924 un->un_waitq_headp = bp; 13925 if (un->un_waitq_tailp == NULL) { 13926 un->un_waitq_tailp = bp; 13927 } 13928 } 13929 13930 if (statp == NULL) { 13931 statp = kstat_waitq_enter; 13932 } 13933 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13934 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 13935 } 13936 13937 done: 13938 if (statp != NULL) { 13939 SD_UPDATE_KSTATS(un, statp, bp); 13940 } 13941 13942 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13943 "sd_set_retry_bp: exit un:0x%p\n", un); 13944 } 13945 13946 13947 /* 13948 * Function: sd_start_retry_command 13949 * 13950 * Description: Start the command that has been waiting on the target's 13951 * retry queue. Called from timeout(9F) context after the 13952 * retry delay interval has expired. 13953 * 13954 * Arguments: arg - pointer to associated softstate for the device. 13955 * 13956 * Context: timeout(9F) thread context. May not sleep. 
13957 */ 13958 13959 static void 13960 sd_start_retry_command(void *arg) 13961 { 13962 struct sd_lun *un = arg; 13963 13964 ASSERT(un != NULL); 13965 ASSERT(!mutex_owned(SD_MUTEX(un))); 13966 13967 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13968 "sd_start_retry_command: entry\n"); 13969 13970 mutex_enter(SD_MUTEX(un)); 13971 13972 un->un_retry_timeid = NULL; 13973 13974 if (un->un_retry_bp != NULL) { 13975 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13976 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 13977 un, un->un_retry_bp); 13978 sd_start_cmds(un, un->un_retry_bp); 13979 } 13980 13981 mutex_exit(SD_MUTEX(un)); 13982 13983 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13984 "sd_start_retry_command: exit\n"); 13985 } 13986 13987 13988 /* 13989 * Function: sd_start_direct_priority_command 13990 * 13991 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 13992 * received TRAN_BUSY when we called scsi_transport() to send it 13993 * to the underlying HBA. This function is called from timeout(9F) 13994 * context after the delay interval has expired. 13995 * 13996 * Arguments: arg - pointer to associated buf(9S) to be restarted. 13997 * 13998 * Context: timeout(9F) thread context. May not sleep. 13999 */ 14000 14001 static void 14002 sd_start_direct_priority_command(void *arg) 14003 { 14004 struct buf *priority_bp = arg; 14005 struct sd_lun *un; 14006 14007 ASSERT(priority_bp != NULL); 14008 un = SD_GET_UN(priority_bp); 14009 ASSERT(un != NULL); 14010 ASSERT(!mutex_owned(SD_MUTEX(un))); 14011 14012 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14013 "sd_start_direct_priority_command: entry\n"); 14014 14015 mutex_enter(SD_MUTEX(un)); 14016 un->un_direct_priority_timeid = NULL; 14017 sd_start_cmds(un, priority_bp); 14018 mutex_exit(SD_MUTEX(un)); 14019 14020 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14021 "sd_start_direct_priority_command: exit\n"); 14022 } 14023 14024 14025 /* 14026 * Function: sd_send_request_sense_command 14027 * 14028 * Description: Sends a REQUEST SENSE command to the target 14029 * 14030 * Context: May be called from interrupt context. 14031 */ 14032 14033 static void 14034 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14035 struct scsi_pkt *pktp) 14036 { 14037 ASSERT(bp != NULL); 14038 ASSERT(un != NULL); 14039 ASSERT(mutex_owned(SD_MUTEX(un))); 14040 14041 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14042 "entry: buf:0x%p\n", bp); 14043 14044 /* 14045 * If we are syncing or dumping, then fail the command to avoid a 14046 * recursive callback into scsi_transport(). Also fail the command 14047 * if we are suspended (legacy behavior). 14048 */ 14049 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14050 (un->un_state == SD_STATE_DUMPING)) { 14051 sd_return_failed_command(un, bp, EIO); 14052 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14053 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14054 return; 14055 } 14056 14057 /* 14058 * Retry the failed command and don't issue the request sense if: 14059 * 1) the sense buf is busy 14060 * 2) we have 1 or more outstanding commands on the target 14061 * (the sense data will be cleared or invalidated any way) 14062 * 14063 * Note: There could be an issue with not checking a retry limit here, 14064 * the problem is determining which retry limit to check. 
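 *
 * (The retry issued below therefore uses SD_RETRIES_NOCHECK with an
 * SD_BSY_TIMEOUT delay: the original command is simply re-queued for
 * a delayed retry and no REQUEST SENSE is started while the sense
 * buf is busy or other commands are still outstanding.)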
14065 */ 14066 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14067 /* Don't retry if the command is flagged as non-retryable */ 14068 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14069 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14070 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 14071 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14072 "sd_send_request_sense_command: " 14073 "at full throttle, retrying exit\n"); 14074 } else { 14075 sd_return_failed_command(un, bp, EIO); 14076 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14077 "sd_send_request_sense_command: " 14078 "at full throttle, non-retryable exit\n"); 14079 } 14080 return; 14081 } 14082 14083 sd_mark_rqs_busy(un, bp); 14084 sd_start_cmds(un, un->un_rqs_bp); 14085 14086 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14087 "sd_send_request_sense_command: exit\n"); 14088 } 14089 14090 14091 /* 14092 * Function: sd_mark_rqs_busy 14093 * 14094 * Description: Indicate that the request sense bp for this instance is 14095 * in use. 14096 * 14097 * Context: May be called under interrupt context 14098 */ 14099 14100 static void 14101 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14102 { 14103 struct sd_xbuf *sense_xp; 14104 14105 ASSERT(un != NULL); 14106 ASSERT(bp != NULL); 14107 ASSERT(mutex_owned(SD_MUTEX(un))); 14108 ASSERT(un->un_sense_isbusy == 0); 14109 14110 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14111 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14112 14113 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14114 ASSERT(sense_xp != NULL); 14115 14116 SD_INFO(SD_LOG_IO, un, 14117 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14118 14119 ASSERT(sense_xp->xb_pktp != NULL); 14120 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14121 == (FLAG_SENSING | FLAG_HEAD)); 14122 14123 un->un_sense_isbusy = 1; 14124 un->un_rqs_bp->b_resid = 0; 14125 sense_xp->xb_pktp->pkt_resid = 0; 14126 sense_xp->xb_pktp->pkt_reason = 0; 14127 14128 /* So we can get back the bp at interrupt time! */ 14129 sense_xp->xb_sense_bp = bp; 14130 14131 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14132 14133 /* 14134 * Mark this buf as awaiting sense data. (This is already set in 14135 * the pkt_flags for the RQS packet.) 14136 */ 14137 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14138 14139 sense_xp->xb_retry_count = 0; 14140 sense_xp->xb_victim_retry_count = 0; 14141 sense_xp->xb_ua_retry_count = 0; 14142 sense_xp->xb_dma_resid = 0; 14143 14144 /* Clean up the fields for auto-request sense */ 14145 sense_xp->xb_sense_status = 0; 14146 sense_xp->xb_sense_state = 0; 14147 sense_xp->xb_sense_resid = 0; 14148 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14149 14150 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14151 } 14152 14153 14154 /* 14155 * Function: sd_mark_rqs_idle 14156 * 14157 * Description: SD_MUTEX must be held continuously through this routine 14158 * to prevent reuse of the rqs struct before the caller can 14159 * complete it's processing. 
14160 * 14161 * Return Code: Pointer to the RQS buf 14162 * 14163 * Context: May be called under interrupt context 14164 */ 14165 14166 static struct buf * 14167 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14168 { 14169 struct buf *bp; 14170 ASSERT(un != NULL); 14171 ASSERT(sense_xp != NULL); 14172 ASSERT(mutex_owned(SD_MUTEX(un))); 14173 ASSERT(un->un_sense_isbusy != 0); 14174 14175 un->un_sense_isbusy = 0; 14176 bp = sense_xp->xb_sense_bp; 14177 sense_xp->xb_sense_bp = NULL; 14178 14179 /* This pkt is no longer interested in getting sense data */ 14180 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14181 14182 return (bp); 14183 } 14184 14185 14186 14187 /* 14188 * Function: sd_alloc_rqs 14189 * 14190 * Description: Set up the unit to receive auto request sense data 14191 * 14192 * Return Code: DDI_SUCCESS or DDI_FAILURE 14193 * 14194 * Context: Called under attach(9E) context 14195 */ 14196 14197 static int 14198 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14199 { 14200 struct sd_xbuf *xp; 14201 14202 ASSERT(un != NULL); 14203 ASSERT(!mutex_owned(SD_MUTEX(un))); 14204 ASSERT(un->un_rqs_bp == NULL); 14205 ASSERT(un->un_rqs_pktp == NULL); 14206 14207 /* 14208 * First allocate the required buf and scsi_pkt structs, then set up 14209 * the CDB in the scsi_pkt for a REQUEST SENSE command. 14210 */ 14211 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14212 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14213 if (un->un_rqs_bp == NULL) { 14214 return (DDI_FAILURE); 14215 } 14216 14217 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14218 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14219 14220 if (un->un_rqs_pktp == NULL) { 14221 sd_free_rqs(un); 14222 return (DDI_FAILURE); 14223 } 14224 14225 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14226 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14227 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0); 14228 14229 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14230 14231 /* Set up the other needed members in the ARQ scsi_pkt. */ 14232 un->un_rqs_pktp->pkt_comp = sdintr; 14233 un->un_rqs_pktp->pkt_time = sd_io_time; 14234 un->un_rqs_pktp->pkt_flags |= 14235 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14236 14237 /* 14238 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14239 * provide any intpkt, destroypkt routines as we take care of 14240 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14241 */ 14242 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14243 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14244 xp->xb_pktp = un->un_rqs_pktp; 14245 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14246 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14247 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14248 14249 /* 14250 * Save the pointer to the request sense private bp so it can 14251 * be retrieved in sdintr. 14252 */ 14253 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14254 ASSERT(un->un_rqs_bp->b_private == xp); 14255 14256 /* 14257 * See if the HBA supports auto-request sense for the specified 14258 * target/lun. If it does, then try to enable it (if not already 14259 * enabled). 14260 * 14261 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14262 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14263 * return success. However, in both of these cases ARQ is always 14264 * enabled and scsi_ifgetcap will always return true. 
The best approach
14265 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
14266 *
14267 * The third case is an HBA (adp) that always reports ARQ as enabled
14268 * via scsi_ifgetcap() even when it is not; the best approach there is
14269 * to issue a scsi_ifsetcap() followed by a scsi_ifgetcap().
14270 * Note: this case is to circumvent the Adaptec bug. (x86 only)
14271 */
14272
14273 if (un->un_f_is_fibre == TRUE) {
14274 un->un_f_arq_enabled = TRUE;
14275 } else {
14276 #if defined(__i386) || defined(__amd64)
14277 /*
14278 * Circumvent the Adaptec bug; remove this code when
14279 * the bug is fixed.
14280 */
14281 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
14282 #endif
14283 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
14284 case 0:
14285 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14286 "sd_alloc_rqs: HBA supports ARQ\n");
14287 /*
14288 * ARQ is supported by this HBA but currently is not
14289 * enabled. Attempt to enable it and if successful then
14290 * mark this instance as ARQ enabled.
14291 */
14292 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
14293 == 1) {
14294 /* Successfully enabled ARQ in the HBA */
14295 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14296 "sd_alloc_rqs: ARQ enabled\n");
14297 un->un_f_arq_enabled = TRUE;
14298 } else {
14299 /* Could not enable ARQ in the HBA */
14300 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14301 "sd_alloc_rqs: failed ARQ enable\n");
14302 un->un_f_arq_enabled = FALSE;
14303 }
14304 break;
14305 case 1:
14306 /*
14307 * ARQ is supported by this HBA and is already enabled.
14308 * Just mark ARQ as enabled for this instance.
14309 */
14310 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14311 "sd_alloc_rqs: ARQ already enabled\n");
14312 un->un_f_arq_enabled = TRUE;
14313 break;
14314 default:
14315 /*
14316 * ARQ is not supported by this HBA; disable it for this
14317 * instance.
14318 */
14319 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14320 "sd_alloc_rqs: HBA does not support ARQ\n");
14321 un->un_f_arq_enabled = FALSE;
14322 break;
14323 }
14324 }
14325
14326 return (DDI_SUCCESS);
14327 }
14328
14329
14330 /*
14331 * Function: sd_free_rqs
14332 *
14333 * Description: Cleanup for the per-instance RQS command.
14334 *
14335 * Context: Kernel thread context
14336 */
14337
14338 static void
14339 sd_free_rqs(struct sd_lun *un)
14340 {
14341 ASSERT(un != NULL);
14342
14343 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
14344
14345 /*
14346 * If consistent memory is bound to a scsi_pkt, the pkt
14347 * has to be destroyed *before* freeing the consistent memory.
14348 * Do not change the order of these operations:
14349 * scsi_destroy_pkt() might otherwise access memory that has
14350 * already been freed by scsi_free_consistent_buf().
14351 */
14352 if (un->un_rqs_pktp != NULL) {
14353 scsi_destroy_pkt(un->un_rqs_pktp);
14354 un->un_rqs_pktp = NULL;
14355 }
14356
14357 if (un->un_rqs_bp != NULL) {
14358 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf));
14359 scsi_free_consistent_buf(un->un_rqs_bp);
14360 un->un_rqs_bp = NULL;
14361 }
14362 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
14363 }
14364
14365
14366
14367 /*
14368 * Function: sd_reduce_throttle
14369 *
14370 * Description: Reduces the maximum # of outstanding commands on a
14371 * target to the current number of outstanding commands.
14372 * Queues a timeout(9F) callback to restore the limit
14373 * after a specified interval has elapsed.
14374 * Typically used when we get a TRAN_BUSY return code
14375 * back from scsi_transport().
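 *
 * Worked example (illustrative numbers): with un_throttle at 16 and
 * four commands in transport when a TRAN_BUSY arrives, the adaptive
 * path saves 16 in un_busy_throttle and drops un_throttle to 4;
 * sd_restore_throttle() later either restores the value saved in
 * un_busy_throttle or, in the QFULL case (un_busy_throttle cleared),
 * grows the limit by roughly 10% per interval until un_saved_throttle
 * is reached.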
14376 * 14377 * Arguments: un - ptr to the sd_lun softstate struct 14378 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14379 * 14380 * Context: May be called from interrupt context 14381 */ 14382 14383 static void 14384 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14385 { 14386 ASSERT(un != NULL); 14387 ASSERT(mutex_owned(SD_MUTEX(un))); 14388 ASSERT(un->un_ncmds_in_transport >= 0); 14389 14390 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14391 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14392 un, un->un_throttle, un->un_ncmds_in_transport); 14393 14394 if (un->un_throttle > 1) { 14395 if (un->un_f_use_adaptive_throttle == TRUE) { 14396 switch (throttle_type) { 14397 case SD_THROTTLE_TRAN_BUSY: 14398 if (un->un_busy_throttle == 0) { 14399 un->un_busy_throttle = un->un_throttle; 14400 } 14401 break; 14402 case SD_THROTTLE_QFULL: 14403 un->un_busy_throttle = 0; 14404 break; 14405 default: 14406 ASSERT(FALSE); 14407 } 14408 14409 if (un->un_ncmds_in_transport > 0) { 14410 un->un_throttle = un->un_ncmds_in_transport; 14411 } 14412 14413 } else { 14414 if (un->un_ncmds_in_transport == 0) { 14415 un->un_throttle = 1; 14416 } else { 14417 un->un_throttle = un->un_ncmds_in_transport; 14418 } 14419 } 14420 } 14421 14422 /* Reschedule the timeout if none is currently active */ 14423 if (un->un_reset_throttle_timeid == NULL) { 14424 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14425 un, SD_THROTTLE_RESET_INTERVAL); 14426 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14427 "sd_reduce_throttle: timeout scheduled!\n"); 14428 } 14429 14430 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14431 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14432 } 14433 14434 14435 14436 /* 14437 * Function: sd_restore_throttle 14438 * 14439 * Description: Callback function for timeout(9F). Resets the current 14440 * value of un->un_throttle to its default. 14441 * 14442 * Arguments: arg - pointer to associated softstate for the device. 14443 * 14444 * Context: May be called from interrupt context 14445 */ 14446 14447 static void 14448 sd_restore_throttle(void *arg) 14449 { 14450 struct sd_lun *un = arg; 14451 14452 ASSERT(un != NULL); 14453 ASSERT(!mutex_owned(SD_MUTEX(un))); 14454 14455 mutex_enter(SD_MUTEX(un)); 14456 14457 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14458 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14459 14460 un->un_reset_throttle_timeid = NULL; 14461 14462 if (un->un_f_use_adaptive_throttle == TRUE) { 14463 /* 14464 * If un_busy_throttle is nonzero, then it contains the 14465 * value that un_throttle was when we got a TRAN_BUSY back 14466 * from scsi_transport(). We want to revert back to this 14467 * value. 14468 * 14469 * In the QFULL case, the throttle limit will incrementally 14470 * increase until it reaches max throttle. 14471 */ 14472 if (un->un_busy_throttle > 0) { 14473 un->un_throttle = un->un_busy_throttle; 14474 un->un_busy_throttle = 0; 14475 } else { 14476 /* 14477 * increase throttle by 10% open gate slowly, schedule 14478 * another restore if saved throttle has not been 14479 * reached 14480 */ 14481 short throttle; 14482 if (sd_qfull_throttle_enable) { 14483 throttle = un->un_throttle + 14484 max((un->un_throttle / 10), 1); 14485 un->un_throttle = 14486 (throttle < un->un_saved_throttle) ? 
14487 throttle : un->un_saved_throttle; 14488 if (un->un_throttle < un->un_saved_throttle) { 14489 un->un_reset_throttle_timeid = 14490 timeout(sd_restore_throttle, 14491 un, 14492 SD_QFULL_THROTTLE_RESET_INTERVAL); 14493 } 14494 } 14495 } 14496 14497 /* 14498 * If un_throttle has fallen below the low-water mark, we 14499 * restore the maximum value here (and allow it to ratchet 14500 * down again if necessary). 14501 */ 14502 if (un->un_throttle < un->un_min_throttle) { 14503 un->un_throttle = un->un_saved_throttle; 14504 } 14505 } else { 14506 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14507 "restoring limit from 0x%x to 0x%x\n", 14508 un->un_throttle, un->un_saved_throttle); 14509 un->un_throttle = un->un_saved_throttle; 14510 } 14511 14512 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14513 "sd_restore_throttle: calling sd_start_cmds!\n"); 14514 14515 sd_start_cmds(un, NULL); 14516 14517 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14518 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 14519 un, un->un_throttle); 14520 14521 mutex_exit(SD_MUTEX(un)); 14522 14523 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 14524 } 14525 14526 /* 14527 * Function: sdrunout 14528 * 14529 * Description: Callback routine for scsi_init_pkt when a resource allocation 14530 * fails. 14531 * 14532 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 14533 * soft state instance. 14534 * 14535 * Return Code: The scsi_init_pkt routine allows for the callback function to 14536 * return a 0 indicating the callback should be rescheduled or a 1 14537 * indicating not to reschedule. This routine always returns 1 14538 * because the driver always provides a callback function to 14539 * scsi_init_pkt. This results in a callback always being scheduled 14540 * (via the scsi_init_pkt callback implementation) if a resource 14541 * failure occurs. 14542 * 14543 * Context: This callback function may not block or call routines that block 14544 * 14545 * Note: Using the scsi_init_pkt callback facility can result in an I/O 14546 * request persisting at the head of the list which cannot be 14547 * satisfied even after multiple retries. In the future the driver 14548 * may implement some time of maximum runout count before failing 14549 * an I/O. 14550 */ 14551 14552 static int 14553 sdrunout(caddr_t arg) 14554 { 14555 struct sd_lun *un = (struct sd_lun *)arg; 14556 14557 ASSERT(un != NULL); 14558 ASSERT(!mutex_owned(SD_MUTEX(un))); 14559 14560 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 14561 14562 mutex_enter(SD_MUTEX(un)); 14563 sd_start_cmds(un, NULL); 14564 mutex_exit(SD_MUTEX(un)); 14565 /* 14566 * This callback routine always returns 1 (i.e. do not reschedule) 14567 * because we always specify sdrunout as the callback handler for 14568 * scsi_init_pkt inside the call to sd_start_cmds. 14569 */ 14570 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 14571 return (1); 14572 } 14573 14574 14575 /* 14576 * Function: sdintr 14577 * 14578 * Description: Completion callback routine for scsi_pkt(9S) structs 14579 * sent to the HBA driver via scsi_transport(9F). 
14580 * 14581 * Context: Interrupt context 14582 */ 14583 14584 static void 14585 sdintr(struct scsi_pkt *pktp) 14586 { 14587 struct buf *bp; 14588 struct sd_xbuf *xp; 14589 struct sd_lun *un; 14590 14591 ASSERT(pktp != NULL); 14592 bp = (struct buf *)pktp->pkt_private; 14593 ASSERT(bp != NULL); 14594 xp = SD_GET_XBUF(bp); 14595 ASSERT(xp != NULL); 14596 ASSERT(xp->xb_pktp != NULL); 14597 un = SD_GET_UN(bp); 14598 ASSERT(un != NULL); 14599 ASSERT(!mutex_owned(SD_MUTEX(un))); 14600 14601 #ifdef SD_FAULT_INJECTION 14602 14603 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14604 /* SD FaultInjection */ 14605 sd_faultinjection(pktp); 14606 14607 #endif /* SD_FAULT_INJECTION */ 14608 14609 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14610 " xp:0x%p, un:0x%p\n", bp, xp, un); 14611 14612 mutex_enter(SD_MUTEX(un)); 14613 14614 /* Reduce the count of the #commands currently in transport */ 14615 un->un_ncmds_in_transport--; 14616 ASSERT(un->un_ncmds_in_transport >= 0); 14617 14618 /* Increment counter to indicate that the callback routine is active */ 14619 un->un_in_callback++; 14620 14621 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14622 14623 #ifdef SDDEBUG 14624 if (bp == un->un_retry_bp) { 14625 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14626 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14627 un, un->un_retry_bp, un->un_ncmds_in_transport); 14628 } 14629 #endif 14630 14631 /* 14632 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14633 * state if needed. 14634 */ 14635 if (pktp->pkt_reason == CMD_DEV_GONE) { 14636 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14637 "Device is gone\n"); 14638 if (un->un_mediastate != DKIO_DEV_GONE) { 14639 un->un_mediastate = DKIO_DEV_GONE; 14640 cv_broadcast(&un->un_state_cv); 14641 } 14642 sd_return_failed_command(un, bp, EIO); 14643 goto exit; 14644 } 14645 14646 /* 14647 * First see if the pkt has auto-request sense data with it.... 14648 * Look at the packet state first so we don't take a performance 14649 * hit looking at the arq enabled flag unless absolutely necessary. 14650 */ 14651 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14652 (un->un_f_arq_enabled == TRUE)) { 14653 /* 14654 * The HBA did an auto request sense for this command so check 14655 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14656 * driver command that should not be retried. 14657 */ 14658 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14659 /* 14660 * Save the relevant sense info into the xp for the 14661 * original cmd. 14662 */ 14663 struct scsi_arq_status *asp; 14664 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14665 xp->xb_sense_status = 14666 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14667 xp->xb_sense_state = asp->sts_rqpkt_state; 14668 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14669 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14670 min(sizeof (struct scsi_extended_sense), 14671 SENSE_LENGTH)); 14672 14673 /* fail the command */ 14674 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14675 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14676 sd_return_failed_command(un, bp, EIO); 14677 goto exit; 14678 } 14679 14680 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14681 /* 14682 * We want to either retry or fail this command, so free 14683 * the DMA resources here. If we retry the command then 14684 * the DMA resources will be reallocated in sd_start_cmds(). 
14685 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14686 * causes the *entire* transfer to start over again from the 14687 * beginning of the request, even for PARTIAL chunks that 14688 * have already transferred successfully. 14689 */ 14690 if ((un->un_f_is_fibre == TRUE) && 14691 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14692 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14693 scsi_dmafree(pktp); 14694 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14695 } 14696 #endif 14697 14698 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14699 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14700 14701 sd_handle_auto_request_sense(un, bp, xp, pktp); 14702 goto exit; 14703 } 14704 14705 /* Next see if this is the REQUEST SENSE pkt for the instance */ 14706 if (pktp->pkt_flags & FLAG_SENSING) { 14707 /* This pktp is from the unit's REQUEST_SENSE command */ 14708 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14709 "sdintr: sd_handle_request_sense\n"); 14710 sd_handle_request_sense(un, bp, xp, pktp); 14711 goto exit; 14712 } 14713 14714 /* 14715 * Check to see if the command successfully completed as requested; 14716 * this is the most common case (and also the hot performance path). 14717 * 14718 * Requirements for successful completion are: 14719 * pkt_reason is CMD_CMPLT and packet status is status good. 14720 * In addition: 14721 * - A residual of zero indicates successful completion no matter what 14722 * the command is. 14723 * - If the residual is not zero and the command is not a read or 14724 * write, then it's still defined as successful completion. In other 14725 * words, if the command is a read or write the residual must be 14726 * zero for successful completion. 14727 * - If the residual is not zero and the command is a read or 14728 * write, and it's a USCSICMD, then it's still defined as 14729 * successful completion. 14730 */ 14731 if ((pktp->pkt_reason == CMD_CMPLT) && 14732 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 14733 14734 /* 14735 * Since this command is returned with a good status, we 14736 * can reset the count for Sonoma failover. 14737 */ 14738 un->un_sonoma_failure_count = 0; 14739 14740 /* 14741 * Return all USCSI commands on good status 14742 */ 14743 if (pktp->pkt_resid == 0) { 14744 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14745 "sdintr: returning command for resid == 0\n"); 14746 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 14747 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 14748 SD_UPDATE_B_RESID(bp, pktp); 14749 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14750 "sdintr: returning command for resid != 0\n"); 14751 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14752 SD_UPDATE_B_RESID(bp, pktp); 14753 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14754 "sdintr: returning uscsi command\n"); 14755 } else { 14756 goto not_successful; 14757 } 14758 sd_return_command(un, bp); 14759 14760 /* 14761 * Decrement counter to indicate that the callback routine 14762 * is done. 14763 */ 14764 un->un_in_callback--; 14765 ASSERT(un->un_in_callback >= 0); 14766 mutex_exit(SD_MUTEX(un)); 14767 14768 return; 14769 } 14770 14771 not_successful: 14772 14773 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14774 /* 14775 * The following is based upon knowledge of the underlying transport 14776 * and its use of DMA resources. This code should be removed when 14777 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 14778 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 14779 * and sd_start_cmds(). 
14780 * 14781 * Free any DMA resources associated with this command if there 14782 * is a chance it could be retried or enqueued for later retry. 14783 * If we keep the DMA binding then mpxio cannot reissue the 14784 * command on another path whenever a path failure occurs. 14785 * 14786 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 14787 * causes the *entire* transfer to start over again from the 14788 * beginning of the request, even for PARTIAL chunks that 14789 * have already transferred successfully. 14790 * 14791 * This is only done for non-uscsi commands (and also skipped for the 14792 * driver's internal RQS command). Also just do this for Fibre Channel 14793 * devices as these are the only ones that support mpxio. 14794 */ 14795 if ((un->un_f_is_fibre == TRUE) && 14796 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14797 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14798 scsi_dmafree(pktp); 14799 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14800 } 14801 #endif 14802 14803 /* 14804 * The command did not successfully complete as requested so check 14805 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14806 * driver command that should not be retried so just return. If 14807 * FLAG_DIAGNOSE is not set the error will be processed below. 14808 */ 14809 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14810 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14811 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 14812 /* 14813 * Issue a request sense if a check condition caused the error 14814 * (we handle the auto request sense case above), otherwise 14815 * just fail the command. 14816 */ 14817 if ((pktp->pkt_reason == CMD_CMPLT) && 14818 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 14819 sd_send_request_sense_command(un, bp, pktp); 14820 } else { 14821 sd_return_failed_command(un, bp, EIO); 14822 } 14823 goto exit; 14824 } 14825 14826 /* 14827 * The command did not successfully complete as requested so process 14828 * the error, retry, and/or attempt recovery. 
14829 */ 14830 switch (pktp->pkt_reason) { 14831 case CMD_CMPLT: 14832 switch (SD_GET_PKT_STATUS(pktp)) { 14833 case STATUS_GOOD: 14834 /* 14835 * The command completed successfully with a non-zero 14836 * residual 14837 */ 14838 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14839 "sdintr: STATUS_GOOD \n"); 14840 sd_pkt_status_good(un, bp, xp, pktp); 14841 break; 14842 14843 case STATUS_CHECK: 14844 case STATUS_TERMINATED: 14845 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14846 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 14847 sd_pkt_status_check_condition(un, bp, xp, pktp); 14848 break; 14849 14850 case STATUS_BUSY: 14851 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14852 "sdintr: STATUS_BUSY\n"); 14853 sd_pkt_status_busy(un, bp, xp, pktp); 14854 break; 14855 14856 case STATUS_RESERVATION_CONFLICT: 14857 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14858 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 14859 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 14860 break; 14861 14862 case STATUS_QFULL: 14863 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14864 "sdintr: STATUS_QFULL\n"); 14865 sd_pkt_status_qfull(un, bp, xp, pktp); 14866 break; 14867 14868 case STATUS_MET: 14869 case STATUS_INTERMEDIATE: 14870 case STATUS_SCSI2: 14871 case STATUS_INTERMEDIATE_MET: 14872 case STATUS_ACA_ACTIVE: 14873 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14874 "Unexpected SCSI status received: 0x%x\n", 14875 SD_GET_PKT_STATUS(pktp)); 14876 sd_return_failed_command(un, bp, EIO); 14877 break; 14878 14879 default: 14880 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14881 "Invalid SCSI status received: 0x%x\n", 14882 SD_GET_PKT_STATUS(pktp)); 14883 sd_return_failed_command(un, bp, EIO); 14884 break; 14885 14886 } 14887 break; 14888 14889 case CMD_INCOMPLETE: 14890 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14891 "sdintr: CMD_INCOMPLETE\n"); 14892 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 14893 break; 14894 case CMD_TRAN_ERR: 14895 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14896 "sdintr: CMD_TRAN_ERR\n"); 14897 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 14898 break; 14899 case CMD_RESET: 14900 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14901 "sdintr: CMD_RESET \n"); 14902 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 14903 break; 14904 case CMD_ABORTED: 14905 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14906 "sdintr: CMD_ABORTED \n"); 14907 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 14908 break; 14909 case CMD_TIMEOUT: 14910 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14911 "sdintr: CMD_TIMEOUT\n"); 14912 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 14913 break; 14914 case CMD_UNX_BUS_FREE: 14915 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14916 "sdintr: CMD_UNX_BUS_FREE \n"); 14917 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 14918 break; 14919 case CMD_TAG_REJECT: 14920 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14921 "sdintr: CMD_TAG_REJECT\n"); 14922 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 14923 break; 14924 default: 14925 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14926 "sdintr: default\n"); 14927 sd_pkt_reason_default(un, bp, xp, pktp); 14928 break; 14929 } 14930 14931 exit: 14932 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 14933 14934 /* Decrement counter to indicate that the callback routine is done. */ 14935 un->un_in_callback--; 14936 ASSERT(un->un_in_callback >= 0); 14937 14938 /* 14939 * At this point, the pkt has been dispatched, ie, it is either 14940 * being re-tried or has been returned to its caller and should 14941 * not be referenced. 
14942 */ 14943 14944 mutex_exit(SD_MUTEX(un)); 14945 } 14946 14947 14948 /* 14949 * Function: sd_print_incomplete_msg 14950 * 14951 * Description: Prints the error message for a CMD_INCOMPLETE error. 14952 * 14953 * Arguments: un - ptr to associated softstate for the device. 14954 * bp - ptr to the buf(9S) for the command. 14955 * arg - message string ptr 14956 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 14957 * or SD_NO_RETRY_ISSUED. 14958 * 14959 * Context: May be called under interrupt context 14960 */ 14961 14962 static void 14963 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 14964 { 14965 struct scsi_pkt *pktp; 14966 char *msgp; 14967 char *cmdp = arg; 14968 14969 ASSERT(un != NULL); 14970 ASSERT(mutex_owned(SD_MUTEX(un))); 14971 ASSERT(bp != NULL); 14972 ASSERT(arg != NULL); 14973 pktp = SD_GET_PKTP(bp); 14974 ASSERT(pktp != NULL); 14975 14976 switch (code) { 14977 case SD_DELAYED_RETRY_ISSUED: 14978 case SD_IMMEDIATE_RETRY_ISSUED: 14979 msgp = "retrying"; 14980 break; 14981 case SD_NO_RETRY_ISSUED: 14982 default: 14983 msgp = "giving up"; 14984 break; 14985 } 14986 14987 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 14988 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14989 "incomplete %s- %s\n", cmdp, msgp); 14990 } 14991 } 14992 14993 14994 14995 /* 14996 * Function: sd_pkt_status_good 14997 * 14998 * Description: Processing for a STATUS_GOOD code in pkt_status. 14999 * 15000 * Context: May be called under interrupt context 15001 */ 15002 15003 static void 15004 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15005 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15006 { 15007 char *cmdp; 15008 15009 ASSERT(un != NULL); 15010 ASSERT(mutex_owned(SD_MUTEX(un))); 15011 ASSERT(bp != NULL); 15012 ASSERT(xp != NULL); 15013 ASSERT(pktp != NULL); 15014 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15015 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15016 ASSERT(pktp->pkt_resid != 0); 15017 15018 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15019 15020 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15021 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15022 case SCMD_READ: 15023 cmdp = "read"; 15024 break; 15025 case SCMD_WRITE: 15026 cmdp = "write"; 15027 break; 15028 default: 15029 SD_UPDATE_B_RESID(bp, pktp); 15030 sd_return_command(un, bp); 15031 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15032 return; 15033 } 15034 15035 /* 15036 * See if we can retry the read/write, preferrably immediately. 15037 * If retries are exhaused, then sd_retry_command() will update 15038 * the b_resid count. 15039 */ 15040 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15041 cmdp, EIO, (clock_t)0, NULL); 15042 15043 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15044 } 15045 15046 15047 15048 15049 15050 /* 15051 * Function: sd_handle_request_sense 15052 * 15053 * Description: Processing for non-auto Request Sense command. 
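 *
 *		For illustration, the RQS packet carries a pointer back to
 *		the original command, so the original bp/xp/pktp are
 *		recovered in the body below roughly as follows (sketch only):
 *
 *		    cmd_bp   = sense_xp->xb_sense_bp;
 *		    cmd_xp   = SD_GET_XBUF(cmd_bp);
 *		    cmd_pktp = SD_GET_PKTP(cmd_bp);
 *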
15054 * 15055 * Arguments: un - ptr to associated softstate 15056 * sense_bp - ptr to buf(9S) for the RQS command 15057 * sense_xp - ptr to the sd_xbuf for the RQS command 15058 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15059 * 15060 * Context: May be called under interrupt context 15061 */ 15062 15063 static void 15064 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15065 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15066 { 15067 struct buf *cmd_bp; /* buf for the original command */ 15068 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15069 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15070 15071 ASSERT(un != NULL); 15072 ASSERT(mutex_owned(SD_MUTEX(un))); 15073 ASSERT(sense_bp != NULL); 15074 ASSERT(sense_xp != NULL); 15075 ASSERT(sense_pktp != NULL); 15076 15077 /* 15078 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15079 * RQS command and not the original command. 15080 */ 15081 ASSERT(sense_pktp == un->un_rqs_pktp); 15082 ASSERT(sense_bp == un->un_rqs_bp); 15083 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15084 (FLAG_SENSING | FLAG_HEAD)); 15085 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15086 FLAG_SENSING) == FLAG_SENSING); 15087 15088 /* These are the bp, xp, and pktp for the original command */ 15089 cmd_bp = sense_xp->xb_sense_bp; 15090 cmd_xp = SD_GET_XBUF(cmd_bp); 15091 cmd_pktp = SD_GET_PKTP(cmd_bp); 15092 15093 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15094 /* 15095 * The REQUEST SENSE command failed. Release the REQUEST 15096 * SENSE command for re-use, get back the bp for the original 15097 * command, and attempt to re-try the original command if 15098 * FLAG_DIAGNOSE is not set in the original packet. 15099 */ 15100 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15101 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15102 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15103 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15104 NULL, NULL, EIO, (clock_t)0, NULL); 15105 return; 15106 } 15107 } 15108 15109 /* 15110 * Save the relevant sense info into the xp for the original cmd. 15111 * 15112 * Note: if the request sense failed the state info will be zero 15113 * as set in sd_mark_rqs_busy() 15114 */ 15115 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15116 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15117 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15118 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 15119 15120 /* 15121 * Free up the RQS command.... 15122 * NOTE: 15123 * Must do this BEFORE calling sd_validate_sense_data! 15124 * sd_validate_sense_data may return the original command in 15125 * which case the pkt will be freed and the flags can no 15126 * longer be touched. 15127 * SD_MUTEX is held through this process until the command 15128 * is dispatched based upon the sense data, so there are 15129 * no race conditions. 15130 */ 15131 (void) sd_mark_rqs_idle(un, sense_xp); 15132 15133 /* 15134 * For a retryable command see if we have valid sense data, if so then 15135 * turn it over to sd_decode_sense() to figure out the right course of 15136 * action. Just fail a non-retryable command. 
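 * (As elsewhere in this driver, FLAG_DIAGNOSE in the original packet is
 * what marks the command as a uscsi or internal driver command that must
 * not be retried.)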
15137 */ 15138 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15139 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 15140 SD_SENSE_DATA_IS_VALID) { 15141 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15142 } 15143 } else { 15144 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15145 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15146 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15147 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15148 sd_return_failed_command(un, cmd_bp, EIO); 15149 } 15150 } 15151 15152 15153 15154 15155 /* 15156 * Function: sd_handle_auto_request_sense 15157 * 15158 * Description: Processing for auto-request sense information. 15159 * 15160 * Arguments: un - ptr to associated softstate 15161 * bp - ptr to buf(9S) for the command 15162 * xp - ptr to the sd_xbuf for the command 15163 * pktp - ptr to the scsi_pkt(9S) for the command 15164 * 15165 * Context: May be called under interrupt context 15166 */ 15167 15168 static void 15169 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15170 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15171 { 15172 struct scsi_arq_status *asp; 15173 15174 ASSERT(un != NULL); 15175 ASSERT(mutex_owned(SD_MUTEX(un))); 15176 ASSERT(bp != NULL); 15177 ASSERT(xp != NULL); 15178 ASSERT(pktp != NULL); 15179 ASSERT(pktp != un->un_rqs_pktp); 15180 ASSERT(bp != un->un_rqs_bp); 15181 15182 /* 15183 * For auto-request sense, we get a scsi_arq_status back from 15184 * the HBA, with the sense data in the sts_sensedata member. 15185 * The pkt_scbp of the packet points to this scsi_arq_status. 15186 */ 15187 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15188 15189 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15190 /* 15191 * The auto REQUEST SENSE failed; see if we can re-try 15192 * the original command. 15193 */ 15194 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15195 "auto request sense failed (reason=%s)\n", 15196 scsi_rname(asp->sts_rqpkt_reason)); 15197 15198 sd_reset_target(un, pktp); 15199 15200 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15201 NULL, NULL, EIO, (clock_t)0, NULL); 15202 return; 15203 } 15204 15205 /* Save the relevant sense info into the xp for the original cmd. */ 15206 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15207 xp->xb_sense_state = asp->sts_rqpkt_state; 15208 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15209 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15210 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 15211 15212 /* 15213 * See if we have valid sense data, if so then turn it over to 15214 * sd_decode_sense() to figure out the right course of action. 15215 */ 15216 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 15217 sd_decode_sense(un, bp, xp, pktp); 15218 } 15219 } 15220 15221 15222 /* 15223 * Function: sd_print_sense_failed_msg 15224 * 15225 * Description: Print log message when RQS has failed. 
15226 * 15227 * Arguments: un - ptr to associated softstate 15228 * bp - ptr to buf(9S) for the command 15229 * arg - generic message string ptr 15230 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15231 * or SD_NO_RETRY_ISSUED 15232 * 15233 * Context: May be called from interrupt context 15234 */ 15235 15236 static void 15237 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15238 int code) 15239 { 15240 char *msgp = arg; 15241 15242 ASSERT(un != NULL); 15243 ASSERT(mutex_owned(SD_MUTEX(un))); 15244 ASSERT(bp != NULL); 15245 15246 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15247 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15248 } 15249 } 15250 15251 15252 /* 15253 * Function: sd_validate_sense_data 15254 * 15255 * Description: Check the given sense data for validity. 15256 * If the sense data is not valid, the command will 15257 * be either failed or retried! 15258 * 15259 * Return Code: SD_SENSE_DATA_IS_INVALID 15260 * SD_SENSE_DATA_IS_VALID 15261 * 15262 * Context: May be called from interrupt context 15263 */ 15264 15265 static int 15266 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 15267 { 15268 struct scsi_extended_sense *esp; 15269 struct scsi_pkt *pktp; 15270 size_t actual_len; 15271 char *msgp = NULL; 15272 15273 ASSERT(un != NULL); 15274 ASSERT(mutex_owned(SD_MUTEX(un))); 15275 ASSERT(bp != NULL); 15276 ASSERT(bp != un->un_rqs_bp); 15277 ASSERT(xp != NULL); 15278 15279 pktp = SD_GET_PKTP(bp); 15280 ASSERT(pktp != NULL); 15281 15282 /* 15283 * Check the status of the RQS command (auto or manual). 15284 */ 15285 switch (xp->xb_sense_status & STATUS_MASK) { 15286 case STATUS_GOOD: 15287 break; 15288 15289 case STATUS_RESERVATION_CONFLICT: 15290 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15291 return (SD_SENSE_DATA_IS_INVALID); 15292 15293 case STATUS_BUSY: 15294 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15295 "Busy Status on REQUEST SENSE\n"); 15296 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15297 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15298 return (SD_SENSE_DATA_IS_INVALID); 15299 15300 case STATUS_QFULL: 15301 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15302 "QFULL Status on REQUEST SENSE\n"); 15303 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15304 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15305 return (SD_SENSE_DATA_IS_INVALID); 15306 15307 case STATUS_CHECK: 15308 case STATUS_TERMINATED: 15309 msgp = "Check Condition on REQUEST SENSE\n"; 15310 goto sense_failed; 15311 15312 default: 15313 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15314 goto sense_failed; 15315 } 15316 15317 /* 15318 * See if we got the minimum required amount of sense data. 15319 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15320 * or less. 
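 * For instance, if SENSE_LENGTH were 20 bytes and the request sense
 * completed with an xb_sense_resid of 2, actual_len below would be 18,
 * which passes both checks (assuming STATE_XFERRED_DATA is set and
 * SUN_MIN_SENSE_LENGTH is no larger than 18).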
15321 */ 15322 actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid); 15323 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15324 (actual_len == 0)) { 15325 msgp = "Request Sense couldn't get sense data\n"; 15326 goto sense_failed; 15327 } 15328 15329 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15330 msgp = "Not enough sense information\n"; 15331 goto sense_failed; 15332 } 15333 15334 /* 15335 * We require the extended sense data 15336 */ 15337 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15338 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15339 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15340 static char tmp[8]; 15341 static char buf[148]; 15342 char *p = (char *)(xp->xb_sense_data); 15343 int i; 15344 15345 mutex_enter(&sd_sense_mutex); 15346 (void) strcpy(buf, "undecodable sense information:"); 15347 for (i = 0; i < actual_len; i++) { 15348 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15349 (void) strcpy(&buf[strlen(buf)], tmp); 15350 } 15351 i = strlen(buf); 15352 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15353 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15354 mutex_exit(&sd_sense_mutex); 15355 } 15356 /* Note: Legacy behavior, fail the command with no retry */ 15357 sd_return_failed_command(un, bp, EIO); 15358 return (SD_SENSE_DATA_IS_INVALID); 15359 } 15360 15361 /* 15362 * Check that es_code is valid (es_class concatenated with es_code 15363 * makes up the "response code" field). es_class will always be 7, so 15364 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code indicates the 15365 * sense data format. 15366 */ 15367 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15368 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15369 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15370 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15371 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15372 goto sense_failed; 15373 } 15374 15375 return (SD_SENSE_DATA_IS_VALID); 15376 15377 sense_failed: 15378 /* 15379 * If the request sense failed (for whatever reason), attempt 15380 * to retry the original command. 15381 */ 15382 #if defined(__i386) || defined(__amd64) 15383 /* 15384 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15385 * sddef.h for the SPARC platform, while x86 uses one binary 15386 * for both SCSI and FC. 15387 * The delay value used here needs to be adjusted whenever 15388 * SD_RETRY_DELAY changes in sddef.h 15389 */ 15390 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15391 sd_print_sense_failed_msg, msgp, EIO, 15392 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15393 #else 15394 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15395 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15396 #endif 15397 15398 return (SD_SENSE_DATA_IS_INVALID); 15399 } 15400 15401 15402 15403 /* 15404 * Function: sd_decode_sense 15405 * 15406 * Description: Take recovery action(s) when SCSI Sense Data is received. 15407 * 15408 * Context: Interrupt context.
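 *
 *		Note (illustrative example, hypothetical data): a fixed-format
 *		sense buffer beginning 0x70 0x00 0x02 ... carries a response
 *		code of 0x70 (current error, fixed format) and a sense key of
 *		0x2 (Not Ready) in the low nibble of byte 2, which the switch
 *		below would route to sd_sense_key_not_ready().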
15409 */ 15410 15411 static void 15412 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15413 struct scsi_pkt *pktp) 15414 { 15415 uint8_t sense_key; 15416 15417 ASSERT(un != NULL); 15418 ASSERT(mutex_owned(SD_MUTEX(un))); 15419 ASSERT(bp != NULL); 15420 ASSERT(bp != un->un_rqs_bp); 15421 ASSERT(xp != NULL); 15422 ASSERT(pktp != NULL); 15423 15424 sense_key = scsi_sense_key(xp->xb_sense_data); 15425 15426 switch (sense_key) { 15427 case KEY_NO_SENSE: 15428 sd_sense_key_no_sense(un, bp, xp, pktp); 15429 break; 15430 case KEY_RECOVERABLE_ERROR: 15431 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15432 bp, xp, pktp); 15433 break; 15434 case KEY_NOT_READY: 15435 sd_sense_key_not_ready(un, xp->xb_sense_data, 15436 bp, xp, pktp); 15437 break; 15438 case KEY_MEDIUM_ERROR: 15439 case KEY_HARDWARE_ERROR: 15440 sd_sense_key_medium_or_hardware_error(un, 15441 xp->xb_sense_data, bp, xp, pktp); 15442 break; 15443 case KEY_ILLEGAL_REQUEST: 15444 sd_sense_key_illegal_request(un, bp, xp, pktp); 15445 break; 15446 case KEY_UNIT_ATTENTION: 15447 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15448 bp, xp, pktp); 15449 break; 15450 case KEY_WRITE_PROTECT: 15451 case KEY_VOLUME_OVERFLOW: 15452 case KEY_MISCOMPARE: 15453 sd_sense_key_fail_command(un, bp, xp, pktp); 15454 break; 15455 case KEY_BLANK_CHECK: 15456 sd_sense_key_blank_check(un, bp, xp, pktp); 15457 break; 15458 case KEY_ABORTED_COMMAND: 15459 sd_sense_key_aborted_command(un, bp, xp, pktp); 15460 break; 15461 case KEY_VENDOR_UNIQUE: 15462 case KEY_COPY_ABORTED: 15463 case KEY_EQUAL: 15464 case KEY_RESERVED: 15465 default: 15466 sd_sense_key_default(un, xp->xb_sense_data, 15467 bp, xp, pktp); 15468 break; 15469 } 15470 } 15471 15472 15473 /* 15474 * Function: sd_dump_memory 15475 * 15476 * Description: Debug logging routine to print the contents of a user provided 15477 * buffer. The output of the buffer is broken up into 256 byte 15478 * segments due to a size constraint of the scsi_log. 15479 * implementation. 15480 * 15481 * Arguments: un - ptr to softstate 15482 * comp - component mask 15483 * title - "title" string to preceed data when printed 15484 * data - ptr to data block to be printed 15485 * len - size of data block to be printed 15486 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 15487 * 15488 * Context: May be called from interrupt context 15489 */ 15490 15491 #define SD_DUMP_MEMORY_BUF_SIZE 256 15492 15493 static char *sd_dump_format_string[] = { 15494 " 0x%02x", 15495 " %c" 15496 }; 15497 15498 static void 15499 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 15500 int len, int fmt) 15501 { 15502 int i, j; 15503 int avail_count; 15504 int start_offset; 15505 int end_offset; 15506 size_t entry_len; 15507 char *bufp; 15508 char *local_buf; 15509 char *format_string; 15510 15511 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 15512 15513 /* 15514 * In the debug version of the driver, this function is called from a 15515 * number of places which are NOPs in the release driver. 15516 * The debug driver therefore has additional methods of filtering 15517 * debug output. 15518 */ 15519 #ifdef SDDEBUG 15520 /* 15521 * In the debug version of the driver we can reduce the amount of debug 15522 * messages by setting sd_error_level to something other than 15523 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 15524 * sd_component_mask. 
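 * For example (illustrative values only, assuming a DEBUG build of the
 * module in which these globals are present), the variables can be preset
 * from /etc/system:
 *
 *     set sd:sd_component_mask = 0x0
 *     set sd:sd_level_mask = 0x0
 *
 * or patched at run time with a kernel debugger.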
15525 */ 15526 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 15527 (sd_error_level != SCSI_ERR_ALL)) { 15528 return; 15529 } 15530 if (((sd_component_mask & comp) == 0) || 15531 (sd_error_level != SCSI_ERR_ALL)) { 15532 return; 15533 } 15534 #else 15535 if (sd_error_level != SCSI_ERR_ALL) { 15536 return; 15537 } 15538 #endif 15539 15540 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 15541 bufp = local_buf; 15542 /* 15543 * Available length is the length of local_buf[], minus the 15544 * length of the title string, minus one for the ":", minus 15545 * one for the newline, minus one for the NULL terminator. 15546 * This gives the #bytes available for holding the printed 15547 * values from the given data buffer. 15548 */ 15549 if (fmt == SD_LOG_HEX) { 15550 format_string = sd_dump_format_string[0]; 15551 } else /* SD_LOG_CHAR */ { 15552 format_string = sd_dump_format_string[1]; 15553 } 15554 /* 15555 * Available count is the number of elements from the given 15556 * data buffer that we can fit into the available length. 15557 * This is based upon the size of the format string used. 15558 * Make one entry and find it's size. 15559 */ 15560 (void) sprintf(bufp, format_string, data[0]); 15561 entry_len = strlen(bufp); 15562 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 15563 15564 j = 0; 15565 while (j < len) { 15566 bufp = local_buf; 15567 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 15568 start_offset = j; 15569 15570 end_offset = start_offset + avail_count; 15571 15572 (void) sprintf(bufp, "%s:", title); 15573 bufp += strlen(bufp); 15574 for (i = start_offset; ((i < end_offset) && (j < len)); 15575 i++, j++) { 15576 (void) sprintf(bufp, format_string, data[i]); 15577 bufp += entry_len; 15578 } 15579 (void) sprintf(bufp, "\n"); 15580 15581 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 15582 } 15583 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 15584 } 15585 15586 /* 15587 * Function: sd_print_sense_msg 15588 * 15589 * Description: Log a message based upon the given sense data. 
15590 * 15591 * Arguments: un - ptr to associated softstate 15592 * bp - ptr to buf(9S) for the command 15593 * arg - ptr to associate sd_sense_info struct 15594 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15595 * or SD_NO_RETRY_ISSUED 15596 * 15597 * Context: May be called from interrupt context 15598 */ 15599 15600 static void 15601 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15602 { 15603 struct sd_xbuf *xp; 15604 struct scsi_pkt *pktp; 15605 uint8_t *sensep; 15606 daddr_t request_blkno; 15607 diskaddr_t err_blkno; 15608 int severity; 15609 int pfa_flag; 15610 extern struct scsi_key_strings scsi_cmds[]; 15611 15612 ASSERT(un != NULL); 15613 ASSERT(mutex_owned(SD_MUTEX(un))); 15614 ASSERT(bp != NULL); 15615 xp = SD_GET_XBUF(bp); 15616 ASSERT(xp != NULL); 15617 pktp = SD_GET_PKTP(bp); 15618 ASSERT(pktp != NULL); 15619 ASSERT(arg != NULL); 15620 15621 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 15622 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 15623 15624 if ((code == SD_DELAYED_RETRY_ISSUED) || 15625 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 15626 severity = SCSI_ERR_RETRYABLE; 15627 } 15628 15629 /* Use absolute block number for the request block number */ 15630 request_blkno = xp->xb_blkno; 15631 15632 /* 15633 * Now try to get the error block number from the sense data 15634 */ 15635 sensep = xp->xb_sense_data; 15636 15637 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 15638 (uint64_t *)&err_blkno)) { 15639 /* 15640 * We retrieved the error block number from the information 15641 * portion of the sense data. 15642 * 15643 * For USCSI commands we are better off using the error 15644 * block no. as the requested block no. (This is the best 15645 * we can estimate.) 15646 */ 15647 if ((SD_IS_BUFIO(xp) == FALSE) && 15648 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 15649 request_blkno = err_blkno; 15650 } 15651 } else { 15652 /* 15653 * Without the es_valid bit set (for fixed format) or an 15654 * information descriptor (for descriptor format) we cannot 15655 * be certain of the error blkno, so just use the 15656 * request_blkno. 15657 */ 15658 err_blkno = (diskaddr_t)request_blkno; 15659 } 15660 15661 /* 15662 * The following will log the buffer contents for the release driver 15663 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 15664 * level is set to verbose. 
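 * With SD_LOG_HEX formatting the resulting console lines take the form
 * (bytes shown here are illustrative only, and the scsi_log() device
 * prefix is omitted):
 *
 *     Failed CDB: 0x28 0x00 0x00 0x12 0xd6 0x87 0x00 0x00 0x10 0x00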
15665 */ 15666 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15667 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15668 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15669 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 15670 15671 if (pfa_flag == FALSE) { 15672 /* This is normally only set for USCSI */ 15673 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 15674 return; 15675 } 15676 15677 if ((SD_IS_BUFIO(xp) == TRUE) && 15678 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 15679 (severity < sd_error_level))) { 15680 return; 15681 } 15682 } 15683 15684 /* 15685 * Check for Sonoma Failover and keep a count of how many failed I/O's 15686 */ 15687 if ((SD_IS_LSI(un)) && 15688 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 15689 (scsi_sense_asc(sensep) == 0x94) && 15690 (scsi_sense_ascq(sensep) == 0x01)) { 15691 un->un_sonoma_failure_count++; 15692 if (un->un_sonoma_failure_count > 1) { 15693 return; 15694 } 15695 } 15696 15697 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 15698 request_blkno, err_blkno, scsi_cmds, 15699 (struct scsi_extended_sense *)sensep, 15700 un->un_additional_codes, NULL); 15701 } 15702 15703 /* 15704 * Function: sd_sense_key_no_sense 15705 * 15706 * Description: Recovery action when sense data was not received. 15707 * 15708 * Context: May be called from interrupt context 15709 */ 15710 15711 static void 15712 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 15713 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15714 { 15715 struct sd_sense_info si; 15716 15717 ASSERT(un != NULL); 15718 ASSERT(mutex_owned(SD_MUTEX(un))); 15719 ASSERT(bp != NULL); 15720 ASSERT(xp != NULL); 15721 ASSERT(pktp != NULL); 15722 15723 si.ssi_severity = SCSI_ERR_FATAL; 15724 si.ssi_pfa_flag = FALSE; 15725 15726 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15727 15728 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15729 &si, EIO, (clock_t)0, NULL); 15730 } 15731 15732 15733 /* 15734 * Function: sd_sense_key_recoverable_error 15735 * 15736 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 15737 * 15738 * Context: May be called from interrupt context 15739 */ 15740 15741 static void 15742 sd_sense_key_recoverable_error(struct sd_lun *un, 15743 uint8_t *sense_datap, 15744 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15745 { 15746 struct sd_sense_info si; 15747 uint8_t asc = scsi_sense_asc(sense_datap); 15748 15749 ASSERT(un != NULL); 15750 ASSERT(mutex_owned(SD_MUTEX(un))); 15751 ASSERT(bp != NULL); 15752 ASSERT(xp != NULL); 15753 ASSERT(pktp != NULL); 15754 15755 /* 15756 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 15757 */ 15758 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 15759 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 15760 si.ssi_severity = SCSI_ERR_INFO; 15761 si.ssi_pfa_flag = TRUE; 15762 } else { 15763 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15764 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 15765 si.ssi_severity = SCSI_ERR_RECOVERED; 15766 si.ssi_pfa_flag = FALSE; 15767 } 15768 15769 if (pktp->pkt_resid == 0) { 15770 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 15771 sd_return_command(un, bp); 15772 return; 15773 } 15774 15775 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15776 &si, EIO, (clock_t)0, NULL); 15777 } 15778 15779 15780 15781 15782 /* 15783 * Function: sd_sense_key_not_ready 15784 * 15785 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
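 *		For illustration, the common cases handled below include
 *		ASC 0x04/ASCQ 0x01 (LUN is becoming ready, which is simply
 *		retried after a delay) and ASC 0x3A (medium not present,
 *		which is failed immediately after notifying the media-watch
 *		routines).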
15786 * 15787 * Context: May be called from interrupt context 15788 */ 15789 15790 static void 15791 sd_sense_key_not_ready(struct sd_lun *un, 15792 uint8_t *sense_datap, 15793 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15794 { 15795 struct sd_sense_info si; 15796 uint8_t asc = scsi_sense_asc(sense_datap); 15797 uint8_t ascq = scsi_sense_ascq(sense_datap); 15798 15799 ASSERT(un != NULL); 15800 ASSERT(mutex_owned(SD_MUTEX(un))); 15801 ASSERT(bp != NULL); 15802 ASSERT(xp != NULL); 15803 ASSERT(pktp != NULL); 15804 15805 si.ssi_severity = SCSI_ERR_FATAL; 15806 si.ssi_pfa_flag = FALSE; 15807 15808 /* 15809 * Update error stats after first NOT READY error. Disks may have 15810 * been powered down and may need to be restarted. For CDROMs, 15811 * report NOT READY errors only if media is present. 15812 */ 15813 if ((ISCD(un) && (asc == 0x3A)) || 15814 (xp->xb_retry_count > 0)) { 15815 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15816 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 15817 } 15818 15819 /* 15820 * Just fail if the "not ready" retry limit has been reached. 15821 */ 15822 if (xp->xb_retry_count >= un->un_notready_retry_count) { 15823 /* Special check for error message printing for removables. */ 15824 if (un->un_f_has_removable_media && (asc == 0x04) && 15825 (ascq >= 0x04)) { 15826 si.ssi_severity = SCSI_ERR_ALL; 15827 } 15828 goto fail_command; 15829 } 15830 15831 /* 15832 * Check the ASC and ASCQ in the sense data as needed, to determine 15833 * what to do. 15834 */ 15835 switch (asc) { 15836 case 0x04: /* LOGICAL UNIT NOT READY */ 15837 /* 15838 * disk drives that don't spin up result in a very long delay 15839 * in format without warning messages. We will log a message 15840 * if the error level is set to verbose. 15841 */ 15842 if (sd_error_level < SCSI_ERR_RETRYABLE) { 15843 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15844 "logical unit not ready, resetting disk\n"); 15845 } 15846 15847 /* 15848 * There are different requirements for CDROMs and disks for 15849 * the number of retries. If a CD-ROM is giving this, it is 15850 * probably reading TOC and is in the process of getting 15851 * ready, so we should keep on trying for a long time to make 15852 * sure that all types of media are taken in account (for 15853 * some media the drive takes a long time to read TOC). For 15854 * disks we do not want to retry this too many times as this 15855 * can cause a long hang in format when the drive refuses to 15856 * spin up (a very common failure). 15857 */ 15858 switch (ascq) { 15859 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 15860 /* 15861 * Disk drives frequently refuse to spin up which 15862 * results in a very long hang in format without 15863 * warning messages. 15864 * 15865 * Note: This code preserves the legacy behavior of 15866 * comparing xb_retry_count against zero for fibre 15867 * channel targets instead of comparing against the 15868 * un_reset_retry_count value. The reason for this 15869 * discrepancy has been so utterly lost beneath the 15870 * Sands of Time that even Indiana Jones could not 15871 * find it. 
15872 */ 15873 if (un->un_f_is_fibre == TRUE) { 15874 if (((sd_level_mask & SD_LOGMASK_DIAG) || 15875 (xp->xb_retry_count > 0)) && 15876 (un->un_startstop_timeid == NULL)) { 15877 scsi_log(SD_DEVINFO(un), sd_label, 15878 CE_WARN, "logical unit not ready, " 15879 "resetting disk\n"); 15880 sd_reset_target(un, pktp); 15881 } 15882 } else { 15883 if (((sd_level_mask & SD_LOGMASK_DIAG) || 15884 (xp->xb_retry_count > 15885 un->un_reset_retry_count)) && 15886 (un->un_startstop_timeid == NULL)) { 15887 scsi_log(SD_DEVINFO(un), sd_label, 15888 CE_WARN, "logical unit not ready, " 15889 "resetting disk\n"); 15890 sd_reset_target(un, pktp); 15891 } 15892 } 15893 break; 15894 15895 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 15896 /* 15897 * If the target is in the process of becoming 15898 * ready, just proceed with the retry. This can 15899 * happen with CD-ROMs that take a long time to 15900 * read TOC after a power cycle or reset. 15901 */ 15902 goto do_retry; 15903 15904 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 15905 break; 15906 15907 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 15908 /* 15909 * Retries cannot help here so just fail right away. 15910 */ 15911 goto fail_command; 15912 15913 case 0x88: 15914 /* 15915 * Vendor-unique code for T3/T4: it indicates a 15916 * path problem in a multipathed config, but as far as 15917 * the target driver is concerned it equates to a fatal 15918 * error, so we should just fail the command right away 15919 * (without printing anything to the console). If this 15920 * is not a T3/T4, fall thru to the default recovery 15921 * action. 15922 * T3/T4 is FC only; no need to check is_fibre. 15923 */ 15924 if (SD_IS_T3(un) || SD_IS_T4(un)) { 15925 sd_return_failed_command(un, bp, EIO); 15926 return; 15927 } 15928 /* FALLTHRU */ 15929 15930 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 15931 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 15932 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 15933 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 15934 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 15935 default: /* Possible future codes in SCSI spec? */ 15936 /* 15937 * For removable-media devices, do not retry if 15938 * ASCQ > 2 as these result mostly from USCSI commands 15939 * on MMC devices issued to check the status of an 15940 * operation initiated in immediate mode. Also for 15941 * ASCQ >= 4 do not print console messages as these 15942 * mainly represent a user-initiated operation 15943 * instead of a system failure. 15944 */ 15945 if (un->un_f_has_removable_media) { 15946 si.ssi_severity = SCSI_ERR_ALL; 15947 goto fail_command; 15948 } 15949 break; 15950 } 15951 15952 /* 15953 * As part of our recovery attempt for the NOT READY 15954 * condition, we issue a START STOP UNIT command. However 15955 * we want to wait for a short delay before attempting this 15956 * as there may still be more commands coming back from the 15957 * target with the check condition. To do this we use 15958 * timeout(9F) to call sd_start_stop_unit_callback() after 15959 * the delay interval expires. (sd_start_stop_unit_callback() 15960 * dispatches sd_start_stop_unit_task(), which will issue 15961 * the actual START STOP UNIT command.) The delay interval 15962 * is one-half of the delay that we will use to retry the 15963 * command that generated the NOT READY condition.
15964 * 15965 * Note that we could just dispatch sd_start_stop_unit_task() 15966 * from here and allow it to sleep for the delay interval, 15967 * but then we would be tying up the taskq thread 15968 * uncesessarily for the duration of the delay. 15969 * 15970 * Do not issue the START STOP UNIT if the current command 15971 * is already a START STOP UNIT. 15972 */ 15973 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 15974 break; 15975 } 15976 15977 /* 15978 * Do not schedule the timeout if one is already pending. 15979 */ 15980 if (un->un_startstop_timeid != NULL) { 15981 SD_INFO(SD_LOG_ERROR, un, 15982 "sd_sense_key_not_ready: restart already issued to" 15983 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 15984 ddi_get_instance(SD_DEVINFO(un))); 15985 break; 15986 } 15987 15988 /* 15989 * Schedule the START STOP UNIT command, then queue the command 15990 * for a retry. 15991 * 15992 * Note: A timeout is not scheduled for this retry because we 15993 * want the retry to be serial with the START_STOP_UNIT. The 15994 * retry will be started when the START_STOP_UNIT is completed 15995 * in sd_start_stop_unit_task. 15996 */ 15997 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 15998 un, SD_BSY_TIMEOUT / 2); 15999 xp->xb_retry_count++; 16000 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 16001 return; 16002 16003 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 16004 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16005 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16006 "unit does not respond to selection\n"); 16007 } 16008 break; 16009 16010 case 0x3A: /* MEDIUM NOT PRESENT */ 16011 if (sd_error_level >= SCSI_ERR_FATAL) { 16012 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16013 "Caddy not inserted in drive\n"); 16014 } 16015 16016 sr_ejected(un); 16017 un->un_mediastate = DKIO_EJECTED; 16018 /* The state has changed, inform the media watch routines */ 16019 cv_broadcast(&un->un_state_cv); 16020 /* Just fail if no media is present in the drive. */ 16021 goto fail_command; 16022 16023 default: 16024 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16025 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 16026 "Unit not Ready. Additional sense code 0x%x\n", 16027 asc); 16028 } 16029 break; 16030 } 16031 16032 do_retry: 16033 16034 /* 16035 * Retry the command, as some targets may report NOT READY for 16036 * several seconds after being reset. 16037 */ 16038 xp->xb_retry_count++; 16039 si.ssi_severity = SCSI_ERR_RETRYABLE; 16040 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 16041 &si, EIO, SD_BSY_TIMEOUT, NULL); 16042 16043 return; 16044 16045 fail_command: 16046 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16047 sd_return_failed_command(un, bp, EIO); 16048 } 16049 16050 16051 16052 /* 16053 * Function: sd_sense_key_medium_or_hardware_error 16054 * 16055 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 16056 * sense key. 
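 *		For illustration, once the retry count reaches
 *		un_reset_retry_count the body below may, subject to
 *		un_f_allow_bus_device_reset, attempt recovery by calling
 *		scsi_reset(SD_ADDRESS(un), RESET_LUN), falling back to a
 *		RESET_TARGET if the LUN-level reset is unsupported or fails;
 *		the command itself is then retried.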
16057 * 16058 * Context: May be called from interrupt context 16059 */ 16060 16061 static void 16062 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16063 uint8_t *sense_datap, 16064 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16065 { 16066 struct sd_sense_info si; 16067 uint8_t sense_key = scsi_sense_key(sense_datap); 16068 uint8_t asc = scsi_sense_asc(sense_datap); 16069 16070 ASSERT(un != NULL); 16071 ASSERT(mutex_owned(SD_MUTEX(un))); 16072 ASSERT(bp != NULL); 16073 ASSERT(xp != NULL); 16074 ASSERT(pktp != NULL); 16075 16076 si.ssi_severity = SCSI_ERR_FATAL; 16077 si.ssi_pfa_flag = FALSE; 16078 16079 if (sense_key == KEY_MEDIUM_ERROR) { 16080 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16081 } 16082 16083 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16084 16085 if ((un->un_reset_retry_count != 0) && 16086 (xp->xb_retry_count == un->un_reset_retry_count)) { 16087 mutex_exit(SD_MUTEX(un)); 16088 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16089 if (un->un_f_allow_bus_device_reset == TRUE) { 16090 16091 boolean_t try_resetting_target = B_TRUE; 16092 16093 /* 16094 * We need to be able to handle specific ASC when we are 16095 * handling a KEY_HARDWARE_ERROR. In particular 16096 * taking the default action of resetting the target may 16097 * not be the appropriate way to attempt recovery. 16098 * Resetting a target because of a single LUN failure 16099 * victimizes all LUNs on that target. 16100 * 16101 * This is true for the LSI arrays, if an LSI 16102 * array controller returns an ASC of 0x84 (LUN Dead) we 16103 * should trust it. 16104 */ 16105 16106 if (sense_key == KEY_HARDWARE_ERROR) { 16107 switch (asc) { 16108 case 0x84: 16109 if (SD_IS_LSI(un)) { 16110 try_resetting_target = B_FALSE; 16111 } 16112 break; 16113 default: 16114 break; 16115 } 16116 } 16117 16118 if (try_resetting_target == B_TRUE) { 16119 int reset_retval = 0; 16120 if (un->un_f_lun_reset_enabled == TRUE) { 16121 SD_TRACE(SD_LOG_IO_CORE, un, 16122 "sd_sense_key_medium_or_hardware_" 16123 "error: issuing RESET_LUN\n"); 16124 reset_retval = 16125 scsi_reset(SD_ADDRESS(un), 16126 RESET_LUN); 16127 } 16128 if (reset_retval == 0) { 16129 SD_TRACE(SD_LOG_IO_CORE, un, 16130 "sd_sense_key_medium_or_hardware_" 16131 "error: issuing RESET_TARGET\n"); 16132 (void) scsi_reset(SD_ADDRESS(un), 16133 RESET_TARGET); 16134 } 16135 } 16136 } 16137 mutex_enter(SD_MUTEX(un)); 16138 } 16139 16140 /* 16141 * This really ought to be a fatal error, but we will retry anyway 16142 * as some drives report this as a spurious error. 16143 */ 16144 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16145 &si, EIO, (clock_t)0, NULL); 16146 } 16147 16148 16149 16150 /* 16151 * Function: sd_sense_key_illegal_request 16152 * 16153 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
16154 * 16155 * Context: May be called from interrupt context 16156 */ 16157 16158 static void 16159 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16160 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16161 { 16162 struct sd_sense_info si; 16163 16164 ASSERT(un != NULL); 16165 ASSERT(mutex_owned(SD_MUTEX(un))); 16166 ASSERT(bp != NULL); 16167 ASSERT(xp != NULL); 16168 ASSERT(pktp != NULL); 16169 16170 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16171 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16172 16173 si.ssi_severity = SCSI_ERR_INFO; 16174 si.ssi_pfa_flag = FALSE; 16175 16176 /* Pointless to retry if the target thinks it's an illegal request */ 16177 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16178 sd_return_failed_command(un, bp, EIO); 16179 } 16180 16181 16182 16183 16184 /* 16185 * Function: sd_sense_key_unit_attention 16186 * 16187 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16188 * 16189 * Context: May be called from interrupt context 16190 */ 16191 16192 static void 16193 sd_sense_key_unit_attention(struct sd_lun *un, 16194 uint8_t *sense_datap, 16195 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16196 { 16197 /* 16198 * For UNIT ATTENTION we allow retries for one minute. Devices 16199 * like Sonoma can return UNIT ATTENTION close to a minute 16200 * under certain conditions. 16201 */ 16202 int retry_check_flag = SD_RETRIES_UA; 16203 boolean_t kstat_updated = B_FALSE; 16204 struct sd_sense_info si; 16205 uint8_t asc = scsi_sense_asc(sense_datap); 16206 16207 ASSERT(un != NULL); 16208 ASSERT(mutex_owned(SD_MUTEX(un))); 16209 ASSERT(bp != NULL); 16210 ASSERT(xp != NULL); 16211 ASSERT(pktp != NULL); 16212 16213 si.ssi_severity = SCSI_ERR_INFO; 16214 si.ssi_pfa_flag = FALSE; 16215 16216 16217 switch (asc) { 16218 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16219 if (sd_report_pfa != 0) { 16220 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16221 si.ssi_pfa_flag = TRUE; 16222 retry_check_flag = SD_RETRIES_STANDARD; 16223 goto do_retry; 16224 } 16225 16226 break; 16227 16228 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16229 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16230 un->un_resvd_status |= 16231 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16232 } 16233 #ifdef _LP64 16234 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16235 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16236 un, KM_NOSLEEP) == 0) { 16237 /* 16238 * If we can't dispatch the task we'll just 16239 * live without descriptor sense. We can 16240 * try again on the next "unit attention" 16241 */ 16242 SD_ERROR(SD_LOG_ERROR, un, 16243 "sd_sense_key_unit_attention: " 16244 "Could not dispatch " 16245 "sd_reenable_dsense_task\n"); 16246 } 16247 } 16248 #endif /* _LP64 */ 16249 /* FALLTHRU */ 16250 16251 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16252 if (!un->un_f_has_removable_media) { 16253 break; 16254 } 16255 16256 /* 16257 * When we get a unit attention from a removable-media device, 16258 * it may be in a state that will take a long time to recover 16259 * (e.g., from a reset). Since we are executing in interrupt 16260 * context here, we cannot wait around for the device to come 16261 * back. So hand this command off to sd_media_change_task() 16262 * for deferred processing under taskq thread context. (Note 16263 * that the command still may be failed if a problem is 16264 * encountered at a later time.) 
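 * The dispatch below uses KM_NOSLEEP because this code runs in interrupt
 * context and must not block waiting for taskq resources; if the dispatch
 * fails, the command is failed with EIO instead.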
16265 */ 16266 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16267 KM_NOSLEEP) == 0) { 16268 /* 16269 * Cannot dispatch the request so fail the command. 16270 */ 16271 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16272 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16273 si.ssi_severity = SCSI_ERR_FATAL; 16274 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16275 sd_return_failed_command(un, bp, EIO); 16276 } 16277 16278 /* 16279 * If failed to dispatch sd_media_change_task(), we already 16280 * updated kstat. If succeed to dispatch sd_media_change_task(), 16281 * we should update kstat later if it encounters an error. So, 16282 * we update kstat_updated flag here. 16283 */ 16284 kstat_updated = B_TRUE; 16285 16286 /* 16287 * Either the command has been successfully dispatched to a 16288 * task Q for retrying, or the dispatch failed. In either case 16289 * do NOT retry again by calling sd_retry_command. This sets up 16290 * two retries of the same command and when one completes and 16291 * frees the resources the other will access freed memory, 16292 * a bad thing. 16293 */ 16294 return; 16295 16296 default: 16297 break; 16298 } 16299 16300 /* 16301 * Update kstat if we haven't done that. 16302 */ 16303 if (!kstat_updated) { 16304 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16305 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16306 } 16307 16308 do_retry: 16309 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16310 EIO, SD_UA_RETRY_DELAY, NULL); 16311 } 16312 16313 16314 16315 /* 16316 * Function: sd_sense_key_fail_command 16317 * 16318 * Description: Use to fail a command when we don't like the sense key that 16319 * was returned. 16320 * 16321 * Context: May be called from interrupt context 16322 */ 16323 16324 static void 16325 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16326 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16327 { 16328 struct sd_sense_info si; 16329 16330 ASSERT(un != NULL); 16331 ASSERT(mutex_owned(SD_MUTEX(un))); 16332 ASSERT(bp != NULL); 16333 ASSERT(xp != NULL); 16334 ASSERT(pktp != NULL); 16335 16336 si.ssi_severity = SCSI_ERR_FATAL; 16337 si.ssi_pfa_flag = FALSE; 16338 16339 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16340 sd_return_failed_command(un, bp, EIO); 16341 } 16342 16343 16344 16345 /* 16346 * Function: sd_sense_key_blank_check 16347 * 16348 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16349 * Has no monetary connotation. 16350 * 16351 * Context: May be called from interrupt context 16352 */ 16353 16354 static void 16355 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16356 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16357 { 16358 struct sd_sense_info si; 16359 16360 ASSERT(un != NULL); 16361 ASSERT(mutex_owned(SD_MUTEX(un))); 16362 ASSERT(bp != NULL); 16363 ASSERT(xp != NULL); 16364 ASSERT(pktp != NULL); 16365 16366 /* 16367 * Blank check is not fatal for removable devices, therefore 16368 * it does not require a console message. 16369 */ 16370 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16371 SCSI_ERR_FATAL; 16372 si.ssi_pfa_flag = FALSE; 16373 16374 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16375 sd_return_failed_command(un, bp, EIO); 16376 } 16377 16378 16379 16380 16381 /* 16382 * Function: sd_sense_key_aborted_command 16383 * 16384 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 
16385 * 16386 * Context: May be called from interrupt context 16387 */ 16388 16389 static void 16390 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16391 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16392 { 16393 struct sd_sense_info si; 16394 16395 ASSERT(un != NULL); 16396 ASSERT(mutex_owned(SD_MUTEX(un))); 16397 ASSERT(bp != NULL); 16398 ASSERT(xp != NULL); 16399 ASSERT(pktp != NULL); 16400 16401 si.ssi_severity = SCSI_ERR_FATAL; 16402 si.ssi_pfa_flag = FALSE; 16403 16404 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16405 16406 /* 16407 * This really ought to be a fatal error, but we will retry anyway 16408 * as some drives report this as a spurious error. 16409 */ 16410 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16411 &si, EIO, (clock_t)0, NULL); 16412 } 16413 16414 16415 16416 /* 16417 * Function: sd_sense_key_default 16418 * 16419 * Description: Default recovery action for several SCSI sense keys (basically 16420 * attempts a retry). 16421 * 16422 * Context: May be called from interrupt context 16423 */ 16424 16425 static void 16426 sd_sense_key_default(struct sd_lun *un, 16427 uint8_t *sense_datap, 16428 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16429 { 16430 struct sd_sense_info si; 16431 uint8_t sense_key = scsi_sense_key(sense_datap); 16432 16433 ASSERT(un != NULL); 16434 ASSERT(mutex_owned(SD_MUTEX(un))); 16435 ASSERT(bp != NULL); 16436 ASSERT(xp != NULL); 16437 ASSERT(pktp != NULL); 16438 16439 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16440 16441 /* 16442 * Undecoded sense key. Attempt retries and hope that will fix 16443 * the problem. Otherwise, we're dead. 16444 */ 16445 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16446 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16447 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16448 } 16449 16450 si.ssi_severity = SCSI_ERR_FATAL; 16451 si.ssi_pfa_flag = FALSE; 16452 16453 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16454 &si, EIO, (clock_t)0, NULL); 16455 } 16456 16457 16458 16459 /* 16460 * Function: sd_print_retry_msg 16461 * 16462 * Description: Print a message indicating the retry action being taken. 16463 * 16464 * Arguments: un - ptr to associated softstate 16465 * bp - ptr to buf(9S) for the command 16466 * arg - not used. 16467 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16468 * or SD_NO_RETRY_ISSUED 16469 * 16470 * Context: May be called from interrupt context 16471 */ 16472 /* ARGSUSED */ 16473 static void 16474 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16475 { 16476 struct sd_xbuf *xp; 16477 struct scsi_pkt *pktp; 16478 char *reasonp; 16479 char *msgp; 16480 16481 ASSERT(un != NULL); 16482 ASSERT(mutex_owned(SD_MUTEX(un))); 16483 ASSERT(bp != NULL); 16484 pktp = SD_GET_PKTP(bp); 16485 ASSERT(pktp != NULL); 16486 xp = SD_GET_XBUF(bp); 16487 ASSERT(xp != NULL); 16488 16489 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16490 mutex_enter(&un->un_pm_mutex); 16491 if ((un->un_state == SD_STATE_SUSPENDED) || 16492 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16493 (pktp->pkt_flags & FLAG_SILENT)) { 16494 mutex_exit(&un->un_pm_mutex); 16495 goto update_pkt_reason; 16496 } 16497 mutex_exit(&un->un_pm_mutex); 16498 16499 /* 16500 * Suppress messages if they are all the same pkt_reason; with 16501 * TQ, many (up to 256) are returned with the same pkt_reason. 16502 * If we are in panic, then suppress the retry messages. 
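 *
 * Worked illustration (hypothetical scenario, not from the original source):
 * if a queue of 256 tagged commands all complete with pkt_reason CMD_TIMEOUT,
 * only the first completion produces a "retrying command" warning; the rest
 * are silenced by the test below, which is roughly
 *
 *	ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
 *	    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
 *	    (sd_error_level != SCSI_ERR_ALL))
 *
 * Raising sd_error_level to SCSI_ERR_ALL defeats the duplicate suppression
 * and logs every retry.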
16503 */ 16504 switch (flag) { 16505 case SD_NO_RETRY_ISSUED: 16506 msgp = "giving up"; 16507 break; 16508 case SD_IMMEDIATE_RETRY_ISSUED: 16509 case SD_DELAYED_RETRY_ISSUED: 16510 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16511 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16512 (sd_error_level != SCSI_ERR_ALL))) { 16513 return; 16514 } 16515 msgp = "retrying command"; 16516 break; 16517 default: 16518 goto update_pkt_reason; 16519 } 16520 16521 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16522 scsi_rname(pktp->pkt_reason)); 16523 16524 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16525 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16526 16527 update_pkt_reason: 16528 /* 16529 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16530 * This is to prevent multiple console messages for the same failure 16531 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16532 * when the command is retried successfully because there still may be 16533 * more commands coming back with the same value of pktp->pkt_reason. 16534 */ 16535 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16536 un->un_last_pkt_reason = pktp->pkt_reason; 16537 } 16538 } 16539 16540 16541 /* 16542 * Function: sd_print_cmd_incomplete_msg 16543 * 16544 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16545 * 16546 * Arguments: un - ptr to associated softstate 16547 * bp - ptr to buf(9S) for the command 16548 * arg - passed to sd_print_retry_msg() 16549 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16550 * or SD_NO_RETRY_ISSUED 16551 * 16552 * Context: May be called from interrupt context 16553 */ 16554 16555 static void 16556 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16557 int code) 16558 { 16559 dev_info_t *dip; 16560 16561 ASSERT(un != NULL); 16562 ASSERT(mutex_owned(SD_MUTEX(un))); 16563 ASSERT(bp != NULL); 16564 16565 switch (code) { 16566 case SD_NO_RETRY_ISSUED: 16567 /* Command was failed. Someone turned off this target? */ 16568 if (un->un_state != SD_STATE_OFFLINE) { 16569 /* 16570 * Suppress message if we are detaching and 16571 * device has been disconnected 16572 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16573 * private interface and not part of the DDI 16574 */ 16575 dip = un->un_sd->sd_dev; 16576 if (!(DEVI_IS_DETACHING(dip) && 16577 DEVI_IS_DEVICE_REMOVED(dip))) { 16578 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16579 "disk not responding to selection\n"); 16580 } 16581 New_state(un, SD_STATE_OFFLINE); 16582 } 16583 break; 16584 16585 case SD_DELAYED_RETRY_ISSUED: 16586 case SD_IMMEDIATE_RETRY_ISSUED: 16587 default: 16588 /* Command was successfully queued for retry */ 16589 sd_print_retry_msg(un, bp, arg, code); 16590 break; 16591 } 16592 } 16593 16594 16595 /* 16596 * Function: sd_pkt_reason_cmd_incomplete 16597 * 16598 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
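 *
 *		Illustrative aside (not part of the original comment): the
 *		body below tests (pktp->pkt_state != STATE_GOT_BUS), i.e. a
 *		reset is attempted unless the packet state is exactly
 *		STATE_GOT_BUS.  A bit test would behave differently; for
 *		example, with
 *
 *			pktp->pkt_state == (STATE_GOT_BUS | STATE_GOT_TARGET)
 *
 *		the existing inequality is true and a reset is attempted,
 *		whereas ((pktp->pkt_state & STATE_GOT_BUS) == 0) is false
 *		and would skip it.  That difference is what the "Should
 *		this not just check the bit?" note in the body is asking
 *		about.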
16599 * 16600 * Context: May be called from interrupt context 16601 */ 16602 16603 static void 16604 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16605 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16606 { 16607 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16608 16609 ASSERT(un != NULL); 16610 ASSERT(mutex_owned(SD_MUTEX(un))); 16611 ASSERT(bp != NULL); 16612 ASSERT(xp != NULL); 16613 ASSERT(pktp != NULL); 16614 16615 /* Do not do a reset if selection did not complete */ 16616 /* Note: Should this not just check the bit? */ 16617 if (pktp->pkt_state != STATE_GOT_BUS) { 16618 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16619 sd_reset_target(un, pktp); 16620 } 16621 16622 /* 16623 * If the target was not successfully selected, then set 16624 * SD_RETRIES_FAILFAST to indicate that we lost communication 16625 * with the target, and further retries and/or commands are 16626 * likely to take a long time. 16627 */ 16628 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16629 flag |= SD_RETRIES_FAILFAST; 16630 } 16631 16632 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16633 16634 sd_retry_command(un, bp, flag, 16635 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16636 } 16637 16638 16639 16640 /* 16641 * Function: sd_pkt_reason_cmd_tran_err 16642 * 16643 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16644 * 16645 * Context: May be called from interrupt context 16646 */ 16647 16648 static void 16649 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16650 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16651 { 16652 ASSERT(un != NULL); 16653 ASSERT(mutex_owned(SD_MUTEX(un))); 16654 ASSERT(bp != NULL); 16655 ASSERT(xp != NULL); 16656 ASSERT(pktp != NULL); 16657 16658 /* 16659 * Do not reset if we got a parity error, or if 16660 * selection did not complete. 16661 */ 16662 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16663 /* Note: Should this not just check the bit for pkt_state? */ 16664 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16665 (pktp->pkt_state != STATE_GOT_BUS)) { 16666 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16667 sd_reset_target(un, pktp); 16668 } 16669 16670 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16671 16672 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16673 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16674 } 16675 16676 16677 16678 /* 16679 * Function: sd_pkt_reason_cmd_reset 16680 * 16681 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16682 * 16683 * Context: May be called from interrupt context 16684 */ 16685 16686 static void 16687 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16688 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16689 { 16690 ASSERT(un != NULL); 16691 ASSERT(mutex_owned(SD_MUTEX(un))); 16692 ASSERT(bp != NULL); 16693 ASSERT(xp != NULL); 16694 ASSERT(pktp != NULL); 16695 16696 /* The target may still be running the command, so try to reset. */ 16697 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16698 sd_reset_target(un, pktp); 16699 16700 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16701 16702 /* 16703 * If pkt_reason is CMD_RESET chances are that this pkt got 16704 * reset because another target on this bus caused it. The target 16705 * that caused it should get CMD_TIMEOUT with pkt_statistics 16706 * of STAT_TIMEOUT/STAT_DEV_RESET. 
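 *
 * Illustrative sketch (not from the original source) of how the two cases
 * described above can be told apart:
 *
 *	pkt_reason == CMD_RESET
 *		-> most likely an innocent victim of a reset aimed at
 *		   another target on the bus
 *	pkt_reason == CMD_TIMEOUT &&
 *	    (pkt_statistics & (STAT_TIMEOUT | STAT_DEV_RESET)) != 0
 *		-> the command that actually provoked the reset
 *
 * which is presumably why the retry issued below is flagged with
 * SD_RETRIES_VICTIM rather than SD_RETRIES_STANDARD.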
16707 */ 16708 16709 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16710 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16711 } 16712 16713 16714 16715 16716 /* 16717 * Function: sd_pkt_reason_cmd_aborted 16718 * 16719 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 16720 * 16721 * Context: May be called from interrupt context 16722 */ 16723 16724 static void 16725 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 16726 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16727 { 16728 ASSERT(un != NULL); 16729 ASSERT(mutex_owned(SD_MUTEX(un))); 16730 ASSERT(bp != NULL); 16731 ASSERT(xp != NULL); 16732 ASSERT(pktp != NULL); 16733 16734 /* The target may still be running the command, so try to reset. */ 16735 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16736 sd_reset_target(un, pktp); 16737 16738 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16739 16740 /* 16741 * If pkt_reason is CMD_ABORTED chances are that this pkt got 16742 * aborted because another target on this bus caused it. The target 16743 * that caused it should get CMD_TIMEOUT with pkt_statistics 16744 * of STAT_TIMEOUT/STAT_DEV_RESET. 16745 */ 16746 16747 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16748 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16749 } 16750 16751 16752 16753 /* 16754 * Function: sd_pkt_reason_cmd_timeout 16755 * 16756 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 16757 * 16758 * Context: May be called from interrupt context 16759 */ 16760 16761 static void 16762 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 16763 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16764 { 16765 ASSERT(un != NULL); 16766 ASSERT(mutex_owned(SD_MUTEX(un))); 16767 ASSERT(bp != NULL); 16768 ASSERT(xp != NULL); 16769 ASSERT(pktp != NULL); 16770 16771 16772 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16773 sd_reset_target(un, pktp); 16774 16775 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16776 16777 /* 16778 * A command timeout indicates that we could not establish 16779 * communication with the target, so set SD_RETRIES_FAILFAST 16780 * as further retries/commands are likely to take a long time. 16781 */ 16782 sd_retry_command(un, bp, 16783 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 16784 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16785 } 16786 16787 16788 16789 /* 16790 * Function: sd_pkt_reason_cmd_unx_bus_free 16791 * 16792 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 16793 * 16794 * Context: May be called from interrupt context 16795 */ 16796 16797 static void 16798 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 16799 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16800 { 16801 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 16802 16803 ASSERT(un != NULL); 16804 ASSERT(mutex_owned(SD_MUTEX(un))); 16805 ASSERT(bp != NULL); 16806 ASSERT(xp != NULL); 16807 ASSERT(pktp != NULL); 16808 16809 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16810 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16811 16812 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 16813 sd_print_retry_msg : NULL; 16814 16815 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16816 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16817 } 16818 16819 16820 /* 16821 * Function: sd_pkt_reason_cmd_tag_reject 16822 * 16823 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
16824 * 16825 * Context: May be called from interrupt context 16826 */ 16827 16828 static void 16829 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 16830 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16831 { 16832 ASSERT(un != NULL); 16833 ASSERT(mutex_owned(SD_MUTEX(un))); 16834 ASSERT(bp != NULL); 16835 ASSERT(xp != NULL); 16836 ASSERT(pktp != NULL); 16837 16838 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16839 pktp->pkt_flags = 0; 16840 un->un_tagflags = 0; 16841 if (un->un_f_opt_queueing == TRUE) { 16842 un->un_throttle = min(un->un_throttle, 3); 16843 } else { 16844 un->un_throttle = 1; 16845 } 16846 mutex_exit(SD_MUTEX(un)); 16847 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 16848 mutex_enter(SD_MUTEX(un)); 16849 16850 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16851 16852 /* Legacy behavior not to check retry counts here. */ 16853 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 16854 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16855 } 16856 16857 16858 /* 16859 * Function: sd_pkt_reason_default 16860 * 16861 * Description: Default recovery actions for SCSA pkt_reason values that 16862 * do not have more explicit recovery actions. 16863 * 16864 * Context: May be called from interrupt context 16865 */ 16866 16867 static void 16868 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 16869 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16870 { 16871 ASSERT(un != NULL); 16872 ASSERT(mutex_owned(SD_MUTEX(un))); 16873 ASSERT(bp != NULL); 16874 ASSERT(xp != NULL); 16875 ASSERT(pktp != NULL); 16876 16877 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16878 sd_reset_target(un, pktp); 16879 16880 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16881 16882 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16883 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16884 } 16885 16886 16887 16888 /* 16889 * Function: sd_pkt_status_check_condition 16890 * 16891 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 16892 * 16893 * Context: May be called from interrupt context 16894 */ 16895 16896 static void 16897 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 16898 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16899 { 16900 ASSERT(un != NULL); 16901 ASSERT(mutex_owned(SD_MUTEX(un))); 16902 ASSERT(bp != NULL); 16903 ASSERT(xp != NULL); 16904 ASSERT(pktp != NULL); 16905 16906 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 16907 "entry: buf:0x%p xp:0x%p\n", bp, xp); 16908 16909 /* 16910 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 16911 * command will be retried after the request sense). Otherwise, retry 16912 * the command. Note: we are issuing the request sense even though the 16913 * retry limit may have been reached for the failed command. 
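 *
 * Illustrative aside (not part of the original comment): "ARQ" is the HBA's
 * automatic REQUEST SENSE facility; when it is enabled the sense data for a
 * CHECK CONDITION arrives attached to the failed packet, so no separate
 * REQUEST SENSE has to be built here.  Whether the HBA provides it is
 * typically discovered at attach time with a capability query such as
 *
 *	scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)
 *
 * (shown only for illustration; this routine just consults the cached
 * un_f_arq_enabled flag).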
16914 */ 16915 if (un->un_f_arq_enabled == FALSE) { 16916 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 16917 "no ARQ, sending request sense command\n"); 16918 sd_send_request_sense_command(un, bp, pktp); 16919 } else { 16920 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 16921 "ARQ,retrying request sense command\n"); 16922 #if defined(__i386) || defined(__amd64) 16923 /* 16924 * The SD_RETRY_DELAY value need to be adjusted here 16925 * when SD_RETRY_DELAY change in sddef.h 16926 */ 16927 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 16928 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, 16929 NULL); 16930 #else 16931 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 16932 EIO, SD_RETRY_DELAY, NULL); 16933 #endif 16934 } 16935 16936 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 16937 } 16938 16939 16940 /* 16941 * Function: sd_pkt_status_busy 16942 * 16943 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 16944 * 16945 * Context: May be called from interrupt context 16946 */ 16947 16948 static void 16949 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16950 struct scsi_pkt *pktp) 16951 { 16952 ASSERT(un != NULL); 16953 ASSERT(mutex_owned(SD_MUTEX(un))); 16954 ASSERT(bp != NULL); 16955 ASSERT(xp != NULL); 16956 ASSERT(pktp != NULL); 16957 16958 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16959 "sd_pkt_status_busy: entry\n"); 16960 16961 /* If retries are exhausted, just fail the command. */ 16962 if (xp->xb_retry_count >= un->un_busy_retry_count) { 16963 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16964 "device busy too long\n"); 16965 sd_return_failed_command(un, bp, EIO); 16966 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16967 "sd_pkt_status_busy: exit\n"); 16968 return; 16969 } 16970 xp->xb_retry_count++; 16971 16972 /* 16973 * Try to reset the target. However, we do not want to perform 16974 * more than one reset if the device continues to fail. The reset 16975 * will be performed when the retry count reaches the reset 16976 * threshold. This threshold should be set such that at least 16977 * one retry is issued before the reset is performed. 16978 */ 16979 if (xp->xb_retry_count == 16980 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 16981 int rval = 0; 16982 mutex_exit(SD_MUTEX(un)); 16983 if (un->un_f_allow_bus_device_reset == TRUE) { 16984 /* 16985 * First try to reset the LUN; if we cannot then 16986 * try to reset the target. 16987 */ 16988 if (un->un_f_lun_reset_enabled == TRUE) { 16989 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16990 "sd_pkt_status_busy: RESET_LUN\n"); 16991 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 16992 } 16993 if (rval == 0) { 16994 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16995 "sd_pkt_status_busy: RESET_TARGET\n"); 16996 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 16997 } 16998 } 16999 if (rval == 0) { 17000 /* 17001 * If the RESET_LUN and/or RESET_TARGET failed, 17002 * try RESET_ALL 17003 */ 17004 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17005 "sd_pkt_status_busy: RESET_ALL\n"); 17006 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 17007 } 17008 mutex_enter(SD_MUTEX(un)); 17009 if (rval == 0) { 17010 /* 17011 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 17012 * At this point we give up & fail the command. 
17013 */ 17014 sd_return_failed_command(un, bp, EIO); 17015 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17016 "sd_pkt_status_busy: exit (failed cmd)\n"); 17017 return; 17018 } 17019 } 17020 17021 /* 17022 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17023 * we have already checked the retry counts above. 17024 */ 17025 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17026 EIO, SD_BSY_TIMEOUT, NULL); 17027 17028 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17029 "sd_pkt_status_busy: exit\n"); 17030 } 17031 17032 17033 /* 17034 * Function: sd_pkt_status_reservation_conflict 17035 * 17036 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17037 * command status. 17038 * 17039 * Context: May be called from interrupt context 17040 */ 17041 17042 static void 17043 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17044 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17045 { 17046 ASSERT(un != NULL); 17047 ASSERT(mutex_owned(SD_MUTEX(un))); 17048 ASSERT(bp != NULL); 17049 ASSERT(xp != NULL); 17050 ASSERT(pktp != NULL); 17051 17052 /* 17053 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17054 * conflict could be due to various reasons like incorrect keys, not 17055 * registered or not reserved etc. So, we return EACCES to the caller. 17056 */ 17057 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17058 int cmd = SD_GET_PKT_OPCODE(pktp); 17059 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17060 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17061 sd_return_failed_command(un, bp, EACCES); 17062 return; 17063 } 17064 } 17065 17066 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17067 17068 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17069 if (sd_failfast_enable != 0) { 17070 /* By definition, we must panic here.... */ 17071 sd_panic_for_res_conflict(un); 17072 /*NOTREACHED*/ 17073 } 17074 SD_ERROR(SD_LOG_IO, un, 17075 "sd_handle_resv_conflict: Disk Reserved\n"); 17076 sd_return_failed_command(un, bp, EACCES); 17077 return; 17078 } 17079 17080 /* 17081 * 1147670: retry only if sd_retry_on_reservation_conflict 17082 * property is set (default is 1). Retries will not succeed 17083 * on a disk reserved by another initiator. HA systems 17084 * may reset this via sd.conf to avoid these retries. 17085 * 17086 * Note: The legacy return code for this failure is EIO, however EACCES 17087 * seems more appropriate for a reservation conflict. 17088 */ 17089 if (sd_retry_on_reservation_conflict == 0) { 17090 SD_ERROR(SD_LOG_IO, un, 17091 "sd_handle_resv_conflict: Device Reserved\n"); 17092 sd_return_failed_command(un, bp, EIO); 17093 return; 17094 } 17095 17096 /* 17097 * Retry the command if we can. 17098 * 17099 * Note: The legacy return code for this failure is EIO, however EACCES 17100 * seems more appropriate for a reservation conflict. 17101 */ 17102 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17103 (clock_t)2, NULL); 17104 } 17105 17106 17107 17108 /* 17109 * Function: sd_pkt_status_qfull 17110 * 17111 * Description: Handle a QUEUE FULL condition from the target. This can 17112 * occur if the HBA does not handle the queue full condition. 17113 * (Basically this means third-party HBAs as Sun HBAs will 17114 * handle the queue full condition.) Note that if there are 17115 * some commands already in the transport, then the queue full 17116 * has occurred because the queue for this nexus is actually 17117 * full. 
If there are no commands in the transport, then the 17118 * queue full is resulting from some other initiator or lun 17119 * consuming all the resources at the target. 17120 * 17121 * Context: May be called from interrupt context 17122 */ 17123 17124 static void 17125 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17126 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17127 { 17128 ASSERT(un != NULL); 17129 ASSERT(mutex_owned(SD_MUTEX(un))); 17130 ASSERT(bp != NULL); 17131 ASSERT(xp != NULL); 17132 ASSERT(pktp != NULL); 17133 17134 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17135 "sd_pkt_status_qfull: entry\n"); 17136 17137 /* 17138 * Just lower the QFULL throttle and retry the command. Note that 17139 * we do not limit the number of retries here. 17140 */ 17141 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17142 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17143 SD_RESTART_TIMEOUT, NULL); 17144 17145 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17146 "sd_pkt_status_qfull: exit\n"); 17147 } 17148 17149 17150 /* 17151 * Function: sd_reset_target 17152 * 17153 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17154 * RESET_TARGET, or RESET_ALL. 17155 * 17156 * Context: May be called under interrupt context. 17157 */ 17158 17159 static void 17160 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17161 { 17162 int rval = 0; 17163 17164 ASSERT(un != NULL); 17165 ASSERT(mutex_owned(SD_MUTEX(un))); 17166 ASSERT(pktp != NULL); 17167 17168 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17169 17170 /* 17171 * No need to reset if the transport layer has already done so. 17172 */ 17173 if ((pktp->pkt_statistics & 17174 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17175 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17176 "sd_reset_target: no reset\n"); 17177 return; 17178 } 17179 17180 mutex_exit(SD_MUTEX(un)); 17181 17182 if (un->un_f_allow_bus_device_reset == TRUE) { 17183 if (un->un_f_lun_reset_enabled == TRUE) { 17184 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17185 "sd_reset_target: RESET_LUN\n"); 17186 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17187 } 17188 if (rval == 0) { 17189 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17190 "sd_reset_target: RESET_TARGET\n"); 17191 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17192 } 17193 } 17194 17195 if (rval == 0) { 17196 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17197 "sd_reset_target: RESET_ALL\n"); 17198 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17199 } 17200 17201 mutex_enter(SD_MUTEX(un)); 17202 17203 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17204 } 17205 17206 17207 /* 17208 * Function: sd_media_change_task 17209 * 17210 * Description: Recovery action for CDROM to become available. 
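 *
 *		Worked illustration (summarizing the body that follows, not
 *		additional behavior): the retry loop starts with
 *		retry_limit = SD_UNIT_ATTENTION_RETRY/10 and sleeps 0.5 sec
 *		between attempts; as soon as sd_handle_mchange() returns
 *		EAGAIN (drive still becoming ready, ASC/ASCQ 0x04/0x01) the
 *		limit is raised to the full SD_UNIT_ATTENTION_RETRY.  A
 *		drive genuinely recovering from a reset therefore gets about
 *		SD_UNIT_ATTENTION_RETRY * 0.5 seconds to come back, while a
 *		drive with no media gives up roughly ten times sooner.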
17211 * 17212 * Context: Executes in a taskq() thread context 17213 */ 17214 17215 static void 17216 sd_media_change_task(void *arg) 17217 { 17218 struct scsi_pkt *pktp = arg; 17219 struct sd_lun *un; 17220 struct buf *bp; 17221 struct sd_xbuf *xp; 17222 int err = 0; 17223 int retry_count = 0; 17224 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17225 struct sd_sense_info si; 17226 17227 ASSERT(pktp != NULL); 17228 bp = (struct buf *)pktp->pkt_private; 17229 ASSERT(bp != NULL); 17230 xp = SD_GET_XBUF(bp); 17231 ASSERT(xp != NULL); 17232 un = SD_GET_UN(bp); 17233 ASSERT(un != NULL); 17234 ASSERT(!mutex_owned(SD_MUTEX(un))); 17235 ASSERT(un->un_f_monitor_media_state); 17236 17237 si.ssi_severity = SCSI_ERR_INFO; 17238 si.ssi_pfa_flag = FALSE; 17239 17240 /* 17241 * When a reset is issued on a CDROM, it takes a long time to 17242 * recover. First few attempts to read capacity and other things 17243 * related to handling unit attention fail (with a ASC 0x4 and 17244 * ASCQ 0x1). In that case we want to do enough retries and we want 17245 * to limit the retries in other cases of genuine failures like 17246 * no media in drive. 17247 */ 17248 while (retry_count++ < retry_limit) { 17249 if ((err = sd_handle_mchange(un)) == 0) { 17250 break; 17251 } 17252 if (err == EAGAIN) { 17253 retry_limit = SD_UNIT_ATTENTION_RETRY; 17254 } 17255 /* Sleep for 0.5 sec. & try again */ 17256 delay(drv_usectohz(500000)); 17257 } 17258 17259 /* 17260 * Dispatch (retry or fail) the original command here, 17261 * along with appropriate console messages.... 17262 * 17263 * Must grab the mutex before calling sd_retry_command, 17264 * sd_print_sense_msg and sd_return_failed_command. 17265 */ 17266 mutex_enter(SD_MUTEX(un)); 17267 if (err != SD_CMD_SUCCESS) { 17268 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17269 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17270 si.ssi_severity = SCSI_ERR_FATAL; 17271 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17272 sd_return_failed_command(un, bp, EIO); 17273 } else { 17274 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17275 &si, EIO, (clock_t)0, NULL); 17276 } 17277 mutex_exit(SD_MUTEX(un)); 17278 } 17279 17280 17281 17282 /* 17283 * Function: sd_handle_mchange 17284 * 17285 * Description: Perform geometry validation & other recovery when CDROM 17286 * has been removed from drive. 
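 *
 *		Sequence illustration (summarizing the body that follows,
 *		not additional behavior): recovery after a media change is
 *		roughly
 *
 *			sd_send_scsi_READ_CAPACITY()	new capacity/lbasize
 *			sd_update_block_info()		refresh the softstate
 *			cmlb_invalidate()/cmlb_validate()  rebuild the label
 *			sd_send_scsi_DOORLOCK(SD_REMOVAL_PREVENT)  relock door
 *
 *		with any failure propagated back to sd_media_change_task(),
 *		which then fails the original command.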
17287 * 17288 * Return Code: 0 for success 17289 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17290 * sd_send_scsi_READ_CAPACITY() 17291 * 17292 * Context: Executes in a taskq() thread context 17293 */ 17294 17295 static int 17296 sd_handle_mchange(struct sd_lun *un) 17297 { 17298 uint64_t capacity; 17299 uint32_t lbasize; 17300 int rval; 17301 17302 ASSERT(!mutex_owned(SD_MUTEX(un))); 17303 ASSERT(un->un_f_monitor_media_state); 17304 17305 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17306 SD_PATH_DIRECT_PRIORITY)) != 0) { 17307 return (rval); 17308 } 17309 17310 mutex_enter(SD_MUTEX(un)); 17311 sd_update_block_info(un, lbasize, capacity); 17312 17313 if (un->un_errstats != NULL) { 17314 struct sd_errstats *stp = 17315 (struct sd_errstats *)un->un_errstats->ks_data; 17316 stp->sd_capacity.value.ui64 = (uint64_t) 17317 ((uint64_t)un->un_blockcount * 17318 (uint64_t)un->un_tgt_blocksize); 17319 } 17320 17321 17322 /* 17323 * Check if the media in the device is writable or not 17324 */ 17325 if (ISCD(un)) 17326 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17327 17328 /* 17329 * Note: Maybe let the strategy/partitioning chain worry about getting 17330 * valid geometry. 17331 */ 17332 mutex_exit(SD_MUTEX(un)); 17333 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17334 17335 17336 if (cmlb_validate(un->un_cmlbhandle, 0, 17337 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17338 return (EIO); 17339 } else { 17340 if (un->un_f_pkstats_enabled) { 17341 sd_set_pstats(un); 17342 SD_TRACE(SD_LOG_IO_PARTITION, un, 17343 "sd_handle_mchange: un:0x%p pstats created and " 17344 "set\n", un); 17345 } 17346 } 17347 17348 17349 /* 17350 * Try to lock the door 17351 */ 17352 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17353 SD_PATH_DIRECT_PRIORITY)); 17354 } 17355 17356 17357 /* 17358 * Function: sd_send_scsi_DOORLOCK 17359 * 17360 * Description: Issue the scsi DOOR LOCK command 17361 * 17362 * Arguments: un - pointer to driver soft state (unit) structure for 17363 * this target. 17364 * flag - SD_REMOVAL_ALLOW 17365 * SD_REMOVAL_PREVENT 17366 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17367 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17368 * to use the USCSI "direct" chain and bypass the normal 17369 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17370 * command is issued as part of an error recovery action. 17371 * 17372 * Return Code: 0 - Success 17373 * errno return code from sd_send_scsi_cmd() 17374 * 17375 * Context: Can sleep. 17376 */ 17377 17378 static int 17379 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17380 { 17381 union scsi_cdb cdb; 17382 struct uscsi_cmd ucmd_buf; 17383 struct scsi_extended_sense sense_buf; 17384 int status; 17385 17386 ASSERT(un != NULL); 17387 ASSERT(!mutex_owned(SD_MUTEX(un))); 17388 17389 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17390 17391 /* already determined doorlock is not supported, fake success */ 17392 if (un->un_f_doorlock_supported == FALSE) { 17393 return (0); 17394 } 17395 17396 /* 17397 * If we are ejecting and see an SD_REMOVAL_PREVENT 17398 * ignore the command so we can complete the eject 17399 * operation. 
17400 */ 17401 if (flag == SD_REMOVAL_PREVENT) { 17402 mutex_enter(SD_MUTEX(un)); 17403 if (un->un_f_ejecting == TRUE) { 17404 mutex_exit(SD_MUTEX(un)); 17405 return (EAGAIN); 17406 } 17407 mutex_exit(SD_MUTEX(un)); 17408 } 17409 17410 bzero(&cdb, sizeof (cdb)); 17411 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17412 17413 cdb.scc_cmd = SCMD_DOORLOCK; 17414 cdb.cdb_opaque[4] = (uchar_t)flag; 17415 17416 ucmd_buf.uscsi_cdb = (char *)&cdb; 17417 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17418 ucmd_buf.uscsi_bufaddr = NULL; 17419 ucmd_buf.uscsi_buflen = 0; 17420 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17421 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17422 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17423 ucmd_buf.uscsi_timeout = 15; 17424 17425 SD_TRACE(SD_LOG_IO, un, 17426 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 17427 17428 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17429 UIO_SYSSPACE, path_flag); 17430 17431 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 17432 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17433 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 17434 /* fake success and skip subsequent doorlock commands */ 17435 un->un_f_doorlock_supported = FALSE; 17436 return (0); 17437 } 17438 17439 return (status); 17440 } 17441 17442 /* 17443 * Function: sd_send_scsi_READ_CAPACITY 17444 * 17445 * Description: This routine uses the scsi READ CAPACITY command to determine 17446 * the device capacity in number of blocks and the device native 17447 * block size. If this function returns a failure, then the 17448 * values in *capp and *lbap are undefined. If the capacity 17449 * returned is 0xffffffff then the lun is too large for a 17450 * normal READ CAPACITY command and the results of a 17451 * READ CAPACITY 16 will be used instead. 17452 * 17453 * Arguments: un - ptr to soft state struct for the target 17454 * capp - ptr to unsigned 64-bit variable to receive the 17455 * capacity value from the command. 17456 * lbap - ptr to unsigned 32-bit varaible to receive the 17457 * block size value from the command 17458 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17459 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17460 * to use the USCSI "direct" chain and bypass the normal 17461 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17462 * command is issued as part of an error recovery action. 17463 * 17464 * Return Code: 0 - Success 17465 * EIO - IO error 17466 * EACCES - Reservation conflict detected 17467 * EAGAIN - Device is becoming ready 17468 * errno return code from sd_send_scsi_cmd() 17469 * 17470 * Context: Can sleep. Blocks until command completes. 17471 */ 17472 17473 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 17474 17475 static int 17476 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 17477 int path_flag) 17478 { 17479 struct scsi_extended_sense sense_buf; 17480 struct uscsi_cmd ucmd_buf; 17481 union scsi_cdb cdb; 17482 uint32_t *capacity_buf; 17483 uint64_t capacity; 17484 uint32_t lbasize; 17485 int status; 17486 17487 ASSERT(un != NULL); 17488 ASSERT(!mutex_owned(SD_MUTEX(un))); 17489 ASSERT(capp != NULL); 17490 ASSERT(lbap != NULL); 17491 17492 SD_TRACE(SD_LOG_IO, un, 17493 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17494 17495 /* 17496 * First send a READ_CAPACITY command to the target. 17497 * (This command is mandatory under SCSI-2.) 17498 * 17499 * Set up the CDB for the READ_CAPACITY command. 
The Partial 17500 * Medium Indicator bit is cleared. The address field must be 17501 * zero if the PMI bit is zero. 17502 */ 17503 bzero(&cdb, sizeof (cdb)); 17504 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17505 17506 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17507 17508 cdb.scc_cmd = SCMD_READ_CAPACITY; 17509 17510 ucmd_buf.uscsi_cdb = (char *)&cdb; 17511 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17512 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17513 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17514 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17515 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17516 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17517 ucmd_buf.uscsi_timeout = 60; 17518 17519 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17520 UIO_SYSSPACE, path_flag); 17521 17522 switch (status) { 17523 case 0: 17524 /* Return failure if we did not get valid capacity data. */ 17525 if (ucmd_buf.uscsi_resid != 0) { 17526 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17527 return (EIO); 17528 } 17529 17530 /* 17531 * Read capacity and block size from the READ CAPACITY 10 data. 17532 * This data may be adjusted later due to device specific 17533 * issues. 17534 * 17535 * According to the SCSI spec, the READ CAPACITY 10 17536 * command returns the following: 17537 * 17538 * bytes 0-3: Maximum logical block address available. 17539 * (MSB in byte:0 & LSB in byte:3) 17540 * 17541 * bytes 4-7: Block length in bytes 17542 * (MSB in byte:4 & LSB in byte:7) 17543 * 17544 */ 17545 capacity = BE_32(capacity_buf[0]); 17546 lbasize = BE_32(capacity_buf[1]); 17547 17548 /* 17549 * Done with capacity_buf 17550 */ 17551 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17552 17553 /* 17554 * if the reported capacity is set to all 0xf's, then 17555 * this disk is too large and requires SBC-2 commands. 17556 * Reissue the request using READ CAPACITY 16. 17557 */ 17558 if (capacity == 0xffffffff) { 17559 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17560 &lbasize, path_flag); 17561 if (status != 0) { 17562 return (status); 17563 } 17564 } 17565 break; /* Success! */ 17566 case EIO: 17567 switch (ucmd_buf.uscsi_status) { 17568 case STATUS_RESERVATION_CONFLICT: 17569 status = EACCES; 17570 break; 17571 case STATUS_CHECK: 17572 /* 17573 * Check condition; look for ASC/ASCQ of 0x04/0x01 17574 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17575 */ 17576 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17577 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17578 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17579 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17580 return (EAGAIN); 17581 } 17582 break; 17583 default: 17584 break; 17585 } 17586 /* FALLTHRU */ 17587 default: 17588 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17589 return (status); 17590 } 17591 17592 /* 17593 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17594 * (2352 and 0 are common) so for these devices always force the value 17595 * to 2048 as required by the ATAPI specs. 17596 */ 17597 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17598 lbasize = 2048; 17599 } 17600 17601 /* 17602 * Get the maximum LBA value from the READ CAPACITY data. 17603 * Here we assume that the Partial Medium Indicator (PMI) bit 17604 * was cleared when issuing the command. This means that the LBA 17605 * returned from the device is the LBA of the last logical block 17606 * on the logical unit. The actual logical block count will be 17607 * this value plus one. 
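 *
 * Worked example (hypothetical numbers, not from the original source):
 * assuming un_sys_blocksize is 512, a device reporting a maximum LBA of
 * 0x0003FFFF with a 2048-byte block length holds 0x40000 native blocks;
 * the scaling described just below records this as
 *
 *	capacity = (0x0003FFFF + 1) * (2048 / 512) = 0x100000
 *
 * i.e. the same 512 MB of media expressed as un_sys_blocksize-byte blocks.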
17608 * 17609 * Currently the capacity is saved in terms of un->un_sys_blocksize, 17610 * so scale the capacity value to reflect this. 17611 */ 17612 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 17613 17614 /* 17615 * Copy the values from the READ CAPACITY command into the space 17616 * provided by the caller. 17617 */ 17618 *capp = capacity; 17619 *lbap = lbasize; 17620 17621 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 17622 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 17623 17624 /* 17625 * Both the lbasize and capacity from the device must be nonzero, 17626 * otherwise we assume that the values are not valid and return 17627 * failure to the caller. (4203735) 17628 */ 17629 if ((capacity == 0) || (lbasize == 0)) { 17630 return (EIO); 17631 } 17632 17633 return (0); 17634 } 17635 17636 /* 17637 * Function: sd_send_scsi_READ_CAPACITY_16 17638 * 17639 * Description: This routine uses the scsi READ CAPACITY 16 command to 17640 * determine the device capacity in number of blocks and the 17641 * device native block size. If this function returns a failure, 17642 * then the values in *capp and *lbap are undefined. 17643 * This routine should always be called by 17644 * sd_send_scsi_READ_CAPACITY which will appy any device 17645 * specific adjustments to capacity and lbasize. 17646 * 17647 * Arguments: un - ptr to soft state struct for the target 17648 * capp - ptr to unsigned 64-bit variable to receive the 17649 * capacity value from the command. 17650 * lbap - ptr to unsigned 32-bit varaible to receive the 17651 * block size value from the command 17652 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17653 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17654 * to use the USCSI "direct" chain and bypass the normal 17655 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 17656 * this command is issued as part of an error recovery 17657 * action. 17658 * 17659 * Return Code: 0 - Success 17660 * EIO - IO error 17661 * EACCES - Reservation conflict detected 17662 * EAGAIN - Device is becoming ready 17663 * errno return code from sd_send_scsi_cmd() 17664 * 17665 * Context: Can sleep. Blocks until command completes. 17666 */ 17667 17668 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 17669 17670 static int 17671 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 17672 uint32_t *lbap, int path_flag) 17673 { 17674 struct scsi_extended_sense sense_buf; 17675 struct uscsi_cmd ucmd_buf; 17676 union scsi_cdb cdb; 17677 uint64_t *capacity16_buf; 17678 uint64_t capacity; 17679 uint32_t lbasize; 17680 int status; 17681 17682 ASSERT(un != NULL); 17683 ASSERT(!mutex_owned(SD_MUTEX(un))); 17684 ASSERT(capp != NULL); 17685 ASSERT(lbap != NULL); 17686 17687 SD_TRACE(SD_LOG_IO, un, 17688 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17689 17690 /* 17691 * First send a READ_CAPACITY_16 command to the target. 17692 * 17693 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 17694 * Medium Indicator bit is cleared. The address field must be 17695 * zero if the PMI bit is zero. 
17696 */ 17697 bzero(&cdb, sizeof (cdb)); 17698 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17699 17700 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 17701 17702 ucmd_buf.uscsi_cdb = (char *)&cdb; 17703 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 17704 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 17705 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 17706 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17707 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17708 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17709 ucmd_buf.uscsi_timeout = 60; 17710 17711 /* 17712 * Read Capacity (16) is a Service Action In command. One 17713 * command byte (0x9E) is overloaded for multiple operations, 17714 * with the second CDB byte specifying the desired operation 17715 */ 17716 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 17717 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 17718 17719 /* 17720 * Fill in allocation length field 17721 */ 17722 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 17723 17724 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17725 UIO_SYSSPACE, path_flag); 17726 17727 switch (status) { 17728 case 0: 17729 /* Return failure if we did not get valid capacity data. */ 17730 if (ucmd_buf.uscsi_resid > 20) { 17731 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17732 return (EIO); 17733 } 17734 17735 /* 17736 * Read capacity and block size from the READ CAPACITY 10 data. 17737 * This data may be adjusted later due to device specific 17738 * issues. 17739 * 17740 * According to the SCSI spec, the READ CAPACITY 10 17741 * command returns the following: 17742 * 17743 * bytes 0-7: Maximum logical block address available. 17744 * (MSB in byte:0 & LSB in byte:7) 17745 * 17746 * bytes 8-11: Block length in bytes 17747 * (MSB in byte:8 & LSB in byte:11) 17748 * 17749 */ 17750 capacity = BE_64(capacity16_buf[0]); 17751 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 17752 17753 /* 17754 * Done with capacity16_buf 17755 */ 17756 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17757 17758 /* 17759 * if the reported capacity is set to all 0xf's, then 17760 * this disk is too large. This could only happen with 17761 * a device that supports LBAs larger than 64 bits which 17762 * are not defined by any current T10 standards. 17763 */ 17764 if (capacity == 0xffffffffffffffff) { 17765 return (EIO); 17766 } 17767 break; /* Success! */ 17768 case EIO: 17769 switch (ucmd_buf.uscsi_status) { 17770 case STATUS_RESERVATION_CONFLICT: 17771 status = EACCES; 17772 break; 17773 case STATUS_CHECK: 17774 /* 17775 * Check condition; look for ASC/ASCQ of 0x04/0x01 17776 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17777 */ 17778 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17779 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17780 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17781 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17782 return (EAGAIN); 17783 } 17784 break; 17785 default: 17786 break; 17787 } 17788 /* FALLTHRU */ 17789 default: 17790 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17791 return (status); 17792 } 17793 17794 *capp = capacity; 17795 *lbap = lbasize; 17796 17797 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 17798 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 17799 17800 return (0); 17801 } 17802 17803 17804 /* 17805 * Function: sd_send_scsi_START_STOP_UNIT 17806 * 17807 * Description: Issue a scsi START STOP UNIT command to the target. 
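 *
 *		Usage illustration (taken from sd_start_stop_unit_task()
 *		later in this file): spinning a drive back up as part of
 *		error recovery looks like
 *
 *			(void) sd_send_scsi_START_STOP_UNIT(un,
 *			    SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
 *
 *		where SD_PATH_DIRECT_PRIORITY makes the command bypass the
 *		normal wait queue so it runs ahead of any pending retries.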
17808 * 17809 * Arguments: un - pointer to driver soft state (unit) structure for 17810 * this target. 17811 * flag - SD_TARGET_START 17812 * SD_TARGET_STOP 17813 * SD_TARGET_EJECT 17814 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17815 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17816 * to use the USCSI "direct" chain and bypass the normal 17817 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17818 * command is issued as part of an error recovery action. 17819 * 17820 * Return Code: 0 - Success 17821 * EIO - IO error 17822 * EACCES - Reservation conflict detected 17823 * ENXIO - Not Ready, medium not present 17824 * errno return code from sd_send_scsi_cmd() 17825 * 17826 * Context: Can sleep. 17827 */ 17828 17829 static int 17830 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 17831 { 17832 struct scsi_extended_sense sense_buf; 17833 union scsi_cdb cdb; 17834 struct uscsi_cmd ucmd_buf; 17835 int status; 17836 17837 ASSERT(un != NULL); 17838 ASSERT(!mutex_owned(SD_MUTEX(un))); 17839 17840 SD_TRACE(SD_LOG_IO, un, 17841 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 17842 17843 if (un->un_f_check_start_stop && 17844 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 17845 (un->un_f_start_stop_supported != TRUE)) { 17846 return (0); 17847 } 17848 17849 /* 17850 * If we are performing an eject operation and 17851 * we receive any command other than SD_TARGET_EJECT 17852 * we should immediately return. 17853 */ 17854 if (flag != SD_TARGET_EJECT) { 17855 mutex_enter(SD_MUTEX(un)); 17856 if (un->un_f_ejecting == TRUE) { 17857 mutex_exit(SD_MUTEX(un)); 17858 return (EAGAIN); 17859 } 17860 mutex_exit(SD_MUTEX(un)); 17861 } 17862 17863 bzero(&cdb, sizeof (cdb)); 17864 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17865 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 17866 17867 cdb.scc_cmd = SCMD_START_STOP; 17868 cdb.cdb_opaque[4] = (uchar_t)flag; 17869 17870 ucmd_buf.uscsi_cdb = (char *)&cdb; 17871 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17872 ucmd_buf.uscsi_bufaddr = NULL; 17873 ucmd_buf.uscsi_buflen = 0; 17874 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17875 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 17876 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17877 ucmd_buf.uscsi_timeout = 200; 17878 17879 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17880 UIO_SYSSPACE, path_flag); 17881 17882 switch (status) { 17883 case 0: 17884 break; /* Success! */ 17885 case EIO: 17886 switch (ucmd_buf.uscsi_status) { 17887 case STATUS_RESERVATION_CONFLICT: 17888 status = EACCES; 17889 break; 17890 case STATUS_CHECK: 17891 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 17892 switch (scsi_sense_key( 17893 (uint8_t *)&sense_buf)) { 17894 case KEY_ILLEGAL_REQUEST: 17895 status = ENOTSUP; 17896 break; 17897 case KEY_NOT_READY: 17898 if (scsi_sense_asc( 17899 (uint8_t *)&sense_buf) 17900 == 0x3A) { 17901 status = ENXIO; 17902 } 17903 break; 17904 default: 17905 break; 17906 } 17907 } 17908 break; 17909 default: 17910 break; 17911 } 17912 break; 17913 default: 17914 break; 17915 } 17916 17917 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 17918 17919 return (status); 17920 } 17921 17922 17923 /* 17924 * Function: sd_start_stop_unit_callback 17925 * 17926 * Description: timeout(9F) callback to begin recovery process for a 17927 * device that has spun down. 17928 * 17929 * Arguments: arg - pointer to associated softstate struct. 
17930 * 17931 * Context: Executes in a timeout(9F) thread context 17932 */ 17933 17934 static void 17935 sd_start_stop_unit_callback(void *arg) 17936 { 17937 struct sd_lun *un = arg; 17938 ASSERT(un != NULL); 17939 ASSERT(!mutex_owned(SD_MUTEX(un))); 17940 17941 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 17942 17943 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 17944 } 17945 17946 17947 /* 17948 * Function: sd_start_stop_unit_task 17949 * 17950 * Description: Recovery procedure when a drive is spun down. 17951 * 17952 * Arguments: arg - pointer to associated softstate struct. 17953 * 17954 * Context: Executes in a taskq() thread context 17955 */ 17956 17957 static void 17958 sd_start_stop_unit_task(void *arg) 17959 { 17960 struct sd_lun *un = arg; 17961 17962 ASSERT(un != NULL); 17963 ASSERT(!mutex_owned(SD_MUTEX(un))); 17964 17965 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 17966 17967 /* 17968 * Some unformatted drives report not ready error, no need to 17969 * restart if format has been initiated. 17970 */ 17971 mutex_enter(SD_MUTEX(un)); 17972 if (un->un_f_format_in_progress == TRUE) { 17973 mutex_exit(SD_MUTEX(un)); 17974 return; 17975 } 17976 mutex_exit(SD_MUTEX(un)); 17977 17978 /* 17979 * When a START STOP command is issued from here, it is part of a 17980 * failure recovery operation and must be issued before any other 17981 * commands, including any pending retries. Thus it must be sent 17982 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 17983 * succeeds or not, we will start I/O after the attempt. 17984 */ 17985 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 17986 SD_PATH_DIRECT_PRIORITY); 17987 17988 /* 17989 * The above call blocks until the START_STOP_UNIT command completes. 17990 * Now that it has completed, we must re-try the original IO that 17991 * received the NOT READY condition in the first place. There are 17992 * three possible conditions here: 17993 * 17994 * (1) The original IO is on un_retry_bp. 17995 * (2) The original IO is on the regular wait queue, and un_retry_bp 17996 * is NULL. 17997 * (3) The original IO is on the regular wait queue, and un_retry_bp 17998 * points to some other, unrelated bp. 17999 * 18000 * For each case, we must call sd_start_cmds() with un_retry_bp 18001 * as the argument. If un_retry_bp is NULL, this will initiate 18002 * processing of the regular wait queue. If un_retry_bp is not NULL, 18003 * then this will process the bp on un_retry_bp. That may or may not 18004 * be the original IO, but that does not matter: the important thing 18005 * is to keep the IO processing going at this point. 18006 * 18007 * Note: This is a very specific error recovery sequence associated 18008 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18009 * serialize the I/O with completion of the spin-up. 18010 */ 18011 mutex_enter(SD_MUTEX(un)); 18012 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18013 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18014 un, un->un_retry_bp); 18015 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18016 sd_start_cmds(un, un->un_retry_bp); 18017 mutex_exit(SD_MUTEX(un)); 18018 18019 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18020 } 18021 18022 18023 /* 18024 * Function: sd_send_scsi_INQUIRY 18025 * 18026 * Description: Issue the scsi INQUIRY command. 
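 *
 *		Usage illustration (hypothetical caller, not from the
 *		original source): fetching the Unit Serial Number VPD page
 *		(EVPD bit set, page code 0x80) would look roughly like
 *
 *			uchar_t	inq80[0xff];
 *			size_t	resid;
 *
 *			if (sd_send_scsi_INQUIRY(un, inq80, sizeof (inq80),
 *			    0x01, 0x80, &resid) == 0)
 *				(serial number bytes begin at inq80[4])
 *
 *		On success *residp, when supplied, holds the residual byte
 *		count that the target did not fill in.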
18027 * 18028 * Arguments: un 18029 * bufaddr 18030 * buflen 18031 * evpd 18032 * page_code 18033 * page_length 18034 * 18035 * Return Code: 0 - Success 18036 * errno return code from sd_send_scsi_cmd() 18037 * 18038 * Context: Can sleep. Does not return until command is completed. 18039 */ 18040 18041 static int 18042 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 18043 uchar_t evpd, uchar_t page_code, size_t *residp) 18044 { 18045 union scsi_cdb cdb; 18046 struct uscsi_cmd ucmd_buf; 18047 int status; 18048 18049 ASSERT(un != NULL); 18050 ASSERT(!mutex_owned(SD_MUTEX(un))); 18051 ASSERT(bufaddr != NULL); 18052 18053 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 18054 18055 bzero(&cdb, sizeof (cdb)); 18056 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18057 bzero(bufaddr, buflen); 18058 18059 cdb.scc_cmd = SCMD_INQUIRY; 18060 cdb.cdb_opaque[1] = evpd; 18061 cdb.cdb_opaque[2] = page_code; 18062 FORMG0COUNT(&cdb, buflen); 18063 18064 ucmd_buf.uscsi_cdb = (char *)&cdb; 18065 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18066 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18067 ucmd_buf.uscsi_buflen = buflen; 18068 ucmd_buf.uscsi_rqbuf = NULL; 18069 ucmd_buf.uscsi_rqlen = 0; 18070 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 18071 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 18072 18073 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18074 UIO_SYSSPACE, SD_PATH_DIRECT); 18075 18076 if ((status == 0) && (residp != NULL)) { 18077 *residp = ucmd_buf.uscsi_resid; 18078 } 18079 18080 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 18081 18082 return (status); 18083 } 18084 18085 18086 /* 18087 * Function: sd_send_scsi_TEST_UNIT_READY 18088 * 18089 * Description: Issue the scsi TEST UNIT READY command. 18090 * This routine can be told to set the flag USCSI_DIAGNOSE to 18091 * prevent retrying failed commands. Use this when the intent 18092 * is either to check for device readiness, to clear a Unit 18093 * Attention, or to clear any outstanding sense data. 18094 * However under specific conditions the expected behavior 18095 * is for retries to bring a device ready, so use the flag 18096 * with caution. 18097 * 18098 * Arguments: un 18099 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 18100 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 18101 * 0: dont check for media present, do retries on cmd. 18102 * 18103 * Return Code: 0 - Success 18104 * EIO - IO error 18105 * EACCES - Reservation conflict detected 18106 * ENXIO - Not Ready, medium not present 18107 * errno return code from sd_send_scsi_cmd() 18108 * 18109 * Context: Can sleep. Does not return until command is completed. 18110 */ 18111 18112 static int 18113 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 18114 { 18115 struct scsi_extended_sense sense_buf; 18116 union scsi_cdb cdb; 18117 struct uscsi_cmd ucmd_buf; 18118 int status; 18119 18120 ASSERT(un != NULL); 18121 ASSERT(!mutex_owned(SD_MUTEX(un))); 18122 18123 SD_TRACE(SD_LOG_IO, un, 18124 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 18125 18126 /* 18127 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 18128 * timeouts when they receive a TUR and the queue is not empty. Check 18129 * the configuration flag set during attach (indicating the drive has 18130 * this firmware bug) and un_ncmds_in_transport before issuing the 18131 * TUR. If there are 18132 * pending commands return success, this is a bit arbitrary but is ok 18133 * for non-removables (i.e. 
the eliteI disks) and non-clustering 18134 * configurations. 18135 */ 18136 if (un->un_f_cfg_tur_check == TRUE) { 18137 mutex_enter(SD_MUTEX(un)); 18138 if (un->un_ncmds_in_transport != 0) { 18139 mutex_exit(SD_MUTEX(un)); 18140 return (0); 18141 } 18142 mutex_exit(SD_MUTEX(un)); 18143 } 18144 18145 bzero(&cdb, sizeof (cdb)); 18146 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18147 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18148 18149 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18150 18151 ucmd_buf.uscsi_cdb = (char *)&cdb; 18152 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18153 ucmd_buf.uscsi_bufaddr = NULL; 18154 ucmd_buf.uscsi_buflen = 0; 18155 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18156 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18157 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18158 18159 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18160 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18161 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18162 } 18163 ucmd_buf.uscsi_timeout = 60; 18164 18165 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18166 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18167 SD_PATH_STANDARD)); 18168 18169 switch (status) { 18170 case 0: 18171 break; /* Success! */ 18172 case EIO: 18173 switch (ucmd_buf.uscsi_status) { 18174 case STATUS_RESERVATION_CONFLICT: 18175 status = EACCES; 18176 break; 18177 case STATUS_CHECK: 18178 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18179 break; 18180 } 18181 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18182 (scsi_sense_key((uint8_t *)&sense_buf) == 18183 KEY_NOT_READY) && 18184 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18185 status = ENXIO; 18186 } 18187 break; 18188 default: 18189 break; 18190 } 18191 break; 18192 default: 18193 break; 18194 } 18195 18196 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18197 18198 return (status); 18199 } 18200 18201 18202 /* 18203 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18204 * 18205 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18206 * 18207 * Arguments: un 18208 * 18209 * Return Code: 0 - Success 18210 * EACCES 18211 * ENOTSUP 18212 * errno return code from sd_send_scsi_cmd() 18213 * 18214 * Context: Can sleep. Does not return until command is completed. 
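 *
 * Editor's note (illustrative, not from the original source): usr_cmd
 * must be SD_READ_KEYS or SD_READ_RESV, and data_bufp/data_len describe
 * the caller's parameter-data buffer. Passing a NULL data_bufp with a
 * data_len of 0 is allowed; in that case this routine allocates and
 * frees a MHIOC_RESV_KEY_SIZE scratch buffer itself, e.g.
 *
 *	rc = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL);
 *
 * where rc is a hypothetical local receiving the errno-style result.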
18215 */ 18216 18217 static int 18218 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18219 uint16_t data_len, uchar_t *data_bufp) 18220 { 18221 struct scsi_extended_sense sense_buf; 18222 union scsi_cdb cdb; 18223 struct uscsi_cmd ucmd_buf; 18224 int status; 18225 int no_caller_buf = FALSE; 18226 18227 ASSERT(un != NULL); 18228 ASSERT(!mutex_owned(SD_MUTEX(un))); 18229 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18230 18231 SD_TRACE(SD_LOG_IO, un, 18232 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18233 18234 bzero(&cdb, sizeof (cdb)); 18235 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18236 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18237 if (data_bufp == NULL) { 18238 /* Allocate a default buf if the caller did not give one */ 18239 ASSERT(data_len == 0); 18240 data_len = MHIOC_RESV_KEY_SIZE; 18241 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18242 no_caller_buf = TRUE; 18243 } 18244 18245 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18246 cdb.cdb_opaque[1] = usr_cmd; 18247 FORMG1COUNT(&cdb, data_len); 18248 18249 ucmd_buf.uscsi_cdb = (char *)&cdb; 18250 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18251 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18252 ucmd_buf.uscsi_buflen = data_len; 18253 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18254 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18255 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18256 ucmd_buf.uscsi_timeout = 60; 18257 18258 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18259 UIO_SYSSPACE, SD_PATH_STANDARD); 18260 18261 switch (status) { 18262 case 0: 18263 break; /* Success! */ 18264 case EIO: 18265 switch (ucmd_buf.uscsi_status) { 18266 case STATUS_RESERVATION_CONFLICT: 18267 status = EACCES; 18268 break; 18269 case STATUS_CHECK: 18270 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18271 (scsi_sense_key((uint8_t *)&sense_buf) == 18272 KEY_ILLEGAL_REQUEST)) { 18273 status = ENOTSUP; 18274 } 18275 break; 18276 default: 18277 break; 18278 } 18279 break; 18280 default: 18281 break; 18282 } 18283 18284 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18285 18286 if (no_caller_buf == TRUE) { 18287 kmem_free(data_bufp, data_len); 18288 } 18289 18290 return (status); 18291 } 18292 18293 18294 /* 18295 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18296 * 18297 * Description: This routine is the driver entry point for handling CD-ROM 18298 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 18299 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 18300 * device. 18301 * 18302 * Arguments: un - Pointer to soft state struct for the target. 18303 * usr_cmd SCSI-3 reservation facility command (one of 18304 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18305 * SD_SCSI3_PREEMPTANDABORT) 18306 * usr_bufp - user provided pointer register, reserve descriptor or 18307 * preempt and abort structure (mhioc_register_t, 18308 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 18309 * 18310 * Return Code: 0 - Success 18311 * EACCES 18312 * ENOTSUP 18313 * errno return code from sd_send_scsi_cmd() 18314 * 18315 * Context: Can sleep. Does not return until command is completed. 
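 *
 * Editor's sketch (not part of the original source; mykey and rc are
 * hypothetical): registering a key mirrors what the MHIOCGRP_REGISTER
 * ioctl handler does, e.g.
 *
 *	mhioc_register_t reg;
 *
 *	bzero(&reg, sizeof (reg));
 *	bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *	rc = sd_send_scsi_PERSISTENT_RESERVE_OUT(un, SD_SCSI3_REGISTER,
 *	    (uchar_t *)&reg);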
18316 */ 18317 18318 static int 18319 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18320 uchar_t *usr_bufp) 18321 { 18322 struct scsi_extended_sense sense_buf; 18323 union scsi_cdb cdb; 18324 struct uscsi_cmd ucmd_buf; 18325 int status; 18326 uchar_t data_len = sizeof (sd_prout_t); 18327 sd_prout_t *prp; 18328 18329 ASSERT(un != NULL); 18330 ASSERT(!mutex_owned(SD_MUTEX(un))); 18331 ASSERT(data_len == 24); /* required by scsi spec */ 18332 18333 SD_TRACE(SD_LOG_IO, un, 18334 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18335 18336 if (usr_bufp == NULL) { 18337 return (EINVAL); 18338 } 18339 18340 bzero(&cdb, sizeof (cdb)); 18341 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18342 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18343 prp = kmem_zalloc(data_len, KM_SLEEP); 18344 18345 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18346 cdb.cdb_opaque[1] = usr_cmd; 18347 FORMG1COUNT(&cdb, data_len); 18348 18349 ucmd_buf.uscsi_cdb = (char *)&cdb; 18350 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18351 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18352 ucmd_buf.uscsi_buflen = data_len; 18353 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18354 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18355 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18356 ucmd_buf.uscsi_timeout = 60; 18357 18358 switch (usr_cmd) { 18359 case SD_SCSI3_REGISTER: { 18360 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18361 18362 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18363 bcopy(ptr->newkey.key, prp->service_key, 18364 MHIOC_RESV_KEY_SIZE); 18365 prp->aptpl = ptr->aptpl; 18366 break; 18367 } 18368 case SD_SCSI3_RESERVE: 18369 case SD_SCSI3_RELEASE: { 18370 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18371 18372 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18373 prp->scope_address = BE_32(ptr->scope_specific_addr); 18374 cdb.cdb_opaque[2] = ptr->type; 18375 break; 18376 } 18377 case SD_SCSI3_PREEMPTANDABORT: { 18378 mhioc_preemptandabort_t *ptr = 18379 (mhioc_preemptandabort_t *)usr_bufp; 18380 18381 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18382 bcopy(ptr->victim_key.key, prp->service_key, 18383 MHIOC_RESV_KEY_SIZE); 18384 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18385 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18386 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18387 break; 18388 } 18389 case SD_SCSI3_REGISTERANDIGNOREKEY: 18390 { 18391 mhioc_registerandignorekey_t *ptr; 18392 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18393 bcopy(ptr->newkey.key, 18394 prp->service_key, MHIOC_RESV_KEY_SIZE); 18395 prp->aptpl = ptr->aptpl; 18396 break; 18397 } 18398 default: 18399 ASSERT(FALSE); 18400 break; 18401 } 18402 18403 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18404 UIO_SYSSPACE, SD_PATH_STANDARD); 18405 18406 switch (status) { 18407 case 0: 18408 break; /* Success! 
*/ 18409 case EIO: 18410 switch (ucmd_buf.uscsi_status) { 18411 case STATUS_RESERVATION_CONFLICT: 18412 status = EACCES; 18413 break; 18414 case STATUS_CHECK: 18415 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18416 (scsi_sense_key((uint8_t *)&sense_buf) == 18417 KEY_ILLEGAL_REQUEST)) { 18418 status = ENOTSUP; 18419 } 18420 break; 18421 default: 18422 break; 18423 } 18424 break; 18425 default: 18426 break; 18427 } 18428 18429 kmem_free(prp, data_len); 18430 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18431 return (status); 18432 } 18433 18434 18435 /* 18436 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18437 * 18438 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18439 * 18440 * Arguments: un - pointer to the target's soft state struct 18441 * 18442 * Return Code: 0 - success 18443 * errno-type error code 18444 * 18445 * Context: kernel thread context only. 18446 */ 18447 18448 static int 18449 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18450 { 18451 struct sd_uscsi_info *uip; 18452 struct uscsi_cmd *uscmd; 18453 union scsi_cdb *cdb; 18454 struct buf *bp; 18455 int rval = 0; 18456 18457 SD_TRACE(SD_LOG_IO, un, 18458 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18459 18460 ASSERT(un != NULL); 18461 ASSERT(!mutex_owned(SD_MUTEX(un))); 18462 18463 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18464 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18465 18466 /* 18467 * First get some memory for the uscsi_cmd struct and cdb 18468 * and initialize for SYNCHRONIZE_CACHE cmd. 18469 */ 18470 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 18471 uscmd->uscsi_cdblen = CDB_GROUP1; 18472 uscmd->uscsi_cdb = (caddr_t)cdb; 18473 uscmd->uscsi_bufaddr = NULL; 18474 uscmd->uscsi_buflen = 0; 18475 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 18476 uscmd->uscsi_rqlen = SENSE_LENGTH; 18477 uscmd->uscsi_rqresid = SENSE_LENGTH; 18478 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18479 uscmd->uscsi_timeout = sd_io_time; 18480 18481 /* 18482 * Allocate an sd_uscsi_info struct and fill it with the info 18483 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 18484 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 18485 * since we allocate the buf here in this function, we do not 18486 * need to preserve the prior contents of b_private. 18487 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 18488 */ 18489 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 18490 uip->ui_flags = SD_PATH_DIRECT; 18491 uip->ui_cmdp = uscmd; 18492 18493 bp = getrbuf(KM_SLEEP); 18494 bp->b_private = uip; 18495 18496 /* 18497 * Setup buffer to carry uscsi request. 18498 */ 18499 bp->b_flags = B_BUSY; 18500 bp->b_bcount = 0; 18501 bp->b_blkno = 0; 18502 18503 if (dkc != NULL) { 18504 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 18505 uip->ui_dkc = *dkc; 18506 } 18507 18508 bp->b_edev = SD_GET_DEV(un); 18509 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 18510 18511 (void) sd_uscsi_strategy(bp); 18512 18513 /* 18514 * If synchronous request, wait for completion 18515 * If async just return and let b_iodone callback 18516 * cleanup. 18517 * NOTE: On return, u_ncmds_in_driver will be decremented, 18518 * but it was also incremented in sd_uscsi_strategy(), so 18519 * we should be ok. 
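 *
 * Editor's note (illustrative; my_done and my_arg are hypothetical): an
 * asynchronous flush is requested by passing a dk_callback, e.g.
 *
 *	struct dk_callback dkc;
 *
 *	dkc.dkc_callback = my_done;
 *	dkc.dkc_cookie = my_arg;
 *	(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc);
 *
 * in which case this routine returns immediately and my_done() is later
 * called from the biodone handler with the final status.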
18520 */ 18521 if (dkc == NULL) { 18522 (void) biowait(bp); 18523 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 18524 } 18525 18526 return (rval); 18527 } 18528 18529 18530 static int 18531 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 18532 { 18533 struct sd_uscsi_info *uip; 18534 struct uscsi_cmd *uscmd; 18535 uint8_t *sense_buf; 18536 struct sd_lun *un; 18537 int status; 18538 18539 uip = (struct sd_uscsi_info *)(bp->b_private); 18540 ASSERT(uip != NULL); 18541 18542 uscmd = uip->ui_cmdp; 18543 ASSERT(uscmd != NULL); 18544 18545 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 18546 ASSERT(sense_buf != NULL); 18547 18548 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 18549 ASSERT(un != NULL); 18550 18551 status = geterror(bp); 18552 switch (status) { 18553 case 0: 18554 break; /* Success! */ 18555 case EIO: 18556 switch (uscmd->uscsi_status) { 18557 case STATUS_RESERVATION_CONFLICT: 18558 /* Ignore reservation conflict */ 18559 status = 0; 18560 goto done; 18561 18562 case STATUS_CHECK: 18563 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 18564 (scsi_sense_key(sense_buf) == 18565 KEY_ILLEGAL_REQUEST)) { 18566 /* Ignore Illegal Request error */ 18567 mutex_enter(SD_MUTEX(un)); 18568 un->un_f_sync_cache_supported = FALSE; 18569 mutex_exit(SD_MUTEX(un)); 18570 status = ENOTSUP; 18571 goto done; 18572 } 18573 break; 18574 default: 18575 break; 18576 } 18577 /* FALLTHRU */ 18578 default: 18579 /* 18580 * Don't log an error message if this device 18581 * has removable media. 18582 */ 18583 if (!un->un_f_has_removable_media) { 18584 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18585 "SYNCHRONIZE CACHE command failed (%d)\n", status); 18586 } 18587 break; 18588 } 18589 18590 done: 18591 if (uip->ui_dkc.dkc_callback != NULL) { 18592 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 18593 } 18594 18595 ASSERT((bp->b_flags & B_REMAPPED) == 0); 18596 freerbuf(bp); 18597 kmem_free(uip, sizeof (struct sd_uscsi_info)); 18598 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 18599 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 18600 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 18601 18602 return (status); 18603 } 18604 18605 18606 /* 18607 * Function: sd_send_scsi_GET_CONFIGURATION 18608 * 18609 * Description: Issues the get configuration command to the device. 18610 * Called from sd_check_for_writable_cd & sd_get_media_info 18611 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 18612 * Arguments: un 18613 * ucmdbuf 18614 * rqbuf 18615 * rqbuflen 18616 * bufaddr 18617 * buflen 18618 * path_flag 18619 * 18620 * Return Code: 0 - Success 18621 * errno return code from sd_send_scsi_cmd() 18622 * 18623 * Context: Can sleep. Does not return until command is completed. 18624 * 18625 */ 18626 18627 static int 18628 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 18629 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 18630 int path_flag) 18631 { 18632 char cdb[CDB_GROUP1]; 18633 int status; 18634 18635 ASSERT(un != NULL); 18636 ASSERT(!mutex_owned(SD_MUTEX(un))); 18637 ASSERT(bufaddr != NULL); 18638 ASSERT(ucmdbuf != NULL); 18639 ASSERT(rqbuf != NULL); 18640 18641 SD_TRACE(SD_LOG_IO, un, 18642 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 18643 18644 bzero(cdb, sizeof (cdb)); 18645 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18646 bzero(rqbuf, rqbuflen); 18647 bzero(bufaddr, buflen); 18648 18649 /* 18650 * Set up cdb field for the get configuration command. 
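 * Byte 1 carries the RT (requested type) field, set here to 0x02, and
 * byte 8 the allocation length, which is limited to
 * SD_PROFILE_HEADER_LEN so at most that many bytes are returned.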
18651 */ 18652 cdb[0] = SCMD_GET_CONFIGURATION; 18653 cdb[1] = 0x02; /* Requested Type */ 18654 cdb[8] = SD_PROFILE_HEADER_LEN; 18655 ucmdbuf->uscsi_cdb = cdb; 18656 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18657 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18658 ucmdbuf->uscsi_buflen = buflen; 18659 ucmdbuf->uscsi_timeout = sd_io_time; 18660 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18661 ucmdbuf->uscsi_rqlen = rqbuflen; 18662 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18663 18664 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18665 UIO_SYSSPACE, path_flag); 18666 18667 switch (status) { 18668 case 0: 18669 break; /* Success! */ 18670 case EIO: 18671 switch (ucmdbuf->uscsi_status) { 18672 case STATUS_RESERVATION_CONFLICT: 18673 status = EACCES; 18674 break; 18675 default: 18676 break; 18677 } 18678 break; 18679 default: 18680 break; 18681 } 18682 18683 if (status == 0) { 18684 SD_DUMP_MEMORY(un, SD_LOG_IO, 18685 "sd_send_scsi_GET_CONFIGURATION: data", 18686 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18687 } 18688 18689 SD_TRACE(SD_LOG_IO, un, 18690 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 18691 18692 return (status); 18693 } 18694 18695 /* 18696 * Function: sd_send_scsi_feature_GET_CONFIGURATION 18697 * 18698 * Description: Issues the get configuration command to the device to 18699 * retrieve a specific feature. Called from 18700 * sd_check_for_writable_cd & sd_set_mmc_caps. 18701 * Arguments: un 18702 * ucmdbuf 18703 * rqbuf 18704 * rqbuflen 18705 * bufaddr 18706 * buflen 18707 * feature 18708 * 18709 * Return Code: 0 - Success 18710 * errno return code from sd_send_scsi_cmd() 18711 * 18712 * Context: Can sleep. Does not return until command is completed. 18713 * 18714 */ 18715 static int 18716 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 18717 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 18718 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 18719 { 18720 char cdb[CDB_GROUP1]; 18721 int status; 18722 18723 ASSERT(un != NULL); 18724 ASSERT(!mutex_owned(SD_MUTEX(un))); 18725 ASSERT(bufaddr != NULL); 18726 ASSERT(ucmdbuf != NULL); 18727 ASSERT(rqbuf != NULL); 18728 18729 SD_TRACE(SD_LOG_IO, un, 18730 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 18731 18732 bzero(cdb, sizeof (cdb)); 18733 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18734 bzero(rqbuf, rqbuflen); 18735 bzero(bufaddr, buflen); 18736 18737 /* 18738 * Set up cdb field for the get configuration command. 18739 */ 18740 cdb[0] = SCMD_GET_CONFIGURATION; 18741 cdb[1] = 0x02; /* Requested Type */ 18742 cdb[3] = feature; 18743 cdb[8] = buflen; 18744 ucmdbuf->uscsi_cdb = cdb; 18745 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18746 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18747 ucmdbuf->uscsi_buflen = buflen; 18748 ucmdbuf->uscsi_timeout = sd_io_time; 18749 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18750 ucmdbuf->uscsi_rqlen = rqbuflen; 18751 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18752 18753 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18754 UIO_SYSSPACE, path_flag); 18755 18756 switch (status) { 18757 case 0: 18758 break; /* Success! 
*/ 18759 case EIO: 18760 switch (ucmdbuf->uscsi_status) { 18761 case STATUS_RESERVATION_CONFLICT: 18762 status = EACCES; 18763 break; 18764 default: 18765 break; 18766 } 18767 break; 18768 default: 18769 break; 18770 } 18771 18772 if (status == 0) { 18773 SD_DUMP_MEMORY(un, SD_LOG_IO, 18774 "sd_send_scsi_feature_GET_CONFIGURATION: data", 18775 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18776 } 18777 18778 SD_TRACE(SD_LOG_IO, un, 18779 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 18780 18781 return (status); 18782 } 18783 18784 18785 /* 18786 * Function: sd_send_scsi_MODE_SENSE 18787 * 18788 * Description: Utility function for issuing a scsi MODE SENSE command. 18789 * Note: This routine uses a consistent implementation for Group0, 18790 * Group1, and Group2 commands across all platforms. ATAPI devices 18791 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 18792 * 18793 * Arguments: un - pointer to the softstate struct for the target. 18794 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 18795 * CDB_GROUP[1|2] (10 byte). 18796 * bufaddr - buffer for page data retrieved from the target. 18797 * buflen - size of page to be retrieved. 18798 * page_code - page code of data to be retrieved from the target. 18799 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18800 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18801 * to use the USCSI "direct" chain and bypass the normal 18802 * command waitq. 18803 * 18804 * Return Code: 0 - Success 18805 * errno return code from sd_send_scsi_cmd() 18806 * 18807 * Context: Can sleep. Does not return until command is completed. 18808 */ 18809 18810 static int 18811 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 18812 size_t buflen, uchar_t page_code, int path_flag) 18813 { 18814 struct scsi_extended_sense sense_buf; 18815 union scsi_cdb cdb; 18816 struct uscsi_cmd ucmd_buf; 18817 int status; 18818 int headlen; 18819 18820 ASSERT(un != NULL); 18821 ASSERT(!mutex_owned(SD_MUTEX(un))); 18822 ASSERT(bufaddr != NULL); 18823 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 18824 (cdbsize == CDB_GROUP2)); 18825 18826 SD_TRACE(SD_LOG_IO, un, 18827 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 18828 18829 bzero(&cdb, sizeof (cdb)); 18830 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18831 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18832 bzero(bufaddr, buflen); 18833 18834 if (cdbsize == CDB_GROUP0) { 18835 cdb.scc_cmd = SCMD_MODE_SENSE; 18836 cdb.cdb_opaque[2] = page_code; 18837 FORMG0COUNT(&cdb, buflen); 18838 headlen = MODE_HEADER_LENGTH; 18839 } else { 18840 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 18841 cdb.cdb_opaque[2] = page_code; 18842 FORMG1COUNT(&cdb, buflen); 18843 headlen = MODE_HEADER_LENGTH_GRP2; 18844 } 18845 18846 ASSERT(headlen <= buflen); 18847 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 18848 18849 ucmd_buf.uscsi_cdb = (char *)&cdb; 18850 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 18851 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18852 ucmd_buf.uscsi_buflen = buflen; 18853 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18854 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18855 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18856 ucmd_buf.uscsi_timeout = 60; 18857 18858 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18859 UIO_SYSSPACE, path_flag); 18860 18861 switch (status) { 18862 case 0: 18863 /* 18864 * sr_check_wp() uses 0x3f page code and check the header of 18865 * mode page to determine if target device is 
write-protected. 18866 * But some USB devices return 0 bytes for 0x3f page code. For 18867 * this case, make sure that mode page header is returned at 18868 * least. 18869 */ 18870 if (buflen - ucmd_buf.uscsi_resid < headlen) 18871 status = EIO; 18872 break; /* Success! */ 18873 case EIO: 18874 switch (ucmd_buf.uscsi_status) { 18875 case STATUS_RESERVATION_CONFLICT: 18876 status = EACCES; 18877 break; 18878 default: 18879 break; 18880 } 18881 break; 18882 default: 18883 break; 18884 } 18885 18886 if (status == 0) { 18887 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 18888 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 18889 } 18890 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 18891 18892 return (status); 18893 } 18894 18895 18896 /* 18897 * Function: sd_send_scsi_MODE_SELECT 18898 * 18899 * Description: Utility function for issuing a scsi MODE SELECT command. 18900 * Note: This routine uses a consistent implementation for Group0, 18901 * Group1, and Group2 commands across all platforms. ATAPI devices 18902 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 18903 * 18904 * Arguments: un - pointer to the softstate struct for the target. 18905 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 18906 * CDB_GROUP[1|2] (10 byte). 18907 * bufaddr - buffer for page data retrieved from the target. 18908 * buflen - size of page to be retrieved. 18909 * save_page - boolean to determin if SP bit should be set. 18910 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18911 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18912 * to use the USCSI "direct" chain and bypass the normal 18913 * command waitq. 18914 * 18915 * Return Code: 0 - Success 18916 * errno return code from sd_send_scsi_cmd() 18917 * 18918 * Context: Can sleep. Does not return until command is completed. 
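 *
 * Editor's sketch (not part of the original source; buf, page_code and
 * rc are hypothetical): a typical read-modify-write of a mode page
 * pairs this routine with sd_send_scsi_MODE_SENSE(), e.g.
 *
 *	uchar_t	buf[255];
 *
 *	rc = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, buf, sizeof (buf),
 *	    page_code, SD_PATH_DIRECT);
 *	if (rc == 0) {
 *		(modify the returned page data in buf here)
 *		rc = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, buf,
 *		    sizeof (buf), SD_SAVE_PAGE, SD_PATH_DIRECT);
 *	}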
18919 */ 18920 18921 static int 18922 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 18923 size_t buflen, uchar_t save_page, int path_flag) 18924 { 18925 struct scsi_extended_sense sense_buf; 18926 union scsi_cdb cdb; 18927 struct uscsi_cmd ucmd_buf; 18928 int status; 18929 18930 ASSERT(un != NULL); 18931 ASSERT(!mutex_owned(SD_MUTEX(un))); 18932 ASSERT(bufaddr != NULL); 18933 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 18934 (cdbsize == CDB_GROUP2)); 18935 18936 SD_TRACE(SD_LOG_IO, un, 18937 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 18938 18939 bzero(&cdb, sizeof (cdb)); 18940 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18941 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18942 18943 /* Set the PF bit for many third party drives */ 18944 cdb.cdb_opaque[1] = 0x10; 18945 18946 /* Set the save page (SP) bit if given */ 18947 if (save_page == SD_SAVE_PAGE) { 18948 cdb.cdb_opaque[1] |= 0x01; 18949 } 18950 18951 if (cdbsize == CDB_GROUP0) { 18952 cdb.scc_cmd = SCMD_MODE_SELECT; 18953 FORMG0COUNT(&cdb, buflen); 18954 } else { 18955 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 18956 FORMG1COUNT(&cdb, buflen); 18957 } 18958 18959 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 18960 18961 ucmd_buf.uscsi_cdb = (char *)&cdb; 18962 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 18963 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18964 ucmd_buf.uscsi_buflen = buflen; 18965 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18966 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18967 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18968 ucmd_buf.uscsi_timeout = 60; 18969 18970 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18971 UIO_SYSSPACE, path_flag); 18972 18973 switch (status) { 18974 case 0: 18975 break; /* Success! */ 18976 case EIO: 18977 switch (ucmd_buf.uscsi_status) { 18978 case STATUS_RESERVATION_CONFLICT: 18979 status = EACCES; 18980 break; 18981 default: 18982 break; 18983 } 18984 break; 18985 default: 18986 break; 18987 } 18988 18989 if (status == 0) { 18990 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 18991 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 18992 } 18993 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 18994 18995 return (status); 18996 } 18997 18998 18999 /* 19000 * Function: sd_send_scsi_RDWR 19001 * 19002 * Description: Issue a scsi READ or WRITE command with the given parameters. 19003 * 19004 * Arguments: un: Pointer to the sd_lun struct for the target. 19005 * cmd: SCMD_READ or SCMD_WRITE 19006 * bufaddr: Address of caller's buffer to receive the RDWR data 19007 * buflen: Length of caller's buffer to receive the RDWR data. 19008 * start_block: Block number for the start of the RDWR operation. 19009 * (Assumes target-native block size.) 19010 * residp: Pointer to variable to receive the residual of the 19011 * RDWR operation (may be NULL if no residual is requested). 19012 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19013 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19014 * to use the USCSI "direct" chain and bypass the normal 19015 * command waitq. 19016 * 19017 * Return Code: 0 - Success 19018 * errno return code from sd_send_scsi_cmd() 19019 * 19020 * Context: Can sleep. Does not return until command is completed.
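 *
 * Editor's sketch (not part of the original source; buf and rc are
 * hypothetical): reading a single target block from the start of the
 * device might look like
 *
 *	rc = sd_send_scsi_RDWR(un, SCMD_READ, buf, un->un_tgt_blocksize,
 *	    (daddr_t)0, SD_PATH_STANDARD);
 *
 * where buf points to at least un_tgt_blocksize bytes of kernel memory.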
19021 */ 19022 19023 static int 19024 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19025 size_t buflen, daddr_t start_block, int path_flag) 19026 { 19027 struct scsi_extended_sense sense_buf; 19028 union scsi_cdb cdb; 19029 struct uscsi_cmd ucmd_buf; 19030 uint32_t block_count; 19031 int status; 19032 int cdbsize; 19033 uchar_t flag; 19034 19035 ASSERT(un != NULL); 19036 ASSERT(!mutex_owned(SD_MUTEX(un))); 19037 ASSERT(bufaddr != NULL); 19038 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19039 19040 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19041 19042 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19043 return (EINVAL); 19044 } 19045 19046 mutex_enter(SD_MUTEX(un)); 19047 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19048 mutex_exit(SD_MUTEX(un)); 19049 19050 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19051 19052 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19053 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19054 bufaddr, buflen, start_block, block_count); 19055 19056 bzero(&cdb, sizeof (cdb)); 19057 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19058 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19059 19060 /* Compute CDB size to use */ 19061 if (start_block > 0xffffffff) 19062 cdbsize = CDB_GROUP4; 19063 else if ((start_block & 0xFFE00000) || 19064 (un->un_f_cfg_is_atapi == TRUE)) 19065 cdbsize = CDB_GROUP1; 19066 else 19067 cdbsize = CDB_GROUP0; 19068 19069 switch (cdbsize) { 19070 case CDB_GROUP0: /* 6-byte CDBs */ 19071 cdb.scc_cmd = cmd; 19072 FORMG0ADDR(&cdb, start_block); 19073 FORMG0COUNT(&cdb, block_count); 19074 break; 19075 case CDB_GROUP1: /* 10-byte CDBs */ 19076 cdb.scc_cmd = cmd | SCMD_GROUP1; 19077 FORMG1ADDR(&cdb, start_block); 19078 FORMG1COUNT(&cdb, block_count); 19079 break; 19080 case CDB_GROUP4: /* 16-byte CDBs */ 19081 cdb.scc_cmd = cmd | SCMD_GROUP4; 19082 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19083 FORMG4COUNT(&cdb, block_count); 19084 break; 19085 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19086 default: 19087 /* All others reserved */ 19088 return (EINVAL); 19089 } 19090 19091 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19092 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19093 19094 ucmd_buf.uscsi_cdb = (char *)&cdb; 19095 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19096 ucmd_buf.uscsi_bufaddr = bufaddr; 19097 ucmd_buf.uscsi_buflen = buflen; 19098 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19099 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19100 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19101 ucmd_buf.uscsi_timeout = 60; 19102 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19103 UIO_SYSSPACE, path_flag); 19104 switch (status) { 19105 case 0: 19106 break; /* Success! */ 19107 case EIO: 19108 switch (ucmd_buf.uscsi_status) { 19109 case STATUS_RESERVATION_CONFLICT: 19110 status = EACCES; 19111 break; 19112 default: 19113 break; 19114 } 19115 break; 19116 default: 19117 break; 19118 } 19119 19120 if (status == 0) { 19121 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19122 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19123 } 19124 19125 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19126 19127 return (status); 19128 } 19129 19130 19131 /* 19132 * Function: sd_send_scsi_LOG_SENSE 19133 * 19134 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19135 * 19136 * Arguments: un: Pointer to the sd_lun struct for the target. 
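 *		bufaddr: Buffer to receive the log page data.
 *		buflen: Length of bufaddr in bytes.
 *		page_code: Log page to be retrieved.
 *		page_control: Page control bits, placed with page_code in
 *			byte 2 of the CDB.
 *		param_ptr: Parameter pointer, placed in bytes 5 and 6 of
 *			the CDB.
 *		path_flag: SD_PATH_STANDARD, SD_PATH_DIRECT or
 *			SD_PATH_DIRECT_PRIORITY, as for the other
 *			sd_send_scsi_* routines.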
19137 * 19138 * Return Code: 0 - Success 19139 * errno return code from sd_send_scsi_cmd() 19140 * 19141 * Context: Can sleep. Does not return until command is completed. 19142 */ 19143 19144 static int 19145 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19146 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19147 int path_flag) 19148 19149 { 19150 struct scsi_extended_sense sense_buf; 19151 union scsi_cdb cdb; 19152 struct uscsi_cmd ucmd_buf; 19153 int status; 19154 19155 ASSERT(un != NULL); 19156 ASSERT(!mutex_owned(SD_MUTEX(un))); 19157 19158 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19159 19160 bzero(&cdb, sizeof (cdb)); 19161 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19162 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19163 19164 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19165 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19166 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19167 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19168 FORMG1COUNT(&cdb, buflen); 19169 19170 ucmd_buf.uscsi_cdb = (char *)&cdb; 19171 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19172 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19173 ucmd_buf.uscsi_buflen = buflen; 19174 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19175 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19176 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19177 ucmd_buf.uscsi_timeout = 60; 19178 19179 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19180 UIO_SYSSPACE, path_flag); 19181 19182 switch (status) { 19183 case 0: 19184 break; 19185 case EIO: 19186 switch (ucmd_buf.uscsi_status) { 19187 case STATUS_RESERVATION_CONFLICT: 19188 status = EACCES; 19189 break; 19190 case STATUS_CHECK: 19191 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19192 (scsi_sense_key((uint8_t *)&sense_buf) == 19193 KEY_ILLEGAL_REQUEST) && 19194 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19195 /* 19196 * ASC 0x24: INVALID FIELD IN CDB 19197 */ 19198 switch (page_code) { 19199 case START_STOP_CYCLE_PAGE: 19200 /* 19201 * The start stop cycle counter is 19202 * implemented as page 0x31 in earlier 19203 * generation disks. In new generation 19204 * disks the start stop cycle counter is 19205 * implemented as page 0xE. To properly 19206 * handle this case if an attempt for 19207 * log page 0xE is made and fails we 19208 * will try again using page 0x31. 19209 * 19210 * Network storage BU committed to 19211 * maintain the page 0x31 for this 19212 * purpose and will not have any other 19213 * page implemented with page code 0x31 19214 * until all disks transition to the 19215 * standard page. 
19216 */ 19217 mutex_enter(SD_MUTEX(un)); 19218 un->un_start_stop_cycle_page = 19219 START_STOP_CYCLE_VU_PAGE; 19220 cdb.cdb_opaque[2] = 19221 (char)(page_control << 6) | 19222 un->un_start_stop_cycle_page; 19223 mutex_exit(SD_MUTEX(un)); 19224 status = sd_send_scsi_cmd( 19225 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19226 UIO_SYSSPACE, path_flag); 19227 19228 break; 19229 case TEMPERATURE_PAGE: 19230 status = ENOTTY; 19231 break; 19232 default: 19233 break; 19234 } 19235 } 19236 break; 19237 default: 19238 break; 19239 } 19240 break; 19241 default: 19242 break; 19243 } 19244 19245 if (status == 0) { 19246 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19247 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19248 } 19249 19250 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19251 19252 return (status); 19253 } 19254 19255 19256 /* 19257 * Function: sdioctl 19258 * 19259 * Description: Driver's ioctl(9e) entry point function. 19260 * 19261 * Arguments: dev - device number 19262 * cmd - ioctl operation to be performed 19263 * arg - user argument, contains data to be set or reference 19264 * parameter for get 19265 * flag - bit flag, indicating open settings, 32/64 bit type 19266 * cred_p - user credential pointer 19267 * rval_p - calling process return value (OPT) 19268 * 19269 * Return Code: EINVAL 19270 * ENOTTY 19271 * ENXIO 19272 * EIO 19273 * EFAULT 19274 * ENOTSUP 19275 * EPERM 19276 * 19277 * Context: Called from the device switch at normal priority. 19278 */ 19279 19280 static int 19281 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19282 { 19283 struct sd_lun *un = NULL; 19284 int err = 0; 19285 int i = 0; 19286 cred_t *cr; 19287 int tmprval = EINVAL; 19288 int is_valid; 19289 19290 /* 19291 * All device accesses go thru sdstrategy where we check on suspend 19292 * status 19293 */ 19294 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19295 return (ENXIO); 19296 } 19297 19298 ASSERT(!mutex_owned(SD_MUTEX(un))); 19299 19300 19301 is_valid = SD_IS_VALID_LABEL(un); 19302 19303 /* 19304 * Moved this wait from sd_uscsi_strategy to here for 19305 * reasons of deadlock prevention. Internal driver commands, 19306 * specifically those to change a devices power level, result 19307 * in a call to sd_uscsi_strategy. 19308 */ 19309 mutex_enter(SD_MUTEX(un)); 19310 while ((un->un_state == SD_STATE_SUSPENDED) || 19311 (un->un_state == SD_STATE_PM_CHANGING)) { 19312 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19313 } 19314 /* 19315 * Twiddling the counter here protects commands from now 19316 * through to the top of sd_uscsi_strategy. Without the 19317 * counter inc. a power down, for example, could get in 19318 * after the above check for state is made and before 19319 * execution gets to the top of sd_uscsi_strategy. 19320 * That would cause problems. 
19321 */ 19322 un->un_ncmds_in_driver++; 19323 19324 if (!is_valid && 19325 (flag & (FNDELAY | FNONBLOCK))) { 19326 switch (cmd) { 19327 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19328 case DKIOCGVTOC: 19329 case DKIOCGAPART: 19330 case DKIOCPARTINFO: 19331 case DKIOCSGEOM: 19332 case DKIOCSAPART: 19333 case DKIOCGETEFI: 19334 case DKIOCPARTITION: 19335 case DKIOCSVTOC: 19336 case DKIOCSETEFI: 19337 case DKIOCGMBOOT: 19338 case DKIOCSMBOOT: 19339 case DKIOCG_PHYGEOM: 19340 case DKIOCG_VIRTGEOM: 19341 /* let cmlb handle it */ 19342 goto skip_ready_valid; 19343 19344 case CDROMPAUSE: 19345 case CDROMRESUME: 19346 case CDROMPLAYMSF: 19347 case CDROMPLAYTRKIND: 19348 case CDROMREADTOCHDR: 19349 case CDROMREADTOCENTRY: 19350 case CDROMSTOP: 19351 case CDROMSTART: 19352 case CDROMVOLCTRL: 19353 case CDROMSUBCHNL: 19354 case CDROMREADMODE2: 19355 case CDROMREADMODE1: 19356 case CDROMREADOFFSET: 19357 case CDROMSBLKMODE: 19358 case CDROMGBLKMODE: 19359 case CDROMGDRVSPEED: 19360 case CDROMSDRVSPEED: 19361 case CDROMCDDA: 19362 case CDROMCDXA: 19363 case CDROMSUBCODE: 19364 if (!ISCD(un)) { 19365 un->un_ncmds_in_driver--; 19366 ASSERT(un->un_ncmds_in_driver >= 0); 19367 mutex_exit(SD_MUTEX(un)); 19368 return (ENOTTY); 19369 } 19370 break; 19371 case FDEJECT: 19372 case DKIOCEJECT: 19373 case CDROMEJECT: 19374 if (!un->un_f_eject_media_supported) { 19375 un->un_ncmds_in_driver--; 19376 ASSERT(un->un_ncmds_in_driver >= 0); 19377 mutex_exit(SD_MUTEX(un)); 19378 return (ENOTTY); 19379 } 19380 break; 19381 case DKIOCFLUSHWRITECACHE: 19382 mutex_exit(SD_MUTEX(un)); 19383 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19384 if (err != 0) { 19385 mutex_enter(SD_MUTEX(un)); 19386 un->un_ncmds_in_driver--; 19387 ASSERT(un->un_ncmds_in_driver >= 0); 19388 mutex_exit(SD_MUTEX(un)); 19389 return (EIO); 19390 } 19391 mutex_enter(SD_MUTEX(un)); 19392 /* FALLTHROUGH */ 19393 case DKIOCREMOVABLE: 19394 case DKIOCHOTPLUGGABLE: 19395 case DKIOCINFO: 19396 case DKIOCGMEDIAINFO: 19397 case MHIOCENFAILFAST: 19398 case MHIOCSTATUS: 19399 case MHIOCTKOWN: 19400 case MHIOCRELEASE: 19401 case MHIOCGRP_INKEYS: 19402 case MHIOCGRP_INRESV: 19403 case MHIOCGRP_REGISTER: 19404 case MHIOCGRP_RESERVE: 19405 case MHIOCGRP_PREEMPTANDABORT: 19406 case MHIOCGRP_REGISTERANDIGNOREKEY: 19407 case CDROMCLOSETRAY: 19408 case USCSICMD: 19409 goto skip_ready_valid; 19410 default: 19411 break; 19412 } 19413 19414 mutex_exit(SD_MUTEX(un)); 19415 err = sd_ready_and_valid(un); 19416 mutex_enter(SD_MUTEX(un)); 19417 19418 if (err != SD_READY_VALID) { 19419 switch (cmd) { 19420 case DKIOCSTATE: 19421 case CDROMGDRVSPEED: 19422 case CDROMSDRVSPEED: 19423 case FDEJECT: /* for eject command */ 19424 case DKIOCEJECT: 19425 case CDROMEJECT: 19426 case DKIOCREMOVABLE: 19427 case DKIOCHOTPLUGGABLE: 19428 break; 19429 default: 19430 if (un->un_f_has_removable_media) { 19431 err = ENXIO; 19432 } else { 19433 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19434 if (err == SD_RESERVED_BY_OTHERS) { 19435 err = EACCES; 19436 } else { 19437 err = EIO; 19438 } 19439 } 19440 un->un_ncmds_in_driver--; 19441 ASSERT(un->un_ncmds_in_driver >= 0); 19442 mutex_exit(SD_MUTEX(un)); 19443 return (err); 19444 } 19445 } 19446 } 19447 19448 skip_ready_valid: 19449 mutex_exit(SD_MUTEX(un)); 19450 19451 switch (cmd) { 19452 case DKIOCINFO: 19453 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19454 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19455 break; 19456 19457 case DKIOCGMEDIAINFO: 19458 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19459 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19460 break; 19461 19462 case DKIOCGGEOM: 19463 case DKIOCGVTOC: 19464 case DKIOCGAPART: 19465 case DKIOCPARTINFO: 19466 case DKIOCSGEOM: 19467 case DKIOCSAPART: 19468 case DKIOCGETEFI: 19469 case DKIOCPARTITION: 19470 case DKIOCSVTOC: 19471 case DKIOCSETEFI: 19472 case DKIOCGMBOOT: 19473 case DKIOCSMBOOT: 19474 case DKIOCG_PHYGEOM: 19475 case DKIOCG_VIRTGEOM: 19476 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19477 19478 /* TUR should spin up */ 19479 19480 if (un->un_f_has_removable_media) 19481 err = sd_send_scsi_TEST_UNIT_READY(un, 19482 SD_CHECK_FOR_MEDIA); 19483 else 19484 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19485 19486 if (err != 0) 19487 break; 19488 19489 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19490 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19491 19492 if ((err == 0) && 19493 ((cmd == DKIOCSETEFI) || 19494 (un->un_f_pkstats_enabled) && 19495 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 19496 19497 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19498 (void *)SD_PATH_DIRECT); 19499 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19500 sd_set_pstats(un); 19501 SD_TRACE(SD_LOG_IO_PARTITION, un, 19502 "sd_ioctl: un:0x%p pstats created and " 19503 "set\n", un); 19504 } 19505 } 19506 19507 if ((cmd == DKIOCSVTOC) || 19508 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19509 19510 mutex_enter(SD_MUTEX(un)); 19511 if (un->un_f_devid_supported && 19512 (un->un_f_opt_fab_devid == TRUE)) { 19513 if (un->un_devid == NULL) { 19514 sd_register_devid(un, SD_DEVINFO(un), 19515 SD_TARGET_IS_UNRESERVED); 19516 } else { 19517 /* 19518 * The device id for this disk 19519 * has been fabricated. The 19520 * device id must be preserved 19521 * by writing it back out to 19522 * disk. 19523 */ 19524 if (sd_write_deviceid(un) != 0) { 19525 ddi_devid_free(un->un_devid); 19526 un->un_devid = NULL; 19527 } 19528 } 19529 } 19530 mutex_exit(SD_MUTEX(un)); 19531 } 19532 19533 break; 19534 19535 case DKIOCLOCK: 19536 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19537 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19538 SD_PATH_STANDARD); 19539 break; 19540 19541 case DKIOCUNLOCK: 19542 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19543 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19544 SD_PATH_STANDARD); 19545 break; 19546 19547 case DKIOCSTATE: { 19548 enum dkio_state state; 19549 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19550 19551 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19552 err = EFAULT; 19553 } else { 19554 err = sd_check_media(dev, state); 19555 if (err == 0) { 19556 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19557 sizeof (int), flag) != 0) 19558 err = EFAULT; 19559 } 19560 } 19561 break; 19562 } 19563 19564 case DKIOCREMOVABLE: 19565 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19566 i = un->un_f_has_removable_media ? 1 : 0; 19567 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19568 err = EFAULT; 19569 } else { 19570 err = 0; 19571 } 19572 break; 19573 19574 case DKIOCHOTPLUGGABLE: 19575 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19576 i = un->un_f_is_hotpluggable ? 
1 : 0; 19577 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19578 err = EFAULT; 19579 } else { 19580 err = 0; 19581 } 19582 break; 19583 19584 case DKIOCGTEMPERATURE: 19585 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19586 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19587 break; 19588 19589 case MHIOCENFAILFAST: 19590 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19591 if ((err = drv_priv(cred_p)) == 0) { 19592 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19593 } 19594 break; 19595 19596 case MHIOCTKOWN: 19597 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19598 if ((err = drv_priv(cred_p)) == 0) { 19599 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19600 } 19601 break; 19602 19603 case MHIOCRELEASE: 19604 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19605 if ((err = drv_priv(cred_p)) == 0) { 19606 err = sd_mhdioc_release(dev); 19607 } 19608 break; 19609 19610 case MHIOCSTATUS: 19611 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19612 if ((err = drv_priv(cred_p)) == 0) { 19613 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19614 case 0: 19615 err = 0; 19616 break; 19617 case EACCES: 19618 *rval_p = 1; 19619 err = 0; 19620 break; 19621 default: 19622 err = EIO; 19623 break; 19624 } 19625 } 19626 break; 19627 19628 case MHIOCQRESERVE: 19629 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 19630 if ((err = drv_priv(cred_p)) == 0) { 19631 err = sd_reserve_release(dev, SD_RESERVE); 19632 } 19633 break; 19634 19635 case MHIOCREREGISTERDEVID: 19636 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 19637 if (drv_priv(cred_p) == EPERM) { 19638 err = EPERM; 19639 } else if (!un->un_f_devid_supported) { 19640 err = ENOTTY; 19641 } else { 19642 err = sd_mhdioc_register_devid(dev); 19643 } 19644 break; 19645 19646 case MHIOCGRP_INKEYS: 19647 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 19648 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19649 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19650 err = ENOTSUP; 19651 } else { 19652 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 19653 flag); 19654 } 19655 } 19656 break; 19657 19658 case MHIOCGRP_INRESV: 19659 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 19660 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19661 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19662 err = ENOTSUP; 19663 } else { 19664 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 19665 } 19666 } 19667 break; 19668 19669 case MHIOCGRP_REGISTER: 19670 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 19671 if ((err = drv_priv(cred_p)) != EPERM) { 19672 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19673 err = ENOTSUP; 19674 } else if (arg != NULL) { 19675 mhioc_register_t reg; 19676 if (ddi_copyin((void *)arg, ®, 19677 sizeof (mhioc_register_t), flag) != 0) { 19678 err = EFAULT; 19679 } else { 19680 err = 19681 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19682 un, SD_SCSI3_REGISTER, 19683 (uchar_t *)®); 19684 } 19685 } 19686 } 19687 break; 19688 19689 case MHIOCGRP_RESERVE: 19690 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 19691 if ((err = drv_priv(cred_p)) != EPERM) { 19692 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19693 err = ENOTSUP; 19694 } else if (arg != NULL) { 19695 mhioc_resv_desc_t resv_desc; 19696 if (ddi_copyin((void *)arg, &resv_desc, 19697 sizeof (mhioc_resv_desc_t), flag) != 0) { 19698 err = EFAULT; 19699 } else { 19700 err = 19701 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19702 un, SD_SCSI3_RESERVE, 19703 (uchar_t *)&resv_desc); 19704 } 19705 } 19706 } 19707 break; 19708 19709 
case MHIOCGRP_PREEMPTANDABORT: 19710 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 19711 if ((err = drv_priv(cred_p)) != EPERM) { 19712 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19713 err = ENOTSUP; 19714 } else if (arg != NULL) { 19715 mhioc_preemptandabort_t preempt_abort; 19716 if (ddi_copyin((void *)arg, &preempt_abort, 19717 sizeof (mhioc_preemptandabort_t), 19718 flag) != 0) { 19719 err = EFAULT; 19720 } else { 19721 err = 19722 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19723 un, SD_SCSI3_PREEMPTANDABORT, 19724 (uchar_t *)&preempt_abort); 19725 } 19726 } 19727 } 19728 break; 19729 19730 case MHIOCGRP_REGISTERANDIGNOREKEY: 19731 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 19732 if ((err = drv_priv(cred_p)) != EPERM) { 19733 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19734 err = ENOTSUP; 19735 } else if (arg != NULL) { 19736 mhioc_registerandignorekey_t r_and_i; 19737 if (ddi_copyin((void *)arg, (void *)&r_and_i, 19738 sizeof (mhioc_registerandignorekey_t), 19739 flag) != 0) { 19740 err = EFAULT; 19741 } else { 19742 err = 19743 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19744 un, SD_SCSI3_REGISTERANDIGNOREKEY, 19745 (uchar_t *)&r_and_i); 19746 } 19747 } 19748 } 19749 break; 19750 19751 case USCSICMD: 19752 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 19753 cr = ddi_get_cred(); 19754 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 19755 err = EPERM; 19756 } else { 19757 enum uio_seg uioseg; 19758 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 19759 UIO_USERSPACE; 19760 if (un->un_f_format_in_progress == TRUE) { 19761 err = EAGAIN; 19762 break; 19763 } 19764 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 19765 flag, uioseg, SD_PATH_STANDARD); 19766 } 19767 break; 19768 19769 case CDROMPAUSE: 19770 case CDROMRESUME: 19771 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 19772 if (!ISCD(un)) { 19773 err = ENOTTY; 19774 } else { 19775 err = sr_pause_resume(dev, cmd); 19776 } 19777 break; 19778 19779 case CDROMPLAYMSF: 19780 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 19781 if (!ISCD(un)) { 19782 err = ENOTTY; 19783 } else { 19784 err = sr_play_msf(dev, (caddr_t)arg, flag); 19785 } 19786 break; 19787 19788 case CDROMPLAYTRKIND: 19789 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 19790 #if defined(__i386) || defined(__amd64) 19791 /* 19792 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 19793 */ 19794 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 19795 #else 19796 if (!ISCD(un)) { 19797 #endif 19798 err = ENOTTY; 19799 } else { 19800 err = sr_play_trkind(dev, (caddr_t)arg, flag); 19801 } 19802 break; 19803 19804 case CDROMREADTOCHDR: 19805 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 19806 if (!ISCD(un)) { 19807 err = ENOTTY; 19808 } else { 19809 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 19810 } 19811 break; 19812 19813 case CDROMREADTOCENTRY: 19814 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 19815 if (!ISCD(un)) { 19816 err = ENOTTY; 19817 } else { 19818 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 19819 } 19820 break; 19821 19822 case CDROMSTOP: 19823 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 19824 if (!ISCD(un)) { 19825 err = ENOTTY; 19826 } else { 19827 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 19828 SD_PATH_STANDARD); 19829 } 19830 break; 19831 19832 case CDROMSTART: 19833 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 19834 if (!ISCD(un)) { 19835 err = ENOTTY; 19836 } else { 19837 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 19838 SD_PATH_STANDARD); 19839 } 19840 break; 
19841 19842 case CDROMCLOSETRAY: 19843 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 19844 if (!ISCD(un)) { 19845 err = ENOTTY; 19846 } else { 19847 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 19848 SD_PATH_STANDARD); 19849 } 19850 break; 19851 19852 case FDEJECT: /* for eject command */ 19853 case DKIOCEJECT: 19854 case CDROMEJECT: 19855 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 19856 if (!un->un_f_eject_media_supported) { 19857 err = ENOTTY; 19858 } else { 19859 err = sr_eject(dev); 19860 } 19861 break; 19862 19863 case CDROMVOLCTRL: 19864 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 19865 if (!ISCD(un)) { 19866 err = ENOTTY; 19867 } else { 19868 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 19869 } 19870 break; 19871 19872 case CDROMSUBCHNL: 19873 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 19874 if (!ISCD(un)) { 19875 err = ENOTTY; 19876 } else { 19877 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 19878 } 19879 break; 19880 19881 case CDROMREADMODE2: 19882 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 19883 if (!ISCD(un)) { 19884 err = ENOTTY; 19885 } else if (un->un_f_cfg_is_atapi == TRUE) { 19886 /* 19887 * If the drive supports READ CD, use that instead of 19888 * switching the LBA size via a MODE SELECT 19889 * Block Descriptor 19890 */ 19891 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 19892 } else { 19893 err = sr_read_mode2(dev, (caddr_t)arg, flag); 19894 } 19895 break; 19896 19897 case CDROMREADMODE1: 19898 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 19899 if (!ISCD(un)) { 19900 err = ENOTTY; 19901 } else { 19902 err = sr_read_mode1(dev, (caddr_t)arg, flag); 19903 } 19904 break; 19905 19906 case CDROMREADOFFSET: 19907 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 19908 if (!ISCD(un)) { 19909 err = ENOTTY; 19910 } else { 19911 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 19912 flag); 19913 } 19914 break; 19915 19916 case CDROMSBLKMODE: 19917 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 19918 /* 19919 * There is no means of changing block size in case of atapi 19920 * drives, thus return ENOTTY if drive type is atapi 19921 */ 19922 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 19923 err = ENOTTY; 19924 } else if (un->un_f_mmc_cap == TRUE) { 19925 19926 /* 19927 * MMC Devices do not support changing the 19928 * logical block size 19929 * 19930 * Note: EINVAL is being returned instead of ENOTTY to 19931 * maintain consistancy with the original mmc 19932 * driver update. 19933 */ 19934 err = EINVAL; 19935 } else { 19936 mutex_enter(SD_MUTEX(un)); 19937 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 19938 (un->un_ncmds_in_transport > 0)) { 19939 mutex_exit(SD_MUTEX(un)); 19940 err = EINVAL; 19941 } else { 19942 mutex_exit(SD_MUTEX(un)); 19943 err = sr_change_blkmode(dev, cmd, arg, flag); 19944 } 19945 } 19946 break; 19947 19948 case CDROMGBLKMODE: 19949 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 19950 if (!ISCD(un)) { 19951 err = ENOTTY; 19952 } else if ((un->un_f_cfg_is_atapi != FALSE) && 19953 (un->un_f_blockcount_is_valid != FALSE)) { 19954 /* 19955 * Drive is an ATAPI drive so return target block 19956 * size for ATAPI drives since we cannot change the 19957 * blocksize on ATAPI drives. Used primarily to detect 19958 * if an ATAPI cdrom is present. 19959 */ 19960 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 19961 sizeof (int), flag) != 0) { 19962 err = EFAULT; 19963 } else { 19964 err = 0; 19965 } 19966 19967 } else { 19968 /* 19969 * Drive supports changing block sizes via a Mode 19970 * Select. 
19971 */ 19972 err = sr_change_blkmode(dev, cmd, arg, flag); 19973 } 19974 break; 19975 19976 case CDROMGDRVSPEED: 19977 case CDROMSDRVSPEED: 19978 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 19979 if (!ISCD(un)) { 19980 err = ENOTTY; 19981 } else if (un->un_f_mmc_cap == TRUE) { 19982 /* 19983 * Note: In the future the driver implementation 19984 * for getting and 19985 * setting cd speed should entail: 19986 * 1) If non-mmc try the Toshiba mode page 19987 * (sr_change_speed) 19988 * 2) If mmc but no support for Real Time Streaming try 19989 * the SET CD SPEED (0xBB) command 19990 * (sr_atapi_change_speed) 19991 * 3) If mmc and support for Real Time Streaming 19992 * try the GET PERFORMANCE and SET STREAMING 19993 * commands (not yet implemented, 4380808) 19994 */ 19995 /* 19996 * As per recent MMC spec, CD-ROM speed is variable 19997 * and changes with LBA. Since there is no such 19998 * things as drive speed now, fail this ioctl. 19999 * 20000 * Note: EINVAL is returned for consistancy of original 20001 * implementation which included support for getting 20002 * the drive speed of mmc devices but not setting 20003 * the drive speed. Thus EINVAL would be returned 20004 * if a set request was made for an mmc device. 20005 * We no longer support get or set speed for 20006 * mmc but need to remain consistent with regard 20007 * to the error code returned. 20008 */ 20009 err = EINVAL; 20010 } else if (un->un_f_cfg_is_atapi == TRUE) { 20011 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20012 } else { 20013 err = sr_change_speed(dev, cmd, arg, flag); 20014 } 20015 break; 20016 20017 case CDROMCDDA: 20018 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20019 if (!ISCD(un)) { 20020 err = ENOTTY; 20021 } else { 20022 err = sr_read_cdda(dev, (void *)arg, flag); 20023 } 20024 break; 20025 20026 case CDROMCDXA: 20027 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20028 if (!ISCD(un)) { 20029 err = ENOTTY; 20030 } else { 20031 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20032 } 20033 break; 20034 20035 case CDROMSUBCODE: 20036 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20037 if (!ISCD(un)) { 20038 err = ENOTTY; 20039 } else { 20040 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20041 } 20042 break; 20043 20044 20045 #ifdef SDDEBUG 20046 /* RESET/ABORTS testing ioctls */ 20047 case DKIOCRESET: { 20048 int reset_level; 20049 20050 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20051 err = EFAULT; 20052 } else { 20053 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20054 "reset_level = 0x%lx\n", reset_level); 20055 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20056 err = 0; 20057 } else { 20058 err = EIO; 20059 } 20060 } 20061 break; 20062 } 20063 20064 case DKIOCABORT: 20065 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20066 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20067 err = 0; 20068 } else { 20069 err = EIO; 20070 } 20071 break; 20072 #endif 20073 20074 #ifdef SD_FAULT_INJECTION 20075 /* SDIOC FaultInjection testing ioctls */ 20076 case SDIOCSTART: 20077 case SDIOCSTOP: 20078 case SDIOCINSERTPKT: 20079 case SDIOCINSERTXB: 20080 case SDIOCINSERTUN: 20081 case SDIOCINSERTARQ: 20082 case SDIOCPUSH: 20083 case SDIOCRETRIEVE: 20084 case SDIOCRUN: 20085 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 20086 "SDIOC detected cmd:0x%X:\n", cmd); 20087 /* call error generator */ 20088 sd_faultinjection_ioctl(cmd, arg, un); 20089 err = 0; 20090 break; 20091 20092 #endif /* SD_FAULT_INJECTION */ 20093 20094 case DKIOCFLUSHWRITECACHE: 20095 { 20096 struct dk_callback *dkc = (struct 
dk_callback *)arg; 20097 20098 mutex_enter(SD_MUTEX(un)); 20099 if (!un->un_f_sync_cache_supported || 20100 !un->un_f_write_cache_enabled) { 20101 err = un->un_f_sync_cache_supported ? 20102 0 : ENOTSUP; 20103 mutex_exit(SD_MUTEX(un)); 20104 if ((flag & FKIOCTL) && dkc != NULL && 20105 dkc->dkc_callback != NULL) { 20106 (*dkc->dkc_callback)(dkc->dkc_cookie, 20107 err); 20108 /* 20109 * Did callback and reported error. 20110 * Since we did a callback, ioctl 20111 * should return 0. 20112 */ 20113 err = 0; 20114 } 20115 break; 20116 } 20117 mutex_exit(SD_MUTEX(un)); 20118 20119 if ((flag & FKIOCTL) && dkc != NULL && 20120 dkc->dkc_callback != NULL) { 20121 /* async SYNC CACHE request */ 20122 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20123 } else { 20124 /* synchronous SYNC CACHE request */ 20125 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20126 } 20127 } 20128 break; 20129 20130 case DKIOCGETWCE: { 20131 20132 int wce; 20133 20134 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20135 break; 20136 } 20137 20138 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20139 err = EFAULT; 20140 } 20141 break; 20142 } 20143 20144 case DKIOCSETWCE: { 20145 20146 int wce, sync_supported; 20147 20148 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20149 err = EFAULT; 20150 break; 20151 } 20152 20153 /* 20154 * Synchronize multiple threads trying to enable 20155 * or disable the cache via the un_f_wcc_cv 20156 * condition variable. 20157 */ 20158 mutex_enter(SD_MUTEX(un)); 20159 20160 /* 20161 * Don't allow the cache to be enabled if the 20162 * config file has it disabled. 20163 */ 20164 if (un->un_f_opt_disable_cache && wce) { 20165 mutex_exit(SD_MUTEX(un)); 20166 err = EINVAL; 20167 break; 20168 } 20169 20170 /* 20171 * Wait for write cache change in progress 20172 * bit to be clear before proceeding. 20173 */ 20174 while (un->un_f_wcc_inprog) 20175 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20176 20177 un->un_f_wcc_inprog = 1; 20178 20179 if (un->un_f_write_cache_enabled && wce == 0) { 20180 /* 20181 * Disable the write cache. Don't clear 20182 * un_f_write_cache_enabled until after 20183 * the mode select and flush are complete. 20184 */ 20185 sync_supported = un->un_f_sync_cache_supported; 20186 mutex_exit(SD_MUTEX(un)); 20187 if ((err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20188 SD_CACHE_DISABLE)) == 0 && sync_supported) { 20189 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20190 } 20191 20192 mutex_enter(SD_MUTEX(un)); 20193 if (err == 0) { 20194 un->un_f_write_cache_enabled = 0; 20195 } 20196 20197 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20198 /* 20199 * Set un_f_write_cache_enabled first, so there is 20200 * no window where the cache is enabled, but the 20201 * bit says it isn't. 
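			 * With the flag still clear, a concurrent
			 * DKIOCFLUSHWRITECACHE would be completed without
			 * issuing a SYNCHRONIZE CACHE (see the handling of
			 * that ioctl above), silently skipping a flush the
			 * caller depends on.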
20202 */ 20203 un->un_f_write_cache_enabled = 1; 20204 mutex_exit(SD_MUTEX(un)); 20205 20206 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20207 SD_CACHE_ENABLE); 20208 20209 mutex_enter(SD_MUTEX(un)); 20210 20211 if (err) { 20212 un->un_f_write_cache_enabled = 0; 20213 } 20214 } 20215 20216 un->un_f_wcc_inprog = 0; 20217 cv_broadcast(&un->un_wcc_cv); 20218 mutex_exit(SD_MUTEX(un)); 20219 break; 20220 } 20221 20222 default: 20223 err = ENOTTY; 20224 break; 20225 } 20226 mutex_enter(SD_MUTEX(un)); 20227 un->un_ncmds_in_driver--; 20228 ASSERT(un->un_ncmds_in_driver >= 0); 20229 mutex_exit(SD_MUTEX(un)); 20230 20231 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20232 return (err); 20233 } 20234 20235 20236 /* 20237 * Function: sd_dkio_ctrl_info 20238 * 20239 * Description: This routine is the driver entry point for handling controller 20240 * information ioctl requests (DKIOCINFO). 20241 * 20242 * Arguments: dev - the device number 20243 * arg - pointer to user provided dk_cinfo structure 20244 * specifying the controller type and attributes. 20245 * flag - this argument is a pass through to ddi_copyxxx() 20246 * directly from the mode argument of ioctl(). 20247 * 20248 * Return Code: 0 20249 * EFAULT 20250 * ENXIO 20251 */ 20252 20253 static int 20254 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20255 { 20256 struct sd_lun *un = NULL; 20257 struct dk_cinfo *info; 20258 dev_info_t *pdip; 20259 int lun, tgt; 20260 20261 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20262 return (ENXIO); 20263 } 20264 20265 info = (struct dk_cinfo *) 20266 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20267 20268 switch (un->un_ctype) { 20269 case CTYPE_CDROM: 20270 info->dki_ctype = DKC_CDROM; 20271 break; 20272 default: 20273 info->dki_ctype = DKC_SCSI_CCS; 20274 break; 20275 } 20276 pdip = ddi_get_parent(SD_DEVINFO(un)); 20277 info->dki_cnum = ddi_get_instance(pdip); 20278 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20279 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20280 } else { 20281 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20282 DK_DEVLEN - 1); 20283 } 20284 20285 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20286 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20287 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20288 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20289 20290 /* Unit Information */ 20291 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20292 info->dki_slave = ((tgt << 3) | lun); 20293 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20294 DK_DEVLEN - 1); 20295 info->dki_flags = DKI_FMTVOL; 20296 info->dki_partition = SDPART(dev); 20297 20298 /* Max Transfer size of this device in blocks */ 20299 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20300 info->dki_addr = 0; 20301 info->dki_space = 0; 20302 info->dki_prio = 0; 20303 info->dki_vec = 0; 20304 20305 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20306 kmem_free(info, sizeof (struct dk_cinfo)); 20307 return (EFAULT); 20308 } else { 20309 kmem_free(info, sizeof (struct dk_cinfo)); 20310 return (0); 20311 } 20312 } 20313 20314 20315 /* 20316 * Function: sd_get_media_info 20317 * 20318 * Description: This routine is the driver entry point for handling ioctl 20319 * requests for the media type or command set profile used by the 20320 * drive to operate on the media (DKIOCGMEDIAINFO). 
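 *
 *		Illustrative userland usage (a sketch, not part of this
 *		driver; the device path is hypothetical and error handling
 *		is omitted):
 *
 *		    struct dk_minfo	mi;
 *		    int			fd;
 *
 *		    fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *		    if (fd >= 0 && ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0)
 *			(void) printf("type %u, lbsize %u, capacity %llu\n",
 *			    mi.dki_media_type, mi.dki_lbsize,
 *			    (u_longlong_t)mi.dki_capacity);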
20321 * 20322 * Arguments: dev - the device number 20323 * arg - pointer to user provided dk_minfo structure 20324 * specifying the media type, logical block size and 20325 * drive capacity. 20326 * flag - this argument is a pass through to ddi_copyxxx() 20327 * directly from the mode argument of ioctl(). 20328 * 20329 * Return Code: 0 20330 * EACCESS 20331 * EFAULT 20332 * ENXIO 20333 * EIO 20334 */ 20335 20336 static int 20337 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20338 { 20339 struct sd_lun *un = NULL; 20340 struct uscsi_cmd com; 20341 struct scsi_inquiry *sinq; 20342 struct dk_minfo media_info; 20343 u_longlong_t media_capacity; 20344 uint64_t capacity; 20345 uint_t lbasize; 20346 uchar_t *out_data; 20347 uchar_t *rqbuf; 20348 int rval = 0; 20349 int rtn; 20350 20351 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20352 (un->un_state == SD_STATE_OFFLINE)) { 20353 return (ENXIO); 20354 } 20355 20356 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20357 20358 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20359 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20360 20361 /* Issue a TUR to determine if the drive is ready with media present */ 20362 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20363 if (rval == ENXIO) { 20364 goto done; 20365 } 20366 20367 /* Now get configuration data */ 20368 if (ISCD(un)) { 20369 media_info.dki_media_type = DK_CDROM; 20370 20371 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20372 if (un->un_f_mmc_cap == TRUE) { 20373 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20374 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20375 SD_PATH_STANDARD); 20376 20377 if (rtn) { 20378 /* 20379 * Failed for other than an illegal request 20380 * or command not supported 20381 */ 20382 if ((com.uscsi_status == STATUS_CHECK) && 20383 (com.uscsi_rqstatus == STATUS_GOOD)) { 20384 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20385 (rqbuf[12] != 0x20)) { 20386 rval = EIO; 20387 goto done; 20388 } 20389 } 20390 } else { 20391 /* 20392 * The GET CONFIGURATION command succeeded 20393 * so set the media type according to the 20394 * returned data 20395 */ 20396 media_info.dki_media_type = out_data[6]; 20397 media_info.dki_media_type <<= 8; 20398 media_info.dki_media_type |= out_data[7]; 20399 } 20400 } 20401 } else { 20402 /* 20403 * The profile list is not available, so we attempt to identify 20404 * the media type based on the inquiry data 20405 */ 20406 sinq = un->un_sd->sd_inq; 20407 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20408 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20409 /* This is a direct access device or optical disk */ 20410 media_info.dki_media_type = DK_FIXED_DISK; 20411 20412 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20413 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20414 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20415 media_info.dki_media_type = DK_ZIP; 20416 } else if ( 20417 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20418 media_info.dki_media_type = DK_JAZ; 20419 } 20420 } 20421 } else { 20422 /* 20423 * Not a CD, direct access or optical disk so return 20424 * unknown media 20425 */ 20426 media_info.dki_media_type = DK_UNKNOWN; 20427 } 20428 } 20429 20430 /* Now read the capacity so we can provide the lbasize and capacity */ 20431 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 20432 SD_PATH_DIRECT)) { 20433 case 0: 20434 break; 20435 case EACCES: 20436 rval = EACCES; 20437 goto done; 20438 default: 20439 rval = EIO; 20440 goto done; 20441 } 20442 20443 
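	/*
	 * A worked example of the conversion done below (a sketch using
	 * typical values): if READ CAPACITY reports 1,333,000 chunks of
	 * un_sys_blocksize (taken as 512 bytes here) and the media lbasize
	 * is 2048, the reported capacity becomes
	 * 1,333,000 * 512 / 2048 = 333,250 blocks.
	 */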
media_info.dki_lbsize = lbasize; 20444 media_capacity = capacity; 20445 20446 /* 20447 * sd_send_scsi_READ_CAPACITY() reports capacity in 20448 * un->un_sys_blocksize chunks. So we need to convert it into 20449 * cap.lbasize chunks. 20450 */ 20451 media_capacity *= un->un_sys_blocksize; 20452 media_capacity /= lbasize; 20453 media_info.dki_capacity = media_capacity; 20454 20455 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 20456 rval = EFAULT; 20457 /* Put goto. Anybody might add some code below in future */ 20458 goto done; 20459 } 20460 done: 20461 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 20462 kmem_free(rqbuf, SENSE_LENGTH); 20463 return (rval); 20464 } 20465 20466 20467 /* 20468 * Function: sd_check_media 20469 * 20470 * Description: This utility routine implements the functionality for the 20471 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 20472 * driver state changes from that specified by the user 20473 * (inserted or ejected). For example, if the user specifies 20474 * DKIO_EJECTED and the current media state is inserted this 20475 * routine will immediately return DKIO_INSERTED. However, if the 20476 * current media state is not inserted the user thread will be 20477 * blocked until the drive state changes. If DKIO_NONE is specified 20478 * the user thread will block until a drive state change occurs. 20479 * 20480 * Arguments: dev - the device number 20481 * state - user pointer to a dkio_state, updated with the current 20482 * drive state at return. 20483 * 20484 * Return Code: ENXIO 20485 * EIO 20486 * EAGAIN 20487 * EINTR 20488 */ 20489 20490 static int 20491 sd_check_media(dev_t dev, enum dkio_state state) 20492 { 20493 struct sd_lun *un = NULL; 20494 enum dkio_state prev_state; 20495 opaque_t token = NULL; 20496 int rval = 0; 20497 20498 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20499 return (ENXIO); 20500 } 20501 20502 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 20503 20504 mutex_enter(SD_MUTEX(un)); 20505 20506 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 20507 "state=%x, mediastate=%x\n", state, un->un_mediastate); 20508 20509 prev_state = un->un_mediastate; 20510 20511 /* is there anything to do? */ 20512 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 20513 /* 20514 * submit the request to the scsi_watch service; 20515 * scsi_media_watch_cb() does the real work 20516 */ 20517 mutex_exit(SD_MUTEX(un)); 20518 20519 /* 20520 * This change handles the case where a scsi watch request is 20521 * added to a device that is powered down. To accomplish this 20522 * we power up the device before adding the scsi watch request, 20523 * since the scsi watch sends a TUR directly to the device 20524 * which the device cannot handle if it is powered down. 20525 */ 20526 if (sd_pm_entry(un) != DDI_SUCCESS) { 20527 mutex_enter(SD_MUTEX(un)); 20528 goto done; 20529 } 20530 20531 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 20532 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 20533 (caddr_t)dev); 20534 20535 sd_pm_exit(un); 20536 20537 mutex_enter(SD_MUTEX(un)); 20538 if (token == NULL) { 20539 rval = EAGAIN; 20540 goto done; 20541 } 20542 20543 /* 20544 * This is a special case IOCTL that doesn't return 20545 * until the media state changes. Routine sdpower 20546 * knows about and handles this so don't count it 20547 * as an active cmd in the driver, which would 20548 * keep the device busy to the pm framework. 
20549 * If the count isn't decremented the device can't 20550 * be powered down. 20551 */ 20552 un->un_ncmds_in_driver--; 20553 ASSERT(un->un_ncmds_in_driver >= 0); 20554 20555 /* 20556 * if a prior request had been made, this will be the same 20557 * token, as scsi_watch was designed that way. 20558 */ 20559 un->un_swr_token = token; 20560 un->un_specified_mediastate = state; 20561 20562 /* 20563 * now wait for media change 20564 * we will not be signalled unless mediastate == state but it is 20565 * still better to test for this condition, since there is a 20566 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 20567 */ 20568 SD_TRACE(SD_LOG_COMMON, un, 20569 "sd_check_media: waiting for media state change\n"); 20570 while (un->un_mediastate == state) { 20571 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20572 SD_TRACE(SD_LOG_COMMON, un, 20573 "sd_check_media: waiting for media state " 20574 "was interrupted\n"); 20575 un->un_ncmds_in_driver++; 20576 rval = EINTR; 20577 goto done; 20578 } 20579 SD_TRACE(SD_LOG_COMMON, un, 20580 "sd_check_media: received signal, state=%x\n", 20581 un->un_mediastate); 20582 } 20583 /* 20584 * Inc the counter to indicate the device once again 20585 * has an active outstanding cmd. 20586 */ 20587 un->un_ncmds_in_driver++; 20588 } 20589 20590 /* invalidate geometry */ 20591 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20592 sr_ejected(un); 20593 } 20594 20595 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20596 uint64_t capacity; 20597 uint_t lbasize; 20598 20599 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 20600 mutex_exit(SD_MUTEX(un)); 20601 /* 20602 * Since the following routines use SD_PATH_DIRECT, we must 20603 * call PM directly before the upcoming disk accesses. This 20604 * may cause the disk to be power/spin up. 20605 */ 20606 20607 if (sd_pm_entry(un) == DDI_SUCCESS) { 20608 rval = sd_send_scsi_READ_CAPACITY(un, 20609 &capacity, 20610 &lbasize, SD_PATH_DIRECT); 20611 if (rval != 0) { 20612 sd_pm_exit(un); 20613 mutex_enter(SD_MUTEX(un)); 20614 goto done; 20615 } 20616 } else { 20617 rval = EIO; 20618 mutex_enter(SD_MUTEX(un)); 20619 goto done; 20620 } 20621 mutex_enter(SD_MUTEX(un)); 20622 20623 sd_update_block_info(un, lbasize, capacity); 20624 20625 /* 20626 * Check if the media in the device is writable or not 20627 */ 20628 if (ISCD(un)) 20629 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 20630 20631 mutex_exit(SD_MUTEX(un)); 20632 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 20633 if ((cmlb_validate(un->un_cmlbhandle, 0, 20634 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 20635 sd_set_pstats(un); 20636 SD_TRACE(SD_LOG_IO_PARTITION, un, 20637 "sd_check_media: un:0x%p pstats created and " 20638 "set\n", un); 20639 } 20640 20641 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20642 SD_PATH_DIRECT); 20643 sd_pm_exit(un); 20644 20645 mutex_enter(SD_MUTEX(un)); 20646 } 20647 done: 20648 un->un_f_watcht_stopped = FALSE; 20649 if (un->un_swr_token) { 20650 /* 20651 * Use of this local token and the mutex ensures that we avoid 20652 * some race conditions associated with terminating the 20653 * scsi watch. 
20654 */ 20655 token = un->un_swr_token; 20656 un->un_swr_token = (opaque_t)NULL; 20657 mutex_exit(SD_MUTEX(un)); 20658 (void) scsi_watch_request_terminate(token, 20659 SCSI_WATCH_TERMINATE_WAIT); 20660 mutex_enter(SD_MUTEX(un)); 20661 } 20662 20663 /* 20664 * Update the capacity kstat value, if no media previously 20665 * (capacity kstat is 0) and a media has been inserted 20666 * (un_f_blockcount_is_valid == TRUE) 20667 */ 20668 if (un->un_errstats) { 20669 struct sd_errstats *stp = NULL; 20670 20671 stp = (struct sd_errstats *)un->un_errstats->ks_data; 20672 if ((stp->sd_capacity.value.ui64 == 0) && 20673 (un->un_f_blockcount_is_valid == TRUE)) { 20674 stp->sd_capacity.value.ui64 = 20675 (uint64_t)((uint64_t)un->un_blockcount * 20676 un->un_sys_blocksize); 20677 } 20678 } 20679 mutex_exit(SD_MUTEX(un)); 20680 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 20681 return (rval); 20682 } 20683 20684 20685 /* 20686 * Function: sd_delayed_cv_broadcast 20687 * 20688 * Description: Delayed cv_broadcast to allow for target to recover from media 20689 * insertion. 20690 * 20691 * Arguments: arg - driver soft state (unit) structure 20692 */ 20693 20694 static void 20695 sd_delayed_cv_broadcast(void *arg) 20696 { 20697 struct sd_lun *un = arg; 20698 20699 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 20700 20701 mutex_enter(SD_MUTEX(un)); 20702 un->un_dcvb_timeid = NULL; 20703 cv_broadcast(&un->un_state_cv); 20704 mutex_exit(SD_MUTEX(un)); 20705 } 20706 20707 20708 /* 20709 * Function: sd_media_watch_cb 20710 * 20711 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 20712 * routine processes the TUR sense data and updates the driver 20713 * state if a transition has occurred. The user thread 20714 * (sd_check_media) is then signalled. 
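 *
 *		For reference, the ioctl that this callback ultimately
 *		services is typically driven from userland by a loop such
 *		as the following (a sketch, not part of this driver;
 *		handle_state_change() is a hypothetical consumer and fd is
 *		an open descriptor on the device):
 *
 *		    enum dkio_state st = DKIO_NONE;
 *
 *		    while (ioctl(fd, DKIOCSTATE, &st) == 0) {
 *			    handle_state_change(st);
 *		    }
 *
 *		Each call blocks until the media state differs from the
 *		state passed in (see sd_check_media).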
20715 * 20716 * Arguments: arg - the device 'dev_t' is used for context to discriminate 20717 * among multiple watches that share this callback function 20718 * resultp - scsi watch facility result packet containing scsi 20719 * packet, status byte and sense data 20720 * 20721 * Return Code: 0 for success, -1 for failure 20722 */ 20723 20724 static int 20725 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 20726 { 20727 struct sd_lun *un; 20728 struct scsi_status *statusp = resultp->statusp; 20729 uint8_t *sensep = (uint8_t *)resultp->sensep; 20730 enum dkio_state state = DKIO_NONE; 20731 dev_t dev = (dev_t)arg; 20732 uchar_t actual_sense_length; 20733 uint8_t skey, asc, ascq; 20734 20735 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20736 return (-1); 20737 } 20738 actual_sense_length = resultp->actual_sense_length; 20739 20740 mutex_enter(SD_MUTEX(un)); 20741 SD_TRACE(SD_LOG_COMMON, un, 20742 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 20743 *((char *)statusp), (void *)sensep, actual_sense_length); 20744 20745 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 20746 un->un_mediastate = DKIO_DEV_GONE; 20747 cv_broadcast(&un->un_state_cv); 20748 mutex_exit(SD_MUTEX(un)); 20749 20750 return (0); 20751 } 20752 20753 /* 20754 * If there was a check condition then sensep points to valid sense data 20755 * If status was not a check condition but a reservation or busy status 20756 * then the new state is DKIO_NONE 20757 */ 20758 if (sensep != NULL) { 20759 skey = scsi_sense_key(sensep); 20760 asc = scsi_sense_asc(sensep); 20761 ascq = scsi_sense_ascq(sensep); 20762 20763 SD_INFO(SD_LOG_COMMON, un, 20764 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 20765 skey, asc, ascq); 20766 /* This routine only uses up to 13 bytes of sense data. */ 20767 if (actual_sense_length >= 13) { 20768 if (skey == KEY_UNIT_ATTENTION) { 20769 if (asc == 0x28) { 20770 state = DKIO_INSERTED; 20771 } 20772 } else if (skey == KEY_NOT_READY) { 20773 /* 20774 * if 02/04/02 means that the host 20775 * should send start command. Explicitly 20776 * leave the media state as is 20777 * (inserted) as the media is inserted 20778 * and host has stopped device for PM 20779 * reasons. Upon next true read/write 20780 * to this media will bring the 20781 * device to the right state good for 20782 * media access. 20783 */ 20784 if (asc == 0x3a) { 20785 state = DKIO_EJECTED; 20786 } else { 20787 /* 20788 * If the drive is busy with an 20789 * operation or long write, keep the 20790 * media in an inserted state. 20791 */ 20792 20793 if ((asc == 0x04) && 20794 ((ascq == 0x02) || 20795 (ascq == 0x07) || 20796 (ascq == 0x08))) { 20797 state = DKIO_INSERTED; 20798 } 20799 } 20800 } else if (skey == KEY_NO_SENSE) { 20801 if ((asc == 0x00) && (ascq == 0x00)) { 20802 /* 20803 * Sense Data 00/00/00 does not provide 20804 * any information about the state of 20805 * the media. Ignore it. 
20806 */ 20807 mutex_exit(SD_MUTEX(un)); 20808 return (0); 20809 } 20810 } 20811 } 20812 } else if ((*((char *)statusp) == STATUS_GOOD) && 20813 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 20814 state = DKIO_INSERTED; 20815 } 20816 20817 SD_TRACE(SD_LOG_COMMON, un, 20818 "sd_media_watch_cb: state=%x, specified=%x\n", 20819 state, un->un_specified_mediastate); 20820 20821 /* 20822 * now signal the waiting thread if this is *not* the specified state; 20823 * delay the signal if the state is DKIO_INSERTED to allow the target 20824 * to recover 20825 */ 20826 if (state != un->un_specified_mediastate) { 20827 un->un_mediastate = state; 20828 if (state == DKIO_INSERTED) { 20829 /* 20830 * delay the signal to give the drive a chance 20831 * to do what it apparently needs to do 20832 */ 20833 SD_TRACE(SD_LOG_COMMON, un, 20834 "sd_media_watch_cb: delayed cv_broadcast\n"); 20835 if (un->un_dcvb_timeid == NULL) { 20836 un->un_dcvb_timeid = 20837 timeout(sd_delayed_cv_broadcast, un, 20838 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 20839 } 20840 } else { 20841 SD_TRACE(SD_LOG_COMMON, un, 20842 "sd_media_watch_cb: immediate cv_broadcast\n"); 20843 cv_broadcast(&un->un_state_cv); 20844 } 20845 } 20846 mutex_exit(SD_MUTEX(un)); 20847 return (0); 20848 } 20849 20850 20851 /* 20852 * Function: sd_dkio_get_temp 20853 * 20854 * Description: This routine is the driver entry point for handling ioctl 20855 * requests to get the disk temperature. 20856 * 20857 * Arguments: dev - the device number 20858 * arg - pointer to user provided dk_temperature structure. 20859 * flag - this argument is a pass through to ddi_copyxxx() 20860 * directly from the mode argument of ioctl(). 20861 * 20862 * Return Code: 0 20863 * EFAULT 20864 * ENXIO 20865 * EAGAIN 20866 */ 20867 20868 static int 20869 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 20870 { 20871 struct sd_lun *un = NULL; 20872 struct dk_temperature *dktemp = NULL; 20873 uchar_t *temperature_page; 20874 int rval = 0; 20875 int path_flag = SD_PATH_STANDARD; 20876 20877 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20878 return (ENXIO); 20879 } 20880 20881 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 20882 20883 /* copyin the disk temp argument to get the user flags */ 20884 if (ddi_copyin((void *)arg, dktemp, 20885 sizeof (struct dk_temperature), flag) != 0) { 20886 rval = EFAULT; 20887 goto done; 20888 } 20889 20890 /* Initialize the temperature to invalid. */ 20891 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 20892 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 20893 20894 /* 20895 * Note: Investigate removing the "bypass pm" semantic. 20896 * Can we just bypass PM always? 20897 */ 20898 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 20899 path_flag = SD_PATH_DIRECT; 20900 ASSERT(!mutex_owned(&un->un_pm_mutex)); 20901 mutex_enter(&un->un_pm_mutex); 20902 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 20903 /* 20904 * If DKT_BYPASS_PM is set, and the drive happens to be 20905 * in low power mode, we can not wake it up, Need to 20906 * return EAGAIN. 20907 */ 20908 mutex_exit(&un->un_pm_mutex); 20909 rval = EAGAIN; 20910 goto done; 20911 } else { 20912 /* 20913 * Indicate to PM the device is busy. This is required 20914 * to avoid a race - i.e. the ioctl is issuing a 20915 * command and the pm framework brings down the device 20916 * to low power mode (possible power cut-off on some 20917 * platforms). 
20918 */ 20919 mutex_exit(&un->un_pm_mutex); 20920 if (sd_pm_entry(un) != DDI_SUCCESS) { 20921 rval = EAGAIN; 20922 goto done; 20923 } 20924 } 20925 } 20926 20927 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 20928 20929 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 20930 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 20931 goto done2; 20932 } 20933 20934 /* 20935 * For the current temperature verify that the parameter length is 0x02 20936 * and the parameter code is 0x00 20937 */ 20938 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 20939 (temperature_page[5] == 0x00)) { 20940 if (temperature_page[9] == 0xFF) { 20941 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 20942 } else { 20943 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 20944 } 20945 } 20946 20947 /* 20948 * For the reference temperature verify that the parameter 20949 * length is 0x02 and the parameter code is 0x01 20950 */ 20951 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 20952 (temperature_page[11] == 0x01)) { 20953 if (temperature_page[15] == 0xFF) { 20954 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 20955 } else { 20956 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 20957 } 20958 } 20959 20960 /* Do the copyout regardless of the temperature commands status. */ 20961 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 20962 flag) != 0) { 20963 rval = EFAULT; 20964 } 20965 20966 done2: 20967 if (path_flag == SD_PATH_DIRECT) { 20968 sd_pm_exit(un); 20969 } 20970 20971 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 20972 done: 20973 if (dktemp != NULL) { 20974 kmem_free(dktemp, sizeof (struct dk_temperature)); 20975 } 20976 20977 return (rval); 20978 } 20979 20980 20981 /* 20982 * Function: sd_log_page_supported 20983 * 20984 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 20985 * supported log pages. 20986 * 20987 * Arguments: un - 20988 * log_page - 20989 * 20990 * Return Code: -1 - on error (log sense is optional and may not be supported). 20991 * 0 - log page not found. 20992 * 1 - log page found. 20993 */ 20994 20995 static int 20996 sd_log_page_supported(struct sd_lun *un, int log_page) 20997 { 20998 uchar_t *log_page_data; 20999 int i; 21000 int match = 0; 21001 int log_size; 21002 21003 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21004 21005 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21006 SD_PATH_DIRECT) != 0) { 21007 SD_ERROR(SD_LOG_COMMON, un, 21008 "sd_log_page_supported: failed log page retrieval\n"); 21009 kmem_free(log_page_data, 0xFF); 21010 return (-1); 21011 } 21012 log_size = log_page_data[3]; 21013 21014 /* 21015 * The list of supported log pages start from the fourth byte. Check 21016 * until we run out of log pages or a match is found. 21017 */ 21018 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21019 if (log_page_data[i] == log_page) { 21020 match++; 21021 } 21022 } 21023 kmem_free(log_page_data, 0xFF); 21024 return (match); 21025 } 21026 21027 21028 /* 21029 * Function: sd_mhdioc_failfast 21030 * 21031 * Description: This routine is the driver entry point for handling ioctl 21032 * requests to enable/disable the multihost failfast option. 21033 * (MHIOCENFAILFAST) 21034 * 21035 * Arguments: dev - the device number 21036 * arg - user specified probing interval. 21037 * flag - this argument is a pass through to ddi_copyxxx() 21038 * directly from the mode argument of ioctl(). 
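 *
 *		Illustrative caller usage (a sketch, not part of this
 *		driver; fd is an open descriptor on the device): a cluster
 *		agent typically enables failfast right after a successful
 *		MHIOCTKOWN, e.g.
 *
 *		    int mh_time = 1000;
 *		    (void) ioctl(fd, MHIOCENFAILFAST, &mh_time);
 *
 *		where mh_time is the probing interval in milliseconds;
 *		passing zero disables failfast again.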
21039 * 21040 * Return Code: 0 21041 * EFAULT 21042 * ENXIO 21043 */ 21044 21045 static int 21046 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21047 { 21048 struct sd_lun *un = NULL; 21049 int mh_time; 21050 int rval = 0; 21051 21052 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21053 return (ENXIO); 21054 } 21055 21056 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21057 return (EFAULT); 21058 21059 if (mh_time) { 21060 mutex_enter(SD_MUTEX(un)); 21061 un->un_resvd_status |= SD_FAILFAST; 21062 mutex_exit(SD_MUTEX(un)); 21063 /* 21064 * If mh_time is INT_MAX, then this ioctl is being used for 21065 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 21066 */ 21067 if (mh_time != INT_MAX) { 21068 rval = sd_check_mhd(dev, mh_time); 21069 } 21070 } else { 21071 (void) sd_check_mhd(dev, 0); 21072 mutex_enter(SD_MUTEX(un)); 21073 un->un_resvd_status &= ~SD_FAILFAST; 21074 mutex_exit(SD_MUTEX(un)); 21075 } 21076 return (rval); 21077 } 21078 21079 21080 /* 21081 * Function: sd_mhdioc_takeown 21082 * 21083 * Description: This routine is the driver entry point for handling ioctl 21084 * requests to forcefully acquire exclusive access rights to the 21085 * multihost disk (MHIOCTKOWN). 21086 * 21087 * Arguments: dev - the device number 21088 * arg - user provided structure specifying the delay 21089 * parameters in milliseconds 21090 * flag - this argument is a pass through to ddi_copyxxx() 21091 * directly from the mode argument of ioctl(). 21092 * 21093 * Return Code: 0 21094 * EFAULT 21095 * ENXIO 21096 */ 21097 21098 static int 21099 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21100 { 21101 struct sd_lun *un = NULL; 21102 struct mhioctkown *tkown = NULL; 21103 int rval = 0; 21104 21105 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21106 return (ENXIO); 21107 } 21108 21109 if (arg != NULL) { 21110 tkown = (struct mhioctkown *) 21111 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21112 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21113 if (rval != 0) { 21114 rval = EFAULT; 21115 goto error; 21116 } 21117 } 21118 21119 rval = sd_take_ownership(dev, tkown); 21120 mutex_enter(SD_MUTEX(un)); 21121 if (rval == 0) { 21122 un->un_resvd_status |= SD_RESERVE; 21123 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21124 sd_reinstate_resv_delay = 21125 tkown->reinstate_resv_delay * 1000; 21126 } else { 21127 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21128 } 21129 /* 21130 * Give the scsi_watch routine interval set by 21131 * the MHIOCENFAILFAST ioctl precedence here. 21132 */ 21133 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21134 mutex_exit(SD_MUTEX(un)); 21135 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21136 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21137 "sd_mhdioc_takeown : %d\n", 21138 sd_reinstate_resv_delay); 21139 } else { 21140 mutex_exit(SD_MUTEX(un)); 21141 } 21142 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21143 sd_mhd_reset_notify_cb, (caddr_t)un); 21144 } else { 21145 un->un_resvd_status &= ~SD_RESERVE; 21146 mutex_exit(SD_MUTEX(un)); 21147 } 21148 21149 error: 21150 if (tkown != NULL) { 21151 kmem_free(tkown, sizeof (struct mhioctkown)); 21152 } 21153 return (rval); 21154 } 21155 21156 21157 /* 21158 * Function: sd_mhdioc_release 21159 * 21160 * Description: This routine is the driver entry point for handling ioctl 21161 * requests to release exclusive access rights to the multihost 21162 * disk (MHIOCRELEASE). 
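 *
 *		Illustrative caller usage (a sketch, not part of this
 *		driver; fd is an open descriptor on the device): this is
 *		the counterpart of MHIOCTKOWN and takes no argument, e.g.
 *
 *		    (void) ioctl(fd, MHIOCRELEASE, 0);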
21163 * 21164 * Arguments: dev - the device number 21165 * 21166 * Return Code: 0 21167 * ENXIO 21168 */ 21169 21170 static int 21171 sd_mhdioc_release(dev_t dev) 21172 { 21173 struct sd_lun *un = NULL; 21174 timeout_id_t resvd_timeid_save; 21175 int resvd_status_save; 21176 int rval = 0; 21177 21178 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21179 return (ENXIO); 21180 } 21181 21182 mutex_enter(SD_MUTEX(un)); 21183 resvd_status_save = un->un_resvd_status; 21184 un->un_resvd_status &= 21185 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21186 if (un->un_resvd_timeid) { 21187 resvd_timeid_save = un->un_resvd_timeid; 21188 un->un_resvd_timeid = NULL; 21189 mutex_exit(SD_MUTEX(un)); 21190 (void) untimeout(resvd_timeid_save); 21191 } else { 21192 mutex_exit(SD_MUTEX(un)); 21193 } 21194 21195 /* 21196 * destroy any pending timeout thread that may be attempting to 21197 * reinstate reservation on this device. 21198 */ 21199 sd_rmv_resv_reclaim_req(dev); 21200 21201 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21202 mutex_enter(SD_MUTEX(un)); 21203 if ((un->un_mhd_token) && 21204 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21205 mutex_exit(SD_MUTEX(un)); 21206 (void) sd_check_mhd(dev, 0); 21207 } else { 21208 mutex_exit(SD_MUTEX(un)); 21209 } 21210 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21211 sd_mhd_reset_notify_cb, (caddr_t)un); 21212 } else { 21213 /* 21214 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21215 */ 21216 mutex_enter(SD_MUTEX(un)); 21217 un->un_resvd_status = resvd_status_save; 21218 mutex_exit(SD_MUTEX(un)); 21219 } 21220 return (rval); 21221 } 21222 21223 21224 /* 21225 * Function: sd_mhdioc_register_devid 21226 * 21227 * Description: This routine is the driver entry point for handling ioctl 21228 * requests to register the device id (MHIOCREREGISTERDEVID). 21229 * 21230 * Note: The implementation for this ioctl has been updated to 21231 * be consistent with the original PSARC case (1999/357) 21232 * (4375899, 4241671, 4220005) 21233 * 21234 * Arguments: dev - the device number 21235 * 21236 * Return Code: 0 21237 * ENXIO 21238 */ 21239 21240 static int 21241 sd_mhdioc_register_devid(dev_t dev) 21242 { 21243 struct sd_lun *un = NULL; 21244 int rval = 0; 21245 21246 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21247 return (ENXIO); 21248 } 21249 21250 ASSERT(!mutex_owned(SD_MUTEX(un))); 21251 21252 mutex_enter(SD_MUTEX(un)); 21253 21254 /* If a devid already exists, de-register it */ 21255 if (un->un_devid != NULL) { 21256 ddi_devid_unregister(SD_DEVINFO(un)); 21257 /* 21258 * After unregister devid, needs to free devid memory 21259 */ 21260 ddi_devid_free(un->un_devid); 21261 un->un_devid = NULL; 21262 } 21263 21264 /* Check for reservation conflict */ 21265 mutex_exit(SD_MUTEX(un)); 21266 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21267 mutex_enter(SD_MUTEX(un)); 21268 21269 switch (rval) { 21270 case 0: 21271 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21272 break; 21273 case EACCES: 21274 break; 21275 default: 21276 rval = EIO; 21277 } 21278 21279 mutex_exit(SD_MUTEX(un)); 21280 return (rval); 21281 } 21282 21283 21284 /* 21285 * Function: sd_mhdioc_inkeys 21286 * 21287 * Description: This routine is the driver entry point for handling ioctl 21288 * requests to issue the SCSI-3 Persistent In Read Keys command 21289 * to the device (MHIOCGRP_INKEYS). 
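 *
 *		Illustrative caller usage (a sketch, not part of this
 *		driver; the list length of four is arbitrary and fd is an
 *		open descriptor on the device):
 *
 *		    mhioc_resv_key_t	keys[4];
 *		    mhioc_key_list_t	kl = { 0 };
 *		    mhioc_inkeys_t	ik = { 0 };
 *
 *		    kl.listsize = 4;
 *		    kl.list = keys;
 *		    ik.li = &kl;
 *		    if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0)
 *			(void) printf("%u keys registered\n", kl.listlen);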
21290 * 21291 * Arguments: dev - the device number 21292 * arg - user provided in_keys structure 21293 * flag - this argument is a pass through to ddi_copyxxx() 21294 * directly from the mode argument of ioctl(). 21295 * 21296 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21297 * ENXIO 21298 * EFAULT 21299 */ 21300 21301 static int 21302 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21303 { 21304 struct sd_lun *un; 21305 mhioc_inkeys_t inkeys; 21306 int rval = 0; 21307 21308 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21309 return (ENXIO); 21310 } 21311 21312 #ifdef _MULTI_DATAMODEL 21313 switch (ddi_model_convert_from(flag & FMODELS)) { 21314 case DDI_MODEL_ILP32: { 21315 struct mhioc_inkeys32 inkeys32; 21316 21317 if (ddi_copyin(arg, &inkeys32, 21318 sizeof (struct mhioc_inkeys32), flag) != 0) { 21319 return (EFAULT); 21320 } 21321 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21322 if ((rval = sd_persistent_reservation_in_read_keys(un, 21323 &inkeys, flag)) != 0) { 21324 return (rval); 21325 } 21326 inkeys32.generation = inkeys.generation; 21327 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21328 flag) != 0) { 21329 return (EFAULT); 21330 } 21331 break; 21332 } 21333 case DDI_MODEL_NONE: 21334 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21335 flag) != 0) { 21336 return (EFAULT); 21337 } 21338 if ((rval = sd_persistent_reservation_in_read_keys(un, 21339 &inkeys, flag)) != 0) { 21340 return (rval); 21341 } 21342 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21343 flag) != 0) { 21344 return (EFAULT); 21345 } 21346 break; 21347 } 21348 21349 #else /* ! _MULTI_DATAMODEL */ 21350 21351 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21352 return (EFAULT); 21353 } 21354 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21355 if (rval != 0) { 21356 return (rval); 21357 } 21358 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21359 return (EFAULT); 21360 } 21361 21362 #endif /* _MULTI_DATAMODEL */ 21363 21364 return (rval); 21365 } 21366 21367 21368 /* 21369 * Function: sd_mhdioc_inresv 21370 * 21371 * Description: This routine is the driver entry point for handling ioctl 21372 * requests to issue the SCSI-3 Persistent In Read Reservations 21373 * command to the device (MHIOCGRP_INKEYS). 21374 * 21375 * Arguments: dev - the device number 21376 * arg - user provided in_resv structure 21377 * flag - this argument is a pass through to ddi_copyxxx() 21378 * directly from the mode argument of ioctl(). 
21379 * 21380 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21381 * ENXIO 21382 * EFAULT 21383 */ 21384 21385 static int 21386 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21387 { 21388 struct sd_lun *un; 21389 mhioc_inresvs_t inresvs; 21390 int rval = 0; 21391 21392 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21393 return (ENXIO); 21394 } 21395 21396 #ifdef _MULTI_DATAMODEL 21397 21398 switch (ddi_model_convert_from(flag & FMODELS)) { 21399 case DDI_MODEL_ILP32: { 21400 struct mhioc_inresvs32 inresvs32; 21401 21402 if (ddi_copyin(arg, &inresvs32, 21403 sizeof (struct mhioc_inresvs32), flag) != 0) { 21404 return (EFAULT); 21405 } 21406 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21407 if ((rval = sd_persistent_reservation_in_read_resv(un, 21408 &inresvs, flag)) != 0) { 21409 return (rval); 21410 } 21411 inresvs32.generation = inresvs.generation; 21412 if (ddi_copyout(&inresvs32, arg, 21413 sizeof (struct mhioc_inresvs32), flag) != 0) { 21414 return (EFAULT); 21415 } 21416 break; 21417 } 21418 case DDI_MODEL_NONE: 21419 if (ddi_copyin(arg, &inresvs, 21420 sizeof (mhioc_inresvs_t), flag) != 0) { 21421 return (EFAULT); 21422 } 21423 if ((rval = sd_persistent_reservation_in_read_resv(un, 21424 &inresvs, flag)) != 0) { 21425 return (rval); 21426 } 21427 if (ddi_copyout(&inresvs, arg, 21428 sizeof (mhioc_inresvs_t), flag) != 0) { 21429 return (EFAULT); 21430 } 21431 break; 21432 } 21433 21434 #else /* ! _MULTI_DATAMODEL */ 21435 21436 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21437 return (EFAULT); 21438 } 21439 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21440 if (rval != 0) { 21441 return (rval); 21442 } 21443 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 21444 return (EFAULT); 21445 } 21446 21447 #endif /* ! _MULTI_DATAMODEL */ 21448 21449 return (rval); 21450 } 21451 21452 21453 /* 21454 * The following routines support the clustering functionality described below 21455 * and implement lost reservation reclaim functionality. 21456 * 21457 * Clustering 21458 * ---------- 21459 * The clustering code uses two different, independent forms of SCSI 21460 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 21461 * Persistent Group Reservations. For any particular disk, it will use either 21462 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21463 * 21464 * SCSI-2 21465 * The cluster software takes ownership of a multi-hosted disk by issuing the 21466 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21467 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 21468 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 21469 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 21470 * driver. The meaning of failfast is that if the driver (on this host) ever 21471 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 21472 * it should immediately panic the host. The motivation for this ioctl is that 21473 * if this host does encounter reservation conflict, the underlying cause is 21474 * that some other host of the cluster has decided that this host is no longer 21475 * in the cluster and has seized control of the disks for itself. Since this 21476 * host is no longer in the cluster, it ought to panic itself. 
The 21477 * MHIOCENFAILFAST ioctl does two things: 21478 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 21479 * error to panic the host 21480 * (b) it sets up a periodic timer to test whether this host still has 21481 * "access" (in that no other host has reserved the device): if the 21482 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 21483 * purpose of that periodic timer is to handle scenarios where the host is 21484 * otherwise temporarily quiescent, temporarily doing no real i/o. 21485 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 21486 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 21487 * the device itself. 21488 * 21489 * SCSI-3 PGR 21490 * A direct semantic implementation of the SCSI-3 Persistent Reservation 21491 * facility is supported through the shared multihost disk ioctls 21492 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 21493 * MHIOCGRP_PREEMPTANDABORT) 21494 * 21495 * Reservation Reclaim: 21496 * -------------------- 21497 * To support the lost reservation reclaim operations this driver creates a 21498 * single thread to handle reinstating reservations on all devices that have 21499 * lost reservations sd_resv_reclaim_requests are logged for all devices that 21500 * have LOST RESERVATIONS when the scsi watch facility callsback sd_mhd_watch_cb 21501 * and the reservation reclaim thread loops through the requests to regain the 21502 * lost reservations. 21503 */ 21504 21505 /* 21506 * Function: sd_check_mhd() 21507 * 21508 * Description: This function sets up and submits a scsi watch request or 21509 * terminates an existing watch request. This routine is used in 21510 * support of reservation reclaim. 21511 * 21512 * Arguments: dev - the device 'dev_t' is used for context to discriminate 21513 * among multiple watches that share the callback function 21514 * interval - the number of microseconds specifying the watch 21515 * interval for issuing TEST UNIT READY commands. If 21516 * set to 0 the watch should be terminated. If the 21517 * interval is set to 0 and if the device is required 21518 * to hold reservation while disabling failfast, the 21519 * watch is restarted with an interval of 21520 * reinstate_resv_delay. 21521 * 21522 * Return Code: 0 - Successful submit/terminate of scsi watch request 21523 * ENXIO - Indicates an invalid device was specified 21524 * EAGAIN - Unable to submit the scsi watch request 21525 */ 21526 21527 static int 21528 sd_check_mhd(dev_t dev, int interval) 21529 { 21530 struct sd_lun *un; 21531 opaque_t token; 21532 21533 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21534 return (ENXIO); 21535 } 21536 21537 /* is this a watch termination request? */ 21538 if (interval == 0) { 21539 mutex_enter(SD_MUTEX(un)); 21540 /* if there is an existing watch task then terminate it */ 21541 if (un->un_mhd_token) { 21542 token = un->un_mhd_token; 21543 un->un_mhd_token = NULL; 21544 mutex_exit(SD_MUTEX(un)); 21545 (void) scsi_watch_request_terminate(token, 21546 SCSI_WATCH_TERMINATE_WAIT); 21547 mutex_enter(SD_MUTEX(un)); 21548 } else { 21549 mutex_exit(SD_MUTEX(un)); 21550 /* 21551 * Note: If we return here we don't check for the 21552 * failfast case. This is the original legacy 21553 * implementation but perhaps we should be checking 21554 * the failfast case. 
21555 */ 21556 return (0); 21557 } 21558 /* 21559 * If the device is required to hold reservation while 21560 * disabling failfast, we need to restart the scsi_watch 21561 * routine with an interval of reinstate_resv_delay. 21562 */ 21563 if (un->un_resvd_status & SD_RESERVE) { 21564 interval = sd_reinstate_resv_delay/1000; 21565 } else { 21566 /* no failfast so bail */ 21567 mutex_exit(SD_MUTEX(un)); 21568 return (0); 21569 } 21570 mutex_exit(SD_MUTEX(un)); 21571 } 21572 21573 /* 21574 * adjust minimum time interval to 1 second, 21575 * and convert from msecs to usecs 21576 */ 21577 if (interval > 0 && interval < 1000) { 21578 interval = 1000; 21579 } 21580 interval *= 1000; 21581 21582 /* 21583 * submit the request to the scsi_watch service 21584 */ 21585 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21586 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21587 if (token == NULL) { 21588 return (EAGAIN); 21589 } 21590 21591 /* 21592 * save token for termination later on 21593 */ 21594 mutex_enter(SD_MUTEX(un)); 21595 un->un_mhd_token = token; 21596 mutex_exit(SD_MUTEX(un)); 21597 return (0); 21598 } 21599 21600 21601 /* 21602 * Function: sd_mhd_watch_cb() 21603 * 21604 * Description: This function is the call back function used by the scsi watch 21605 * facility. The scsi watch facility sends the "Test Unit Ready" 21606 * and processes the status. If applicable (i.e. a "Unit Attention" 21607 * status and automatic "Request Sense" not used) the scsi watch 21608 * facility will send a "Request Sense" and retrieve the sense data 21609 * to be passed to this callback function. In either case the 21610 * automatic "Request Sense" or the facility submitting one, this 21611 * callback is passed the status and sense data. 21612 * 21613 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21614 * among multiple watches that share this callback function 21615 * resultp - scsi watch facility result packet containing scsi 21616 * packet, status byte and sense data 21617 * 21618 * Return Code: 0 - continue the watch task 21619 * non-zero - terminate the watch task 21620 */ 21621 21622 static int 21623 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21624 { 21625 struct sd_lun *un; 21626 struct scsi_status *statusp; 21627 uint8_t *sensep; 21628 struct scsi_pkt *pkt; 21629 uchar_t actual_sense_length; 21630 dev_t dev = (dev_t)arg; 21631 21632 ASSERT(resultp != NULL); 21633 statusp = resultp->statusp; 21634 sensep = (uint8_t *)resultp->sensep; 21635 pkt = resultp->pkt; 21636 actual_sense_length = resultp->actual_sense_length; 21637 21638 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21639 return (ENXIO); 21640 } 21641 21642 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21643 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 21644 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 21645 21646 /* Begin processing of the status and/or sense data */ 21647 if (pkt->pkt_reason != CMD_CMPLT) { 21648 /* Handle the incomplete packet */ 21649 sd_mhd_watch_incomplete(un, pkt); 21650 return (0); 21651 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 21652 if (*((unsigned char *)statusp) 21653 == STATUS_RESERVATION_CONFLICT) { 21654 /* 21655 * Handle a reservation conflict by panicking if 21656 * configured for failfast or by logging the conflict 21657 * and updating the reservation status 21658 */ 21659 mutex_enter(SD_MUTEX(un)); 21660 if ((un->un_resvd_status & SD_FAILFAST) && 21661 (sd_failfast_enable)) { 21662 
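				/*
				 * Both the per-device SD_FAILFAST flag (set
				 * via MHIOCENFAILFAST) and the global
				 * sd_failfast_enable tunable must be set
				 * before panicking; otherwise the conflict
				 * is only logged and recorded below.
				 */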
sd_panic_for_res_conflict(un); 21663 /*NOTREACHED*/ 21664 } 21665 SD_INFO(SD_LOG_IOCTL_MHD, un, 21666 "sd_mhd_watch_cb: Reservation Conflict\n"); 21667 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 21668 mutex_exit(SD_MUTEX(un)); 21669 } 21670 } 21671 21672 if (sensep != NULL) { 21673 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 21674 mutex_enter(SD_MUTEX(un)); 21675 if ((scsi_sense_asc(sensep) == 21676 SD_SCSI_RESET_SENSE_CODE) && 21677 (un->un_resvd_status & SD_RESERVE)) { 21678 /* 21679 * The additional sense code indicates a power 21680 * on or bus device reset has occurred; update 21681 * the reservation status. 21682 */ 21683 un->un_resvd_status |= 21684 (SD_LOST_RESERVE | SD_WANT_RESERVE); 21685 SD_INFO(SD_LOG_IOCTL_MHD, un, 21686 "sd_mhd_watch_cb: Lost Reservation\n"); 21687 } 21688 } else { 21689 return (0); 21690 } 21691 } else { 21692 mutex_enter(SD_MUTEX(un)); 21693 } 21694 21695 if ((un->un_resvd_status & SD_RESERVE) && 21696 (un->un_resvd_status & SD_LOST_RESERVE)) { 21697 if (un->un_resvd_status & SD_WANT_RESERVE) { 21698 /* 21699 * A reset occurred in between the last probe and this 21700 * one so if a timeout is pending cancel it. 21701 */ 21702 if (un->un_resvd_timeid) { 21703 timeout_id_t temp_id = un->un_resvd_timeid; 21704 un->un_resvd_timeid = NULL; 21705 mutex_exit(SD_MUTEX(un)); 21706 (void) untimeout(temp_id); 21707 mutex_enter(SD_MUTEX(un)); 21708 } 21709 un->un_resvd_status &= ~SD_WANT_RESERVE; 21710 } 21711 if (un->un_resvd_timeid == 0) { 21712 /* Schedule a timeout to handle the lost reservation */ 21713 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 21714 (void *)dev, 21715 drv_usectohz(sd_reinstate_resv_delay)); 21716 } 21717 } 21718 mutex_exit(SD_MUTEX(un)); 21719 return (0); 21720 } 21721 21722 21723 /* 21724 * Function: sd_mhd_watch_incomplete() 21725 * 21726 * Description: This function is used to find out why a scsi pkt sent by the 21727 * scsi watch facility was not completed. Under some scenarios this 21728 * routine will return. Otherwise it will send a bus reset to see 21729 * if the drive is still online. 21730 * 21731 * Arguments: un - driver soft state (unit) structure 21732 * pkt - incomplete scsi pkt 21733 */ 21734 21735 static void 21736 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 21737 { 21738 int be_chatty; 21739 int perr; 21740 21741 ASSERT(pkt != NULL); 21742 ASSERT(un != NULL); 21743 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 21744 perr = (pkt->pkt_statistics & STAT_PERR); 21745 21746 mutex_enter(SD_MUTEX(un)); 21747 if (un->un_state == SD_STATE_DUMPING) { 21748 mutex_exit(SD_MUTEX(un)); 21749 return; 21750 } 21751 21752 switch (pkt->pkt_reason) { 21753 case CMD_UNX_BUS_FREE: 21754 /* 21755 * If we had a parity error that caused the target to drop BSY*, 21756 * don't be chatty about it. 21757 */ 21758 if (perr && be_chatty) { 21759 be_chatty = 0; 21760 } 21761 break; 21762 case CMD_TAG_REJECT: 21763 /* 21764 * The SCSI-2 spec states that a tag reject will be sent by the 21765 * target if tagged queuing is not supported. A tag reject may 21766 * also be sent during certain initialization periods or to 21767 * control internal resources. For the latter case the target 21768 * may also return Queue Full. 21769 * 21770 * If this driver receives a tag reject from a target that is 21771 * going through an init period or controlling internal 21772 * resources tagged queuing will be disabled. 
This is a less 21773 * than optimal behavior but the driver is unable to determine 21774 * the target state and assumes tagged queueing is not supported 21775 */ 21776 pkt->pkt_flags = 0; 21777 un->un_tagflags = 0; 21778 21779 if (un->un_f_opt_queueing == TRUE) { 21780 un->un_throttle = min(un->un_throttle, 3); 21781 } else { 21782 un->un_throttle = 1; 21783 } 21784 mutex_exit(SD_MUTEX(un)); 21785 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 21786 mutex_enter(SD_MUTEX(un)); 21787 break; 21788 case CMD_INCOMPLETE: 21789 /* 21790 * The transport stopped with an abnormal state, fallthrough and 21791 * reset the target and/or bus unless selection did not complete 21792 * (indicated by STATE_GOT_BUS) in which case we don't want to 21793 * go through a target/bus reset 21794 */ 21795 if (pkt->pkt_state == STATE_GOT_BUS) { 21796 break; 21797 } 21798 /*FALLTHROUGH*/ 21799 21800 case CMD_TIMEOUT: 21801 default: 21802 /* 21803 * The lun may still be running the command, so a lun reset 21804 * should be attempted. If the lun reset fails or cannot be 21805 * issued, than try a target reset. Lastly try a bus reset. 21806 */ 21807 if ((pkt->pkt_statistics & 21808 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 21809 int reset_retval = 0; 21810 mutex_exit(SD_MUTEX(un)); 21811 if (un->un_f_allow_bus_device_reset == TRUE) { 21812 if (un->un_f_lun_reset_enabled == TRUE) { 21813 reset_retval = 21814 scsi_reset(SD_ADDRESS(un), 21815 RESET_LUN); 21816 } 21817 if (reset_retval == 0) { 21818 reset_retval = 21819 scsi_reset(SD_ADDRESS(un), 21820 RESET_TARGET); 21821 } 21822 } 21823 if (reset_retval == 0) { 21824 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 21825 } 21826 mutex_enter(SD_MUTEX(un)); 21827 } 21828 break; 21829 } 21830 21831 /* A device/bus reset has occurred; update the reservation status. */ 21832 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 21833 (STAT_BUS_RESET | STAT_DEV_RESET))) { 21834 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 21835 un->un_resvd_status |= 21836 (SD_LOST_RESERVE | SD_WANT_RESERVE); 21837 SD_INFO(SD_LOG_IOCTL_MHD, un, 21838 "sd_mhd_watch_incomplete: Lost Reservation\n"); 21839 } 21840 } 21841 21842 /* 21843 * The disk has been turned off; Update the device state. 21844 * 21845 * Note: Should we be offlining the disk here? 21846 */ 21847 if (pkt->pkt_state == STATE_GOT_BUS) { 21848 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 21849 "Disk not responding to selection\n"); 21850 if (un->un_state != SD_STATE_OFFLINE) { 21851 New_state(un, SD_STATE_OFFLINE); 21852 } 21853 } else if (be_chatty) { 21854 /* 21855 * suppress messages if they are all the same pkt reason; 21856 * with TQ, many (up to 256) are returned with the same 21857 * pkt_reason 21858 */ 21859 if (pkt->pkt_reason != un->un_last_pkt_reason) { 21860 SD_ERROR(SD_LOG_IOCTL_MHD, un, 21861 "sd_mhd_watch_incomplete: " 21862 "SCSI transport failed: reason '%s'\n", 21863 scsi_rname(pkt->pkt_reason)); 21864 } 21865 } 21866 un->un_last_pkt_reason = pkt->pkt_reason; 21867 mutex_exit(SD_MUTEX(un)); 21868 } 21869 21870 21871 /* 21872 * Function: sd_sname() 21873 * 21874 * Description: This is a simple little routine to return a string containing 21875 * a printable description of command status byte for use in 21876 * logging. 21877 * 21878 * Arguments: status - pointer to a status byte 21879 * 21880 * Return Code: char * - string containing status description. 
21881 */ 21882 21883 static char * 21884 sd_sname(uchar_t status) 21885 { 21886 switch (status & STATUS_MASK) { 21887 case STATUS_GOOD: 21888 return ("good status"); 21889 case STATUS_CHECK: 21890 return ("check condition"); 21891 case STATUS_MET: 21892 return ("condition met"); 21893 case STATUS_BUSY: 21894 return ("busy"); 21895 case STATUS_INTERMEDIATE: 21896 return ("intermediate"); 21897 case STATUS_INTERMEDIATE_MET: 21898 return ("intermediate - condition met"); 21899 case STATUS_RESERVATION_CONFLICT: 21900 return ("reservation_conflict"); 21901 case STATUS_TERMINATED: 21902 return ("command terminated"); 21903 case STATUS_QFULL: 21904 return ("queue full"); 21905 default: 21906 return ("<unknown status>"); 21907 } 21908 } 21909 21910 21911 /* 21912 * Function: sd_mhd_resvd_recover() 21913 * 21914 * Description: This function adds a reservation entry to the 21915 * sd_resv_reclaim_request list and signals the reservation 21916 * reclaim thread that there is work pending. If the reservation 21917 * reclaim thread has not been previously created this function 21918 * will kick it off. 21919 * 21920 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21921 * among multiple watches that share this callback function 21922 * 21923 * Context: This routine is called by timeout() and is run in interrupt 21924 * context. It must not sleep or call other functions which may 21925 * sleep. 21926 */ 21927 21928 static void 21929 sd_mhd_resvd_recover(void *arg) 21930 { 21931 dev_t dev = (dev_t)arg; 21932 struct sd_lun *un; 21933 struct sd_thr_request *sd_treq = NULL; 21934 struct sd_thr_request *sd_cur = NULL; 21935 struct sd_thr_request *sd_prev = NULL; 21936 int already_there = 0; 21937 21938 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21939 return; 21940 } 21941 21942 mutex_enter(SD_MUTEX(un)); 21943 un->un_resvd_timeid = NULL; 21944 if (un->un_resvd_status & SD_WANT_RESERVE) { 21945 /* 21946 * There was a reset so don't issue the reserve, allow the 21947 * sd_mhd_watch_cb callback function to notice this and 21948 * reschedule the timeout for reservation. 21949 */ 21950 mutex_exit(SD_MUTEX(un)); 21951 return; 21952 } 21953 mutex_exit(SD_MUTEX(un)); 21954 21955 /* 21956 * Add this device to the sd_resv_reclaim_request list and the 21957 * sd_resv_reclaim_thread should take care of the rest. 21958 * 21959 * Note: We can't sleep in this context so if the memory allocation 21960 * fails allow the sd_mhd_watch_cb callback function to notice this and 21961 * reschedule the timeout for reservation. 
(4378460) 21962 */ 21963 sd_treq = (struct sd_thr_request *) 21964 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 21965 if (sd_treq == NULL) { 21966 return; 21967 } 21968 21969 sd_treq->sd_thr_req_next = NULL; 21970 sd_treq->dev = dev; 21971 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 21972 if (sd_tr.srq_thr_req_head == NULL) { 21973 sd_tr.srq_thr_req_head = sd_treq; 21974 } else { 21975 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 21976 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 21977 if (sd_cur->dev == dev) { 21978 /* 21979 * already in Queue so don't log 21980 * another request for the device 21981 */ 21982 already_there = 1; 21983 break; 21984 } 21985 sd_prev = sd_cur; 21986 } 21987 if (!already_there) { 21988 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 21989 "logging request for %lx\n", dev); 21990 sd_prev->sd_thr_req_next = sd_treq; 21991 } else { 21992 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 21993 } 21994 } 21995 21996 /* 21997 * Create a kernel thread to do the reservation reclaim and free up this 21998 * thread. We cannot block this thread while we go away to do the 21999 * reservation reclaim 22000 */ 22001 if (sd_tr.srq_resv_reclaim_thread == NULL) 22002 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 22003 sd_resv_reclaim_thread, NULL, 22004 0, &p0, TS_RUN, v.v_maxsyspri - 2); 22005 22006 /* Tell the reservation reclaim thread that it has work to do */ 22007 cv_signal(&sd_tr.srq_resv_reclaim_cv); 22008 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22009 } 22010 22011 /* 22012 * Function: sd_resv_reclaim_thread() 22013 * 22014 * Description: This function implements the reservation reclaim operations 22015 * 22016 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22017 * among multiple watches that share this callback function 22018 */ 22019 22020 static void 22021 sd_resv_reclaim_thread() 22022 { 22023 struct sd_lun *un; 22024 struct sd_thr_request *sd_mhreq; 22025 22026 /* Wait for work */ 22027 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22028 if (sd_tr.srq_thr_req_head == NULL) { 22029 cv_wait(&sd_tr.srq_resv_reclaim_cv, 22030 &sd_tr.srq_resv_reclaim_mutex); 22031 } 22032 22033 /* Loop while we have work */ 22034 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 22035 un = ddi_get_soft_state(sd_state, 22036 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 22037 if (un == NULL) { 22038 /* 22039 * softstate structure is NULL so just 22040 * dequeue the request and continue 22041 */ 22042 sd_tr.srq_thr_req_head = 22043 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22044 kmem_free(sd_tr.srq_thr_cur_req, 22045 sizeof (struct sd_thr_request)); 22046 continue; 22047 } 22048 22049 /* dequeue the request */ 22050 sd_mhreq = sd_tr.srq_thr_cur_req; 22051 sd_tr.srq_thr_req_head = 22052 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22053 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22054 22055 /* 22056 * Reclaim reservation only if SD_RESERVE is still set. There 22057 * may have been a call to MHIOCRELEASE before we got here. 22058 */ 22059 mutex_enter(SD_MUTEX(un)); 22060 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22061 /* 22062 * Note: The SD_LOST_RESERVE flag is cleared before 22063 * reclaiming the reservation. 
If this is done after the 22064 * call to sd_reserve_release a reservation loss in the 22065 * window between pkt completion of reserve cmd and 22066 * mutex_enter below may not be recognized 22067 */ 22068 un->un_resvd_status &= ~SD_LOST_RESERVE; 22069 mutex_exit(SD_MUTEX(un)); 22070 22071 if (sd_reserve_release(sd_mhreq->dev, 22072 SD_RESERVE) == 0) { 22073 mutex_enter(SD_MUTEX(un)); 22074 un->un_resvd_status |= SD_RESERVE; 22075 mutex_exit(SD_MUTEX(un)); 22076 SD_INFO(SD_LOG_IOCTL_MHD, un, 22077 "sd_resv_reclaim_thread: " 22078 "Reservation Recovered\n"); 22079 } else { 22080 mutex_enter(SD_MUTEX(un)); 22081 un->un_resvd_status |= SD_LOST_RESERVE; 22082 mutex_exit(SD_MUTEX(un)); 22083 SD_INFO(SD_LOG_IOCTL_MHD, un, 22084 "sd_resv_reclaim_thread: Failed " 22085 "Reservation Recovery\n"); 22086 } 22087 } else { 22088 mutex_exit(SD_MUTEX(un)); 22089 } 22090 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22091 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22092 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22093 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22094 /* 22095 * wakeup the destroy thread if anyone is waiting on 22096 * us to complete. 22097 */ 22098 cv_signal(&sd_tr.srq_inprocess_cv); 22099 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22100 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22101 } 22102 22103 /* 22104 * cleanup the sd_tr structure now that this thread will not exist 22105 */ 22106 ASSERT(sd_tr.srq_thr_req_head == NULL); 22107 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22108 sd_tr.srq_resv_reclaim_thread = NULL; 22109 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22110 thread_exit(); 22111 } 22112 22113 22114 /* 22115 * Function: sd_rmv_resv_reclaim_req() 22116 * 22117 * Description: This function removes any pending reservation reclaim requests 22118 * for the specified device. 22119 * 22120 * Arguments: dev - the device 'dev_t' 22121 */ 22122 22123 static void 22124 sd_rmv_resv_reclaim_req(dev_t dev) 22125 { 22126 struct sd_thr_request *sd_mhreq; 22127 struct sd_thr_request *sd_prev; 22128 22129 /* Remove a reservation reclaim request from the list */ 22130 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22131 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22132 /* 22133 * We are attempting to reinstate reservation for 22134 * this device. We wait for sd_reserve_release() 22135 * to return before we return. 22136 */ 22137 cv_wait(&sd_tr.srq_inprocess_cv, 22138 &sd_tr.srq_resv_reclaim_mutex); 22139 } else { 22140 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22141 if (sd_mhreq && sd_mhreq->dev == dev) { 22142 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22143 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22144 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22145 return; 22146 } 22147 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22148 if (sd_mhreq && sd_mhreq->dev == dev) { 22149 break; 22150 } 22151 sd_prev = sd_mhreq; 22152 } 22153 if (sd_mhreq != NULL) { 22154 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22155 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22156 } 22157 } 22158 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22159 } 22160 22161 22162 /* 22163 * Function: sd_mhd_reset_notify_cb() 22164 * 22165 * Description: This is a call back function for scsi_reset_notify. This 22166 * function updates the softstate reserved status and logs the 22167 * reset. The driver scsi watch facility callback function 22168 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22169 * will reclaim the reservation. 
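 *
 *		Registration sketch (illustrative only; the actual
 *		scsi_reset_notify() call site lives elsewhere in this driver):
 *
 *		    (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *			sd_mhd_reset_notify_cb, (caddr_t)un);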
22170 * 22171 * Arguments: arg - driver soft state (unit) structure 22172 */ 22173 22174 static void 22175 sd_mhd_reset_notify_cb(caddr_t arg) 22176 { 22177 struct sd_lun *un = (struct sd_lun *)arg; 22178 22179 mutex_enter(SD_MUTEX(un)); 22180 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22181 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22182 SD_INFO(SD_LOG_IOCTL_MHD, un, 22183 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22184 } 22185 mutex_exit(SD_MUTEX(un)); 22186 } 22187 22188 22189 /* 22190 * Function: sd_take_ownership() 22191 * 22192 * Description: This routine implements an algorithm to achieve a stable 22193 * reservation on disks which don't implement priority reserve, 22194 * and makes sure that other host lose re-reservation attempts. 22195 * This algorithm contains of a loop that keeps issuing the RESERVE 22196 * for some period of time (min_ownership_delay, default 6 seconds) 22197 * During that loop, it looks to see if there has been a bus device 22198 * reset or bus reset (both of which cause an existing reservation 22199 * to be lost). If the reservation is lost issue RESERVE until a 22200 * period of min_ownership_delay with no resets has gone by, or 22201 * until max_ownership_delay has expired. This loop ensures that 22202 * the host really did manage to reserve the device, in spite of 22203 * resets. The looping for min_ownership_delay (default six 22204 * seconds) is important to early generation clustering products, 22205 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22206 * MHIOCENFAILFAST periodic timer of two seconds. By having 22207 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22208 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22209 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22210 * have already noticed, via the MHIOCENFAILFAST polling, that it 22211 * no longer "owns" the disk and will have panicked itself. Thus, 22212 * the host issuing the MHIOCTKOWN is assured (with timing 22213 * dependencies) that by the time it actually starts to use the 22214 * disk for real work, the old owner is no longer accessing it. 22215 * 22216 * min_ownership_delay is the minimum amount of time for which the 22217 * disk must be reserved continuously devoid of resets before the 22218 * MHIOCTKOWN ioctl will return success. 22219 * 22220 * max_ownership_delay indicates the amount of time by which the 22221 * take ownership should succeed or timeout with an error. 22222 * 22223 * Arguments: dev - the device 'dev_t' 22224 * *p - struct containing timing info. 22225 * 22226 * Return Code: 0 for success or error code 22227 */ 22228 22229 static int 22230 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22231 { 22232 struct sd_lun *un; 22233 int rval; 22234 int err; 22235 int reservation_count = 0; 22236 int min_ownership_delay = 6000000; /* in usec */ 22237 int max_ownership_delay = 30000000; /* in usec */ 22238 clock_t start_time; /* starting time of this algorithm */ 22239 clock_t end_time; /* time limit for giving up */ 22240 clock_t ownership_time; /* time limit for stable ownership */ 22241 clock_t current_time; 22242 clock_t previous_current_time; 22243 22244 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22245 return (ENXIO); 22246 } 22247 22248 /* 22249 * Attempt a device reservation. A priority reservation is requested. 
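 *
 * (For reference: with the default delays above and the 500 msec pause in
 * the retry loop below, stable ownership needs roughly twelve consecutive
 * conflict-free passes to satisfy min_ownership_delay, and the loop gives
 * up after roughly sixty passes when max_ownership_delay expires.)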
22250 */ 22251 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22252 != SD_SUCCESS) { 22253 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22254 "sd_take_ownership: return(1)=%d\n", rval); 22255 return (rval); 22256 } 22257 22258 /* Update the softstate reserved status to indicate the reservation */ 22259 mutex_enter(SD_MUTEX(un)); 22260 un->un_resvd_status |= SD_RESERVE; 22261 un->un_resvd_status &= 22262 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22263 mutex_exit(SD_MUTEX(un)); 22264 22265 if (p != NULL) { 22266 if (p->min_ownership_delay != 0) { 22267 min_ownership_delay = p->min_ownership_delay * 1000; 22268 } 22269 if (p->max_ownership_delay != 0) { 22270 max_ownership_delay = p->max_ownership_delay * 1000; 22271 } 22272 } 22273 SD_INFO(SD_LOG_IOCTL_MHD, un, 22274 "sd_take_ownership: min, max delays: %d, %d\n", 22275 min_ownership_delay, max_ownership_delay); 22276 22277 start_time = ddi_get_lbolt(); 22278 current_time = start_time; 22279 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22280 end_time = start_time + drv_usectohz(max_ownership_delay); 22281 22282 while (current_time - end_time < 0) { 22283 delay(drv_usectohz(500000)); 22284 22285 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22286 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22287 mutex_enter(SD_MUTEX(un)); 22288 rval = (un->un_resvd_status & 22289 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22290 mutex_exit(SD_MUTEX(un)); 22291 break; 22292 } 22293 } 22294 previous_current_time = current_time; 22295 current_time = ddi_get_lbolt(); 22296 mutex_enter(SD_MUTEX(un)); 22297 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22298 ownership_time = ddi_get_lbolt() + 22299 drv_usectohz(min_ownership_delay); 22300 reservation_count = 0; 22301 } else { 22302 reservation_count++; 22303 } 22304 un->un_resvd_status |= SD_RESERVE; 22305 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22306 mutex_exit(SD_MUTEX(un)); 22307 22308 SD_INFO(SD_LOG_IOCTL_MHD, un, 22309 "sd_take_ownership: ticks for loop iteration=%ld, " 22310 "reservation=%s\n", (current_time - previous_current_time), 22311 reservation_count ? 
"ok" : "reclaimed"); 22312 22313 if (current_time - ownership_time >= 0 && 22314 reservation_count >= 4) { 22315 rval = 0; /* Achieved a stable ownership */ 22316 break; 22317 } 22318 if (current_time - end_time >= 0) { 22319 rval = EACCES; /* No ownership in max possible time */ 22320 break; 22321 } 22322 } 22323 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22324 "sd_take_ownership: return(2)=%d\n", rval); 22325 return (rval); 22326 } 22327 22328 22329 /* 22330 * Function: sd_reserve_release() 22331 * 22332 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22333 * PRIORITY RESERVE commands based on a user specified command type 22334 * 22335 * Arguments: dev - the device 'dev_t' 22336 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22337 * SD_RESERVE, SD_RELEASE 22338 * 22339 * Return Code: 0 or Error Code 22340 */ 22341 22342 static int 22343 sd_reserve_release(dev_t dev, int cmd) 22344 { 22345 struct uscsi_cmd *com = NULL; 22346 struct sd_lun *un = NULL; 22347 char cdb[CDB_GROUP0]; 22348 int rval; 22349 22350 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22351 (cmd == SD_PRIORITY_RESERVE)); 22352 22353 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22354 return (ENXIO); 22355 } 22356 22357 /* instantiate and initialize the command and cdb */ 22358 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22359 bzero(cdb, CDB_GROUP0); 22360 com->uscsi_flags = USCSI_SILENT; 22361 com->uscsi_timeout = un->un_reserve_release_time; 22362 com->uscsi_cdblen = CDB_GROUP0; 22363 com->uscsi_cdb = cdb; 22364 if (cmd == SD_RELEASE) { 22365 cdb[0] = SCMD_RELEASE; 22366 } else { 22367 cdb[0] = SCMD_RESERVE; 22368 } 22369 22370 /* Send the command. */ 22371 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22372 SD_PATH_STANDARD); 22373 22374 /* 22375 * "break" a reservation that is held by another host, by issuing a 22376 * reset if priority reserve is desired, and we could not get the 22377 * device. 22378 */ 22379 if ((cmd == SD_PRIORITY_RESERVE) && 22380 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22381 /* 22382 * First try to reset the LUN. If we cannot, then try a target 22383 * reset, followed by a bus reset if the target reset fails. 22384 */ 22385 int reset_retval = 0; 22386 if (un->un_f_lun_reset_enabled == TRUE) { 22387 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22388 } 22389 if (reset_retval == 0) { 22390 /* The LUN reset either failed or was not issued */ 22391 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22392 } 22393 if ((reset_retval == 0) && 22394 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22395 rval = EIO; 22396 kmem_free(com, sizeof (*com)); 22397 return (rval); 22398 } 22399 22400 bzero(com, sizeof (struct uscsi_cmd)); 22401 com->uscsi_flags = USCSI_SILENT; 22402 com->uscsi_cdb = cdb; 22403 com->uscsi_cdblen = CDB_GROUP0; 22404 com->uscsi_timeout = 5; 22405 22406 /* 22407 * Reissue the last reserve command, this time without request 22408 * sense. Assume that it is just a regular reserve command. 22409 */ 22410 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22411 SD_PATH_STANDARD); 22412 } 22413 22414 /* Return an error if still getting a reservation conflict. 
*/ 22415 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22416 rval = EACCES; 22417 } 22418 22419 kmem_free(com, sizeof (*com)); 22420 return (rval); 22421 } 22422 22423 22424 #define SD_NDUMP_RETRIES 12 22425 /* 22426 * System Crash Dump routine 22427 */ 22428 22429 static int 22430 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22431 { 22432 int instance; 22433 int partition; 22434 int i; 22435 int err; 22436 struct sd_lun *un; 22437 struct scsi_pkt *wr_pktp; 22438 struct buf *wr_bp; 22439 struct buf wr_buf; 22440 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22441 daddr_t tgt_blkno; /* rmw - blkno for target */ 22442 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22443 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22444 size_t io_start_offset; 22445 int doing_rmw = FALSE; 22446 int rval; 22447 #if defined(__i386) || defined(__amd64) 22448 ssize_t dma_resid; 22449 daddr_t oblkno; 22450 #endif 22451 diskaddr_t nblks = 0; 22452 diskaddr_t start_block; 22453 22454 instance = SDUNIT(dev); 22455 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22456 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22457 return (ENXIO); 22458 } 22459 22460 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22461 22462 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22463 22464 partition = SDPART(dev); 22465 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22466 22467 /* Validate blocks to dump at against partition size. */ 22468 22469 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22470 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22471 22472 if ((blkno + nblk) > nblks) { 22473 SD_TRACE(SD_LOG_DUMP, un, 22474 "sddump: dump range larger than partition: " 22475 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22476 blkno, nblk, nblks); 22477 return (EINVAL); 22478 } 22479 22480 mutex_enter(&un->un_pm_mutex); 22481 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22482 struct scsi_pkt *start_pktp; 22483 22484 mutex_exit(&un->un_pm_mutex); 22485 22486 /* 22487 * use pm framework to power on HBA 1st 22488 */ 22489 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22490 22491 /* 22492 * Dump no long uses sdpower to power on a device, it's 22493 * in-line here so it can be done in polled mode. 22494 */ 22495 22496 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22497 22498 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22499 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22500 22501 if (start_pktp == NULL) { 22502 /* We were not given a SCSI packet, fail. */ 22503 return (EIO); 22504 } 22505 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22506 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22507 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22508 start_pktp->pkt_flags = FLAG_NOINTR; 22509 22510 mutex_enter(SD_MUTEX(un)); 22511 SD_FILL_SCSI1_LUN(un, start_pktp); 22512 mutex_exit(SD_MUTEX(un)); 22513 /* 22514 * Scsi_poll returns 0 (success) if the command completes and 22515 * the status block is STATUS_GOOD. 22516 */ 22517 if (sd_scsi_poll(un, start_pktp) != 0) { 22518 scsi_destroy_pkt(start_pktp); 22519 return (EIO); 22520 } 22521 scsi_destroy_pkt(start_pktp); 22522 (void) sd_ddi_pm_resume(un); 22523 } else { 22524 mutex_exit(&un->un_pm_mutex); 22525 } 22526 22527 mutex_enter(SD_MUTEX(un)); 22528 un->un_throttle = 0; 22529 22530 /* 22531 * The first time through, reset the specific target device. 22532 * However, when cpr calls sddump we know that sd is in a 22533 * a good state so no bus reset is required. 
22534 * Clear sense data via Request Sense cmd. 22535 * In sddump we don't care about allow_bus_device_reset anymore 22536 */ 22537 22538 if ((un->un_state != SD_STATE_SUSPENDED) && 22539 (un->un_state != SD_STATE_DUMPING)) { 22540 22541 New_state(un, SD_STATE_DUMPING); 22542 22543 if (un->un_f_is_fibre == FALSE) { 22544 mutex_exit(SD_MUTEX(un)); 22545 /* 22546 * Attempt a bus reset for parallel scsi. 22547 * 22548 * Note: A bus reset is required because on some host 22549 * systems (i.e. E420R) a bus device reset is 22550 * insufficient to reset the state of the target. 22551 * 22552 * Note: Don't issue the reset for fibre-channel, 22553 * because this tends to hang the bus (loop) for 22554 * too long while everyone is logging out and in 22555 * and the deadman timer for dumping will fire 22556 * before the dump is complete. 22557 */ 22558 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22559 mutex_enter(SD_MUTEX(un)); 22560 Restore_state(un); 22561 mutex_exit(SD_MUTEX(un)); 22562 return (EIO); 22563 } 22564 22565 /* Delay to give the device some recovery time. */ 22566 drv_usecwait(10000); 22567 22568 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22569 SD_INFO(SD_LOG_DUMP, un, 22570 "sddump: sd_send_polled_RQS failed\n"); 22571 } 22572 mutex_enter(SD_MUTEX(un)); 22573 } 22574 } 22575 22576 /* 22577 * Convert the partition-relative block number to a 22578 * disk physical block number. 22579 */ 22580 blkno += start_block; 22581 22582 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22583 22584 22585 /* 22586 * Check if the device has a non-512 block size. 22587 */ 22588 wr_bp = NULL; 22589 if (NOT_DEVBSIZE(un)) { 22590 tgt_byte_offset = blkno * un->un_sys_blocksize; 22591 tgt_byte_count = nblk * un->un_sys_blocksize; 22592 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22593 (tgt_byte_count % un->un_tgt_blocksize)) { 22594 doing_rmw = TRUE; 22595 /* 22596 * Calculate the block number and number of block 22597 * in terms of the media block size. 22598 */ 22599 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22600 tgt_nblk = 22601 ((tgt_byte_offset + tgt_byte_count + 22602 (un->un_tgt_blocksize - 1)) / 22603 un->un_tgt_blocksize) - tgt_blkno; 22604 22605 /* 22606 * Invoke the routine which is going to do read part 22607 * of read-modify-write. 22608 * Note that this routine returns a pointer to 22609 * a valid bp in wr_bp. 22610 */ 22611 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 22612 &wr_bp); 22613 if (err) { 22614 mutex_exit(SD_MUTEX(un)); 22615 return (err); 22616 } 22617 /* 22618 * Offset is being calculated as - 22619 * (original block # * system block size) - 22620 * (new block # * target block size) 22621 */ 22622 io_start_offset = 22623 ((uint64_t)(blkno * un->un_sys_blocksize)) - 22624 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 22625 22626 ASSERT((io_start_offset >= 0) && 22627 (io_start_offset < un->un_tgt_blocksize)); 22628 /* 22629 * Do the modify portion of read modify write. 
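 *
 * (Worked example of the offset computed above, assuming 512 byte system
 * blocks and 2048 byte target blocks: for blkno 7, tgt_blkno is
 * (7 * 512) / 2048 = 1, so io_start_offset is 3584 - 2048 = 1536 and the
 * dump data is copied 1536 bytes into the block(s) just read back.)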
22630 */ 22631 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 22632 (size_t)nblk * un->un_sys_blocksize); 22633 } else { 22634 doing_rmw = FALSE; 22635 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22636 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 22637 } 22638 22639 /* Convert blkno and nblk to target blocks */ 22640 blkno = tgt_blkno; 22641 nblk = tgt_nblk; 22642 } else { 22643 wr_bp = &wr_buf; 22644 bzero(wr_bp, sizeof (struct buf)); 22645 wr_bp->b_flags = B_BUSY; 22646 wr_bp->b_un.b_addr = addr; 22647 wr_bp->b_bcount = nblk << DEV_BSHIFT; 22648 wr_bp->b_resid = 0; 22649 } 22650 22651 mutex_exit(SD_MUTEX(un)); 22652 22653 /* 22654 * Obtain a SCSI packet for the write command. 22655 * It should be safe to call the allocator here without 22656 * worrying about being locked for DVMA mapping because 22657 * the address we're passed is already a DVMA mapping 22658 * 22659 * We are also not going to worry about semaphore ownership 22660 * in the dump buffer. Dumping is single threaded at present. 22661 */ 22662 22663 wr_pktp = NULL; 22664 22665 #if defined(__i386) || defined(__amd64) 22666 dma_resid = wr_bp->b_bcount; 22667 oblkno = blkno; 22668 while (dma_resid != 0) { 22669 #endif 22670 22671 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22672 wr_bp->b_flags &= ~B_ERROR; 22673 22674 #if defined(__i386) || defined(__amd64) 22675 blkno = oblkno + 22676 ((wr_bp->b_bcount - dma_resid) / 22677 un->un_tgt_blocksize); 22678 nblk = dma_resid / un->un_tgt_blocksize; 22679 22680 if (wr_pktp) { 22681 /* Partial DMA transfers after initial transfer */ 22682 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 22683 blkno, nblk); 22684 } else { 22685 /* Initial transfer */ 22686 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22687 un->un_pkt_flags, NULL_FUNC, NULL, 22688 blkno, nblk); 22689 } 22690 #else 22691 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22692 0, NULL_FUNC, NULL, blkno, nblk); 22693 #endif 22694 22695 if (rval == 0) { 22696 /* We were given a SCSI packet, continue. 
*/ 22697 break; 22698 } 22699 22700 if (i == 0) { 22701 if (wr_bp->b_flags & B_ERROR) { 22702 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22703 "no resources for dumping; " 22704 "error code: 0x%x, retrying", 22705 geterror(wr_bp)); 22706 } else { 22707 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22708 "no resources for dumping; retrying"); 22709 } 22710 } else if (i != (SD_NDUMP_RETRIES - 1)) { 22711 if (wr_bp->b_flags & B_ERROR) { 22712 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22713 "no resources for dumping; error code: " 22714 "0x%x, retrying\n", geterror(wr_bp)); 22715 } 22716 } else { 22717 if (wr_bp->b_flags & B_ERROR) { 22718 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22719 "no resources for dumping; " 22720 "error code: 0x%x, retries failed, " 22721 "giving up.\n", geterror(wr_bp)); 22722 } else { 22723 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22724 "no resources for dumping; " 22725 "retries failed, giving up.\n"); 22726 } 22727 mutex_enter(SD_MUTEX(un)); 22728 Restore_state(un); 22729 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 22730 mutex_exit(SD_MUTEX(un)); 22731 scsi_free_consistent_buf(wr_bp); 22732 } else { 22733 mutex_exit(SD_MUTEX(un)); 22734 } 22735 return (EIO); 22736 } 22737 drv_usecwait(10000); 22738 } 22739 22740 #if defined(__i386) || defined(__amd64) 22741 /* 22742 * save the resid from PARTIAL_DMA 22743 */ 22744 dma_resid = wr_pktp->pkt_resid; 22745 if (dma_resid != 0) 22746 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 22747 wr_pktp->pkt_resid = 0; 22748 #endif 22749 22750 /* SunBug 1222170 */ 22751 wr_pktp->pkt_flags = FLAG_NOINTR; 22752 22753 err = EIO; 22754 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22755 22756 /* 22757 * Scsi_poll returns 0 (success) if the command completes and 22758 * the status block is STATUS_GOOD. We should only check 22759 * errors if this condition is not true. Even then we should 22760 * send our own request sense packet only if we have a check 22761 * condition and auto request sense has not been performed by 22762 * the hba. 22763 */ 22764 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 22765 22766 if ((sd_scsi_poll(un, wr_pktp) == 0) && 22767 (wr_pktp->pkt_resid == 0)) { 22768 err = SD_SUCCESS; 22769 break; 22770 } 22771 22772 /* 22773 * Check CMD_DEV_GONE 1st, give up if device is gone. 22774 */ 22775 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 22776 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22777 "Device is gone\n"); 22778 break; 22779 } 22780 22781 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 22782 SD_INFO(SD_LOG_DUMP, un, 22783 "sddump: write failed with CHECK, try # %d\n", i); 22784 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 22785 (void) sd_send_polled_RQS(un); 22786 } 22787 22788 continue; 22789 } 22790 22791 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 22792 int reset_retval = 0; 22793 22794 SD_INFO(SD_LOG_DUMP, un, 22795 "sddump: write failed with BUSY, try # %d\n", i); 22796 22797 if (un->un_f_lun_reset_enabled == TRUE) { 22798 reset_retval = scsi_reset(SD_ADDRESS(un), 22799 RESET_LUN); 22800 } 22801 if (reset_retval == 0) { 22802 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22803 } 22804 (void) sd_send_polled_RQS(un); 22805 22806 } else { 22807 SD_INFO(SD_LOG_DUMP, un, 22808 "sddump: write failed with 0x%x, try # %d\n", 22809 SD_GET_PKT_STATUS(wr_pktp), i); 22810 mutex_enter(SD_MUTEX(un)); 22811 sd_reset_target(un, wr_pktp); 22812 mutex_exit(SD_MUTEX(un)); 22813 } 22814 22815 /* 22816 * If we are not getting anywhere with lun/target resets, 22817 * let's reset the bus. 
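 *
 * (With SD_NDUMP_RETRIES defined as 12 above, this escalation to a full
 * bus reset happens once, on the pass where i reaches 6.)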
22818 */ 22819 if (i == SD_NDUMP_RETRIES/2) { 22820 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22821 (void) sd_send_polled_RQS(un); 22822 } 22823 22824 } 22825 #if defined(__i386) || defined(__amd64) 22826 } /* dma_resid */ 22827 #endif 22828 22829 scsi_destroy_pkt(wr_pktp); 22830 mutex_enter(SD_MUTEX(un)); 22831 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 22832 mutex_exit(SD_MUTEX(un)); 22833 scsi_free_consistent_buf(wr_bp); 22834 } else { 22835 mutex_exit(SD_MUTEX(un)); 22836 } 22837 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 22838 return (err); 22839 } 22840 22841 /* 22842 * Function: sd_scsi_poll() 22843 * 22844 * Description: This is a wrapper for the scsi_poll call. 22845 * 22846 * Arguments: sd_lun - The unit structure 22847 * scsi_pkt - The scsi packet being sent to the device. 22848 * 22849 * Return Code: 0 - Command completed successfully with good status 22850 * -1 - Command failed. This could indicate a check condition 22851 * or other status value requiring recovery action. 22852 * 22853 */ 22854 22855 static int 22856 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 22857 { 22858 int status; 22859 22860 ASSERT(un != NULL); 22861 ASSERT(!mutex_owned(SD_MUTEX(un))); 22862 ASSERT(pktp != NULL); 22863 22864 status = SD_SUCCESS; 22865 22866 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 22867 pktp->pkt_flags |= un->un_tagflags; 22868 pktp->pkt_flags &= ~FLAG_NODISCON; 22869 } 22870 22871 status = sd_ddi_scsi_poll(pktp); 22872 /* 22873 * Scsi_poll returns 0 (success) if the command completes and the 22874 * status block is STATUS_GOOD. We should only check errors if this 22875 * condition is not true. Even then we should send our own request 22876 * sense packet only if we have a check condition and auto 22877 * request sense has not been performed by the hba. 22878 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 22879 */ 22880 if ((status != SD_SUCCESS) && 22881 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 22882 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 22883 (pktp->pkt_reason != CMD_DEV_GONE)) 22884 (void) sd_send_polled_RQS(un); 22885 22886 return (status); 22887 } 22888 22889 /* 22890 * Function: sd_send_polled_RQS() 22891 * 22892 * Description: This sends the request sense command to a device. 22893 * 22894 * Arguments: sd_lun - The unit structure 22895 * 22896 * Return Code: 0 - Command completed successfully with good status 22897 * -1 - Command failed. 
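 *
 *		Context: Expected to be called without SD_MUTEX(un) held (see
 *		the ASSERT below); the routine briefly takes the mutex itself
 *		to claim the shared request sense packet via un_sense_isbusy.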
22898 * 22899 */ 22900 22901 static int 22902 sd_send_polled_RQS(struct sd_lun *un) 22903 { 22904 int ret_val; 22905 struct scsi_pkt *rqs_pktp; 22906 struct buf *rqs_bp; 22907 22908 ASSERT(un != NULL); 22909 ASSERT(!mutex_owned(SD_MUTEX(un))); 22910 22911 ret_val = SD_SUCCESS; 22912 22913 rqs_pktp = un->un_rqs_pktp; 22914 rqs_bp = un->un_rqs_bp; 22915 22916 mutex_enter(SD_MUTEX(un)); 22917 22918 if (un->un_sense_isbusy) { 22919 ret_val = SD_FAILURE; 22920 mutex_exit(SD_MUTEX(un)); 22921 return (ret_val); 22922 } 22923 22924 /* 22925 * If the request sense buffer (and packet) is not in use, 22926 * let's set the un_sense_isbusy and send our packet 22927 */ 22928 un->un_sense_isbusy = 1; 22929 rqs_pktp->pkt_resid = 0; 22930 rqs_pktp->pkt_reason = 0; 22931 rqs_pktp->pkt_flags |= FLAG_NOINTR; 22932 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 22933 22934 mutex_exit(SD_MUTEX(un)); 22935 22936 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 22937 " 0x%p\n", rqs_bp->b_un.b_addr); 22938 22939 /* 22940 * Can't send this to sd_scsi_poll, we wrap ourselves around the 22941 * axle - it has a call into us! 22942 */ 22943 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 22944 SD_INFO(SD_LOG_COMMON, un, 22945 "sd_send_polled_RQS: RQS failed\n"); 22946 } 22947 22948 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 22949 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 22950 22951 mutex_enter(SD_MUTEX(un)); 22952 un->un_sense_isbusy = 0; 22953 mutex_exit(SD_MUTEX(un)); 22954 22955 return (ret_val); 22956 } 22957 22958 /* 22959 * Defines needed for localized version of the scsi_poll routine. 22960 */ 22961 #define SD_CSEC 10000 /* usecs */ 22962 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 22963 22964 22965 /* 22966 * Function: sd_ddi_scsi_poll() 22967 * 22968 * Description: Localized version of the scsi_poll routine. The purpose is to 22969 * send a scsi_pkt to a device as a polled command. This version 22970 * is to ensure more robust handling of transport errors. 22971 * Specifically this routine cures not ready, coming ready 22972 * transition for power up and reset of sonoma's. This can take 22973 * up to 45 seconds for power-on and 20 seconds for reset of a 22974 * sonoma lun. 22975 * 22976 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 22977 * 22978 * Return Code: 0 - Command completed successfully with good status 22979 * -1 - Command failed. 22980 * 22981 */ 22982 22983 static int 22984 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 22985 { 22986 int busy_count; 22987 int timeout; 22988 int rval = SD_FAILURE; 22989 int savef; 22990 uint8_t *sensep; 22991 long savet; 22992 void (*savec)(); 22993 /* 22994 * The following is defined in machdep.c and is used in determining if 22995 * the scsi transport system will do polled I/O instead of interrupt 22996 * I/O when called from xx_dump(). 22997 */ 22998 extern int do_polled_io; 22999 23000 /* 23001 * save old flags in pkt, to restore at end 23002 */ 23003 savef = pkt->pkt_flags; 23004 savec = pkt->pkt_comp; 23005 savet = pkt->pkt_time; 23006 23007 pkt->pkt_flags |= FLAG_NOINTR; 23008 23009 /* 23010 * XXX there is nothing in the SCSA spec that states that we should not 23011 * do a callback for polled cmds; however, removing this will break sd 23012 * and probably other target drivers 23013 */ 23014 pkt->pkt_comp = NULL; 23015 23016 /* 23017 * we don't like a polled command without timeout. 23018 * 60 seconds seems long enough. 
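 *
 * (With SD_CSEC at 10000 usec, SD_SEC_TO_CSEC works out to 100, so a
 * 60 second pkt_time gives the loop below a budget of 6000 10 msec poll
 * slots; the busy and not-ready cases charge 100 slots per pass so that
 * they effectively retry only once per second.)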
23019 */ 23020 if (pkt->pkt_time == 0) { 23021 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23022 } 23023 23024 /* 23025 * Send polled cmd. 23026 * 23027 * We do some error recovery for various errors. Tran_busy, 23028 * queue full, and non-dispatched commands are retried every 10 msec. 23029 * as they are typically transient failures. Busy status and Not 23030 * Ready are retried every second as this status takes a while to 23031 * change. Unit attention is retried for pkt_time (60) times 23032 * with no delay. 23033 */ 23034 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 23035 23036 for (busy_count = 0; busy_count < timeout; busy_count++) { 23037 int rc; 23038 int poll_delay; 23039 23040 /* 23041 * Initialize pkt status variables. 23042 */ 23043 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23044 23045 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23046 if (rc != TRAN_BUSY) { 23047 /* Transport failed - give up. */ 23048 break; 23049 } else { 23050 /* Transport busy - try again. */ 23051 poll_delay = 1 * SD_CSEC; /* 10 msec */ 23052 } 23053 } else { 23054 /* 23055 * Transport accepted - check pkt status. 23056 */ 23057 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23058 if (pkt->pkt_reason == CMD_CMPLT && 23059 rc == STATUS_CHECK && 23060 pkt->pkt_state & STATE_ARQ_DONE) { 23061 struct scsi_arq_status *arqstat = 23062 (struct scsi_arq_status *)(pkt->pkt_scbp); 23063 23064 sensep = (uint8_t *)&arqstat->sts_sensedata; 23065 } else { 23066 sensep = NULL; 23067 } 23068 23069 if ((pkt->pkt_reason == CMD_CMPLT) && 23070 (rc == STATUS_GOOD)) { 23071 /* No error - we're done */ 23072 rval = SD_SUCCESS; 23073 break; 23074 23075 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23076 /* Lost connection - give up */ 23077 break; 23078 23079 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23080 (pkt->pkt_state == 0)) { 23081 /* Pkt not dispatched - try again. */ 23082 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23083 23084 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23085 (rc == STATUS_QFULL)) { 23086 /* Queue full - try again. */ 23087 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23088 23089 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23090 (rc == STATUS_BUSY)) { 23091 /* Busy - try again. */ 23092 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23093 busy_count += (SD_SEC_TO_CSEC - 1); 23094 23095 } else if ((sensep != NULL) && 23096 (scsi_sense_key(sensep) == 23097 KEY_UNIT_ATTENTION)) { 23098 /* Unit Attention - try again */ 23099 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 23100 continue; 23101 23102 } else if ((sensep != NULL) && 23103 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23104 (scsi_sense_asc(sensep) == 0x04) && 23105 (scsi_sense_ascq(sensep) == 0x01)) { 23106 /* Not ready -> ready - try again. */ 23107 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23108 busy_count += (SD_SEC_TO_CSEC - 1); 23109 23110 } else { 23111 /* BAD status - give up. 
*/ 23112 break; 23113 } 23114 } 23115 23116 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 23117 !do_polled_io) { 23118 delay(drv_usectohz(poll_delay)); 23119 } else { 23120 /* we busy wait during cpr_dump or interrupt threads */ 23121 drv_usecwait(poll_delay); 23122 } 23123 } 23124 23125 pkt->pkt_flags = savef; 23126 pkt->pkt_comp = savec; 23127 pkt->pkt_time = savet; 23128 return (rval); 23129 } 23130 23131 23132 /* 23133 * Function: sd_persistent_reservation_in_read_keys 23134 * 23135 * Description: This routine is the driver entry point for handling CD-ROM 23136 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23137 * by sending the SCSI-3 PRIN commands to the device. 23138 * Processes the read keys command response by copying the 23139 * reservation key information into the user provided buffer. 23140 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23141 * 23142 * Arguments: un - Pointer to soft state struct for the target. 23143 * usrp - user provided pointer to multihost Persistent In Read 23144 * Keys structure (mhioc_inkeys_t) 23145 * flag - this argument is a pass through to ddi_copyxxx() 23146 * directly from the mode argument of ioctl(). 23147 * 23148 * Return Code: 0 - Success 23149 * EACCES 23150 * ENOTSUP 23151 * errno return code from sd_send_scsi_cmd() 23152 * 23153 * Context: Can sleep. Does not return until command is completed. 23154 */ 23155 23156 static int 23157 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23158 mhioc_inkeys_t *usrp, int flag) 23159 { 23160 #ifdef _MULTI_DATAMODEL 23161 struct mhioc_key_list32 li32; 23162 #endif 23163 sd_prin_readkeys_t *in; 23164 mhioc_inkeys_t *ptr; 23165 mhioc_key_list_t li; 23166 uchar_t *data_bufp; 23167 int data_len; 23168 int rval; 23169 size_t copysz; 23170 23171 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23172 return (EINVAL); 23173 } 23174 bzero(&li, sizeof (mhioc_key_list_t)); 23175 23176 /* 23177 * Get the listsize from user 23178 */ 23179 #ifdef _MULTI_DATAMODEL 23180 23181 switch (ddi_model_convert_from(flag & FMODELS)) { 23182 case DDI_MODEL_ILP32: 23183 copysz = sizeof (struct mhioc_key_list32); 23184 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23185 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23186 "sd_persistent_reservation_in_read_keys: " 23187 "failed ddi_copyin: mhioc_key_list32_t\n"); 23188 rval = EFAULT; 23189 goto done; 23190 } 23191 li.listsize = li32.listsize; 23192 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23193 break; 23194 23195 case DDI_MODEL_NONE: 23196 copysz = sizeof (mhioc_key_list_t); 23197 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23198 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23199 "sd_persistent_reservation_in_read_keys: " 23200 "failed ddi_copyin: mhioc_key_list_t\n"); 23201 rval = EFAULT; 23202 goto done; 23203 } 23204 break; 23205 } 23206 23207 #else /* ! 
_MULTI_DATAMODEL */ 23208 copysz = sizeof (mhioc_key_list_t); 23209 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23210 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23211 "sd_persistent_reservation_in_read_keys: " 23212 "failed ddi_copyin: mhioc_key_list_t\n"); 23213 rval = EFAULT; 23214 goto done; 23215 } 23216 #endif 23217 23218 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23219 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23220 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23221 23222 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23223 data_len, data_bufp)) != 0) { 23224 goto done; 23225 } 23226 in = (sd_prin_readkeys_t *)data_bufp; 23227 ptr->generation = BE_32(in->generation); 23228 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23229 23230 /* 23231 * Return the min(listsize, listlen) keys 23232 */ 23233 #ifdef _MULTI_DATAMODEL 23234 23235 switch (ddi_model_convert_from(flag & FMODELS)) { 23236 case DDI_MODEL_ILP32: 23237 li32.listlen = li.listlen; 23238 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23239 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23240 "sd_persistent_reservation_in_read_keys: " 23241 "failed ddi_copyout: mhioc_key_list32_t\n"); 23242 rval = EFAULT; 23243 goto done; 23244 } 23245 break; 23246 23247 case DDI_MODEL_NONE: 23248 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23249 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23250 "sd_persistent_reservation_in_read_keys: " 23251 "failed ddi_copyout: mhioc_key_list_t\n"); 23252 rval = EFAULT; 23253 goto done; 23254 } 23255 break; 23256 } 23257 23258 #else /* ! _MULTI_DATAMODEL */ 23259 23260 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23261 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23262 "sd_persistent_reservation_in_read_keys: " 23263 "failed ddi_copyout: mhioc_key_list_t\n"); 23264 rval = EFAULT; 23265 goto done; 23266 } 23267 23268 #endif /* _MULTI_DATAMODEL */ 23269 23270 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23271 li.listsize * MHIOC_RESV_KEY_SIZE); 23272 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23273 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23274 "sd_persistent_reservation_in_read_keys: " 23275 "failed ddi_copyout: keylist\n"); 23276 rval = EFAULT; 23277 } 23278 done: 23279 kmem_free(data_bufp, data_len); 23280 return (rval); 23281 } 23282 23283 23284 /* 23285 * Function: sd_persistent_reservation_in_read_resv 23286 * 23287 * Description: This routine is the driver entry point for handling CD-ROM 23288 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23289 * by sending the SCSI-3 PRIN commands to the device. 23290 * Process the read persistent reservations command response by 23291 * copying the reservation information into the user provided 23292 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 23293 * 23294 * Arguments: un - Pointer to soft state struct for the target. 23295 * usrp - user provided pointer to multihost Persistent In Read 23296 * Keys structure (mhioc_inkeys_t) 23297 * flag - this argument is a pass through to ddi_copyxxx() 23298 * directly from the mode argument of ioctl(). 23299 * 23300 * Return Code: 0 - Success 23301 * EACCES 23302 * ENOTSUP 23303 * errno return code from sd_send_scsi_cmd() 23304 * 23305 * Context: Can sleep. Does not return until command is completed. 
23306 */ 23307 23308 static int 23309 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23310 mhioc_inresvs_t *usrp, int flag) 23311 { 23312 #ifdef _MULTI_DATAMODEL 23313 struct mhioc_resv_desc_list32 resvlist32; 23314 #endif 23315 sd_prin_readresv_t *in; 23316 mhioc_inresvs_t *ptr; 23317 sd_readresv_desc_t *readresv_ptr; 23318 mhioc_resv_desc_list_t resvlist; 23319 mhioc_resv_desc_t resvdesc; 23320 uchar_t *data_bufp; 23321 int data_len; 23322 int rval; 23323 int i; 23324 size_t copysz; 23325 mhioc_resv_desc_t *bufp; 23326 23327 if ((ptr = usrp) == NULL) { 23328 return (EINVAL); 23329 } 23330 23331 /* 23332 * Get the listsize from user 23333 */ 23334 #ifdef _MULTI_DATAMODEL 23335 switch (ddi_model_convert_from(flag & FMODELS)) { 23336 case DDI_MODEL_ILP32: 23337 copysz = sizeof (struct mhioc_resv_desc_list32); 23338 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23339 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23340 "sd_persistent_reservation_in_read_resv: " 23341 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23342 rval = EFAULT; 23343 goto done; 23344 } 23345 resvlist.listsize = resvlist32.listsize; 23346 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23347 break; 23348 23349 case DDI_MODEL_NONE: 23350 copysz = sizeof (mhioc_resv_desc_list_t); 23351 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23352 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23353 "sd_persistent_reservation_in_read_resv: " 23354 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23355 rval = EFAULT; 23356 goto done; 23357 } 23358 break; 23359 } 23360 #else /* ! _MULTI_DATAMODEL */ 23361 copysz = sizeof (mhioc_resv_desc_list_t); 23362 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23363 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23364 "sd_persistent_reservation_in_read_resv: " 23365 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23366 rval = EFAULT; 23367 goto done; 23368 } 23369 #endif /* ! _MULTI_DATAMODEL */ 23370 23371 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23372 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23373 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23374 23375 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23376 data_len, data_bufp)) != 0) { 23377 goto done; 23378 } 23379 in = (sd_prin_readresv_t *)data_bufp; 23380 ptr->generation = BE_32(in->generation); 23381 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23382 23383 /* 23384 * Return the min(listsize, listlen( keys 23385 */ 23386 #ifdef _MULTI_DATAMODEL 23387 23388 switch (ddi_model_convert_from(flag & FMODELS)) { 23389 case DDI_MODEL_ILP32: 23390 resvlist32.listlen = resvlist.listlen; 23391 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23392 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23393 "sd_persistent_reservation_in_read_resv: " 23394 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23395 rval = EFAULT; 23396 goto done; 23397 } 23398 break; 23399 23400 case DDI_MODEL_NONE: 23401 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23402 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23403 "sd_persistent_reservation_in_read_resv: " 23404 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23405 rval = EFAULT; 23406 goto done; 23407 } 23408 break; 23409 } 23410 23411 #else /* ! _MULTI_DATAMODEL */ 23412 23413 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23414 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23415 "sd_persistent_reservation_in_read_resv: " 23416 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23417 rval = EFAULT; 23418 goto done; 23419 } 23420 23421 #endif /* ! 
_MULTI_DATAMODEL */ 23422 23423 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23424 bufp = resvlist.list; 23425 copysz = sizeof (mhioc_resv_desc_t); 23426 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23427 i++, readresv_ptr++, bufp++) { 23428 23429 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23430 MHIOC_RESV_KEY_SIZE); 23431 resvdesc.type = readresv_ptr->type; 23432 resvdesc.scope = readresv_ptr->scope; 23433 resvdesc.scope_specific_addr = 23434 BE_32(readresv_ptr->scope_specific_addr); 23435 23436 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23437 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23438 "sd_persistent_reservation_in_read_resv: " 23439 "failed ddi_copyout: resvlist\n"); 23440 rval = EFAULT; 23441 goto done; 23442 } 23443 } 23444 done: 23445 kmem_free(data_bufp, data_len); 23446 return (rval); 23447 } 23448 23449 23450 /* 23451 * Function: sr_change_blkmode() 23452 * 23453 * Description: This routine is the driver entry point for handling CD-ROM 23454 * block mode ioctl requests. Support for returning and changing 23455 * the current block size in use by the device is implemented. The 23456 * LBA size is changed via a MODE SELECT Block Descriptor. 23457 * 23458 * This routine issues a mode sense with an allocation length of 23459 * 12 bytes for the mode page header and a single block descriptor. 23460 * 23461 * Arguments: dev - the device 'dev_t' 23462 * cmd - the request type; one of CDROMGBLKMODE (get) or 23463 * CDROMSBLKMODE (set) 23464 * data - current block size or requested block size 23465 * flag - this argument is a pass through to ddi_copyxxx() directly 23466 * from the mode argument of ioctl(). 23467 * 23468 * Return Code: the code returned by sd_send_scsi_cmd() 23469 * EINVAL if invalid arguments are provided 23470 * EFAULT if ddi_copyxxx() fails 23471 * ENXIO if fail ddi_get_soft_state 23472 * EIO if invalid mode sense block descriptor length 23473 * 23474 */ 23475 23476 static int 23477 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23478 { 23479 struct sd_lun *un = NULL; 23480 struct mode_header *sense_mhp, *select_mhp; 23481 struct block_descriptor *sense_desc, *select_desc; 23482 int current_bsize; 23483 int rval = EINVAL; 23484 uchar_t *sense = NULL; 23485 uchar_t *select = NULL; 23486 23487 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23488 23489 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23490 return (ENXIO); 23491 } 23492 23493 /* 23494 * The block length is changed via the Mode Select block descriptor, the 23495 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 23496 * required as part of this routine. Therefore the mode sense allocation 23497 * length is specified to be the length of a mode page header and a 23498 * block descriptor. 
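 *
 * (That is, the 12 bytes mentioned in the function header above: a 4 byte
 * group 0 mode header plus one 8 byte block descriptor, which is what
 * BUFLEN_CHG_BLK_MODE is assumed to cover.)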
23499 */ 23500 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23501 23502 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23503 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 23504 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23505 "sr_change_blkmode: Mode Sense Failed\n"); 23506 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23507 return (rval); 23508 } 23509 23510 /* Check the block descriptor len to handle only 1 block descriptor */ 23511 sense_mhp = (struct mode_header *)sense; 23512 if ((sense_mhp->bdesc_length == 0) || 23513 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 23514 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23515 "sr_change_blkmode: Mode Sense returned invalid block" 23516 " descriptor length\n"); 23517 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23518 return (EIO); 23519 } 23520 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 23521 current_bsize = ((sense_desc->blksize_hi << 16) | 23522 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 23523 23524 /* Process command */ 23525 switch (cmd) { 23526 case CDROMGBLKMODE: 23527 /* Return the block size obtained during the mode sense */ 23528 if (ddi_copyout(¤t_bsize, (void *)data, 23529 sizeof (int), flag) != 0) 23530 rval = EFAULT; 23531 break; 23532 case CDROMSBLKMODE: 23533 /* Validate the requested block size */ 23534 switch (data) { 23535 case CDROM_BLK_512: 23536 case CDROM_BLK_1024: 23537 case CDROM_BLK_2048: 23538 case CDROM_BLK_2056: 23539 case CDROM_BLK_2336: 23540 case CDROM_BLK_2340: 23541 case CDROM_BLK_2352: 23542 case CDROM_BLK_2368: 23543 case CDROM_BLK_2448: 23544 case CDROM_BLK_2646: 23545 case CDROM_BLK_2647: 23546 break; 23547 default: 23548 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23549 "sr_change_blkmode: " 23550 "Block Size '%ld' Not Supported\n", data); 23551 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23552 return (EINVAL); 23553 } 23554 23555 /* 23556 * The current block size matches the requested block size so 23557 * there is no need to send the mode select to change the size 23558 */ 23559 if (current_bsize == data) { 23560 break; 23561 } 23562 23563 /* Build the select data for the requested block size */ 23564 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23565 select_mhp = (struct mode_header *)select; 23566 select_desc = 23567 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 23568 /* 23569 * The LBA size is changed via the block descriptor, so the 23570 * descriptor is built according to the user data 23571 */ 23572 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 23573 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 23574 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 23575 select_desc->blksize_lo = (char)((data) & 0x000000ff); 23576 23577 /* Send the mode select for the requested block size */ 23578 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23579 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23580 SD_PATH_STANDARD)) != 0) { 23581 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23582 "sr_change_blkmode: Mode Select Failed\n"); 23583 /* 23584 * The mode select failed for the requested block size, 23585 * so reset the data for the original block size and 23586 * send it to the target. The error is indicated by the 23587 * return value for the failed mode select. 
23588 */ 23589 select_desc->blksize_hi = sense_desc->blksize_hi; 23590 select_desc->blksize_mid = sense_desc->blksize_mid; 23591 select_desc->blksize_lo = sense_desc->blksize_lo; 23592 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23593 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23594 SD_PATH_STANDARD); 23595 } else { 23596 ASSERT(!mutex_owned(SD_MUTEX(un))); 23597 mutex_enter(SD_MUTEX(un)); 23598 sd_update_block_info(un, (uint32_t)data, 0); 23599 mutex_exit(SD_MUTEX(un)); 23600 } 23601 break; 23602 default: 23603 /* should not reach here, but check anyway */ 23604 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23605 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 23606 rval = EINVAL; 23607 break; 23608 } 23609 23610 if (select) { 23611 kmem_free(select, BUFLEN_CHG_BLK_MODE); 23612 } 23613 if (sense) { 23614 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23615 } 23616 return (rval); 23617 } 23618 23619 23620 /* 23621 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 23622 * implement driver support for getting and setting the CD speed. The command 23623 * set used will be based on the device type. If the device has not been 23624 * identified as MMC the Toshiba vendor specific mode page will be used. If 23625 * the device is MMC but does not support the Real Time Streaming feature 23626 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 23627 * be used to read the speed. 23628 */ 23629 23630 /* 23631 * Function: sr_change_speed() 23632 * 23633 * Description: This routine is the driver entry point for handling CD-ROM 23634 * drive speed ioctl requests for devices supporting the Toshiba 23635 * vendor specific drive speed mode page. Support for returning 23636 * and changing the current drive speed in use by the device is 23637 * implemented. 23638 * 23639 * Arguments: dev - the device 'dev_t' 23640 * cmd - the request type; one of CDROMGDRVSPEED (get) or 23641 * CDROMSDRVSPEED (set) 23642 * data - current drive speed or requested drive speed 23643 * flag - this argument is a pass through to ddi_copyxxx() directly 23644 * from the mode argument of ioctl(). 23645 * 23646 * Return Code: the code returned by sd_send_scsi_cmd() 23647 * EINVAL if invalid arguments are provided 23648 * EFAULT if ddi_copyxxx() fails 23649 * ENXIO if fail ddi_get_soft_state 23650 * EIO if invalid mode sense block descriptor length 23651 */ 23652 23653 static int 23654 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23655 { 23656 struct sd_lun *un = NULL; 23657 struct mode_header *sense_mhp, *select_mhp; 23658 struct mode_speed *sense_page, *select_page; 23659 int current_speed; 23660 int rval = EINVAL; 23661 int bd_len; 23662 uchar_t *sense = NULL; 23663 uchar_t *select = NULL; 23664 23665 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23666 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23667 return (ENXIO); 23668 } 23669 23670 /* 23671 * Note: The drive speed is being modified here according to a Toshiba 23672 * vendor specific mode page (0x31). 
23673 */ 23674 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23675 23676 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23677 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 23678 SD_PATH_STANDARD)) != 0) { 23679 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23680 "sr_change_speed: Mode Sense Failed\n"); 23681 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23682 return (rval); 23683 } 23684 sense_mhp = (struct mode_header *)sense; 23685 23686 /* Check the block descriptor len to handle only 1 block descriptor */ 23687 bd_len = sense_mhp->bdesc_length; 23688 if (bd_len > MODE_BLK_DESC_LENGTH) { 23689 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23690 "sr_change_speed: Mode Sense returned invalid block " 23691 "descriptor length\n"); 23692 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23693 return (EIO); 23694 } 23695 23696 sense_page = (struct mode_speed *) 23697 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 23698 current_speed = sense_page->speed; 23699 23700 /* Process command */ 23701 switch (cmd) { 23702 case CDROMGDRVSPEED: 23703 /* Return the drive speed obtained during the mode sense */ 23704 if (current_speed == 0x2) { 23705 current_speed = CDROM_TWELVE_SPEED; 23706 } 23707 if (ddi_copyout(¤t_speed, (void *)data, 23708 sizeof (int), flag) != 0) { 23709 rval = EFAULT; 23710 } 23711 break; 23712 case CDROMSDRVSPEED: 23713 /* Validate the requested drive speed */ 23714 switch ((uchar_t)data) { 23715 case CDROM_TWELVE_SPEED: 23716 data = 0x2; 23717 /*FALLTHROUGH*/ 23718 case CDROM_NORMAL_SPEED: 23719 case CDROM_DOUBLE_SPEED: 23720 case CDROM_QUAD_SPEED: 23721 case CDROM_MAXIMUM_SPEED: 23722 break; 23723 default: 23724 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23725 "sr_change_speed: " 23726 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 23727 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23728 return (EINVAL); 23729 } 23730 23731 /* 23732 * The current drive speed matches the requested drive speed so 23733 * there is no need to send the mode select to change the speed 23734 */ 23735 if (current_speed == data) { 23736 break; 23737 } 23738 23739 /* Build the select data for the requested drive speed */ 23740 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23741 select_mhp = (struct mode_header *)select; 23742 select_mhp->bdesc_length = 0; 23743 select_page = 23744 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 23745 select_page = 23746 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 23747 select_page->mode_page.code = CDROM_MODE_SPEED; 23748 select_page->mode_page.length = 2; 23749 select_page->speed = (uchar_t)data; 23750 23751 /* Send the mode select for the requested block size */ 23752 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23753 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23754 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 23755 /* 23756 * The mode select failed for the requested drive speed, 23757 * so reset the data for the original drive speed and 23758 * send it to the target. The error is indicated by the 23759 * return value for the failed mode select. 
23760 */ 23761 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23762 "sr_drive_speed: Mode Select Failed\n"); 23763 select_page->speed = sense_page->speed; 23764 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23765 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23766 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 23767 } 23768 break; 23769 default: 23770 /* should not reach here, but check anyway */ 23771 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23772 "sr_change_speed: Command '%x' Not Supported\n", cmd); 23773 rval = EINVAL; 23774 break; 23775 } 23776 23777 if (select) { 23778 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 23779 } 23780 if (sense) { 23781 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23782 } 23783 23784 return (rval); 23785 } 23786 23787 23788 /* 23789 * Function: sr_atapi_change_speed() 23790 * 23791 * Description: This routine is the driver entry point for handling CD-ROM 23792 * drive speed ioctl requests for MMC devices that do not support 23793 * the Real Time Streaming feature (0x107). 23794 * 23795 * Note: This routine will use the SET SPEED command which may not 23796 * be supported by all devices. 23797 * 23798 * Arguments: dev- the device 'dev_t' 23799 * cmd- the request type; one of CDROMGDRVSPEED (get) or 23800 * CDROMSDRVSPEED (set) 23801 * data- current drive speed or requested drive speed 23802 * flag- this argument is a pass through to ddi_copyxxx() directly 23803 * from the mode argument of ioctl(). 23804 * 23805 * Return Code: the code returned by sd_send_scsi_cmd() 23806 * EINVAL if invalid arguments are provided 23807 * EFAULT if ddi_copyxxx() fails 23808 * ENXIO if fail ddi_get_soft_state 23809 * EIO if invalid mode sense block descriptor length 23810 */ 23811 23812 static int 23813 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23814 { 23815 struct sd_lun *un; 23816 struct uscsi_cmd *com = NULL; 23817 struct mode_header_grp2 *sense_mhp; 23818 uchar_t *sense_page; 23819 uchar_t *sense = NULL; 23820 char cdb[CDB_GROUP5]; 23821 int bd_len; 23822 int current_speed = 0; 23823 int max_speed = 0; 23824 int rval; 23825 23826 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23827 23828 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23829 return (ENXIO); 23830 } 23831 23832 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 23833 23834 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 23835 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 23836 SD_PATH_STANDARD)) != 0) { 23837 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23838 "sr_atapi_change_speed: Mode Sense Failed\n"); 23839 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23840 return (rval); 23841 } 23842 23843 /* Check the block descriptor len to handle only 1 block descriptor */ 23844 sense_mhp = (struct mode_header_grp2 *)sense; 23845 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 23846 if (bd_len > MODE_BLK_DESC_LENGTH) { 23847 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23848 "sr_atapi_change_speed: Mode Sense returned invalid " 23849 "block descriptor length\n"); 23850 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23851 return (EIO); 23852 } 23853 23854 /* Calculate the current and maximum drive speeds */ 23855 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 23856 current_speed = (sense_page[14] << 8) | sense_page[15]; 23857 max_speed = (sense_page[8] << 8) | sense_page[9]; 23858 23859 /* Process the command */ 23860 switch (cmd) { 23861 case CDROMGDRVSPEED: 23862 current_speed /= SD_SPEED_1X; 23863 if 
(ddi_copyout(&current_speed, (void *)data, 23864 sizeof (int), flag) != 0) 23865 rval = EFAULT; 23866 break; 23867 case CDROMSDRVSPEED: 23868 /* Convert the speed code to KB/sec */ 23869 switch ((uchar_t)data) { 23870 case CDROM_NORMAL_SPEED: 23871 current_speed = SD_SPEED_1X; 23872 break; 23873 case CDROM_DOUBLE_SPEED: 23874 current_speed = 2 * SD_SPEED_1X; 23875 break; 23876 case CDROM_QUAD_SPEED: 23877 current_speed = 4 * SD_SPEED_1X; 23878 break; 23879 case CDROM_TWELVE_SPEED: 23880 current_speed = 12 * SD_SPEED_1X; 23881 break; 23882 case CDROM_MAXIMUM_SPEED: 23883 current_speed = 0xffff; 23884 break; 23885 default: 23886 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23887 "sr_atapi_change_speed: invalid drive speed %d\n", 23888 (uchar_t)data); 23889 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23890 return (EINVAL); 23891 } 23892 23893 /* Check the request against the drive's max speed. */ 23894 if (current_speed != 0xffff) { 23895 if (current_speed > max_speed) { 23896 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23897 return (EINVAL); 23898 } 23899 } 23900 23901 /* 23902 * Build and send the SET SPEED command 23903 * 23904 * Note: The SET SPEED (0xBB) command used in this routine is 23905 * obsolete per the SCSI MMC spec but still supported in the 23906 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 23907 * therefore the command is still implemented in this routine. 23908 */ 23909 bzero(cdb, sizeof (cdb)); 23910 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 23911 cdb[2] = (uchar_t)(current_speed >> 8); 23912 cdb[3] = (uchar_t)current_speed; 23913 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23914 com->uscsi_cdb = (caddr_t)cdb; 23915 com->uscsi_cdblen = CDB_GROUP5; 23916 com->uscsi_bufaddr = NULL; 23917 com->uscsi_buflen = 0; 23918 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23919 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 23920 break; 23921 default: 23922 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23923 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 23924 rval = EINVAL; 23925 } 23926 23927 if (sense) { 23928 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23929 } 23930 if (com) { 23931 kmem_free(com, sizeof (*com)); 23932 } 23933 return (rval); 23934 } 23935 23936 23937 /* 23938 * Function: sr_pause_resume() 23939 * 23940 * Description: This routine is the driver entry point for handling CD-ROM 23941 * pause/resume ioctl requests. This only affects the audio play 23942 * operation. 23943 * 23944 * Arguments: dev - the device 'dev_t' 23945 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 23946 * for setting the resume bit of the cdb.
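 *
 * Illustrative userland call pattern (a sketch only, not part of this
 * driver; the open file descriptor "fd" and error handling are assumed):
 *
 *	(void) ioctl(fd, CDROMPAUSE, 0);	pause audio playback
 *	(void) ioctl(fd, CDROMRESUME, 0);	resume audio playback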
23947 * 23948 * Return Code: the code returned by sd_send_scsi_cmd() 23949 * EINVAL if invalid mode specified 23950 * 23951 */ 23952 23953 static int 23954 sr_pause_resume(dev_t dev, int cmd) 23955 { 23956 struct sd_lun *un; 23957 struct uscsi_cmd *com; 23958 char cdb[CDB_GROUP1]; 23959 int rval; 23960 23961 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23962 return (ENXIO); 23963 } 23964 23965 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23966 bzero(cdb, CDB_GROUP1); 23967 cdb[0] = SCMD_PAUSE_RESUME; 23968 switch (cmd) { 23969 case CDROMRESUME: 23970 cdb[8] = 1; 23971 break; 23972 case CDROMPAUSE: 23973 cdb[8] = 0; 23974 break; 23975 default: 23976 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 23977 " Command '%x' Not Supported\n", cmd); 23978 rval = EINVAL; 23979 goto done; 23980 } 23981 23982 com->uscsi_cdb = cdb; 23983 com->uscsi_cdblen = CDB_GROUP1; 23984 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23985 23986 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23987 SD_PATH_STANDARD); 23988 23989 done: 23990 kmem_free(com, sizeof (*com)); 23991 return (rval); 23992 } 23993 23994 23995 /* 23996 * Function: sr_play_msf() 23997 * 23998 * Description: This routine is the driver entry point for handling CD-ROM 23999 * ioctl requests to output the audio signals at the specified 24000 * starting address and continue the audio play until the specified 24001 * ending address (CDROMPLAYMSF) The address is in Minute Second 24002 * Frame (MSF) format. 24003 * 24004 * Arguments: dev - the device 'dev_t' 24005 * data - pointer to user provided audio msf structure, 24006 * specifying start/end addresses. 24007 * flag - this argument is a pass through to ddi_copyxxx() 24008 * directly from the mode argument of ioctl(). 
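 *
 * Illustrative userland usage of CDROMPLAYMSF (a sketch only; the file
 * descriptor "fd" and error handling are assumed, not taken from this
 * file):
 *
 *	struct cdrom_msf msf;
 *	msf.cdmsf_min0 = 0; msf.cdmsf_sec0 = 2; msf.cdmsf_frame0 = 0;
 *	msf.cdmsf_min1 = 4; msf.cdmsf_sec1 = 30; msf.cdmsf_frame1 = 0;
 *	(void) ioctl(fd, CDROMPLAYMSF, &msf);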
24009 * 24010 * Return Code: the code returned by sd_send_scsi_cmd() 24011 * EFAULT if ddi_copyxxx() fails 24012 * ENXIO if fail ddi_get_soft_state 24013 * EINVAL if data pointer is NULL 24014 */ 24015 24016 static int 24017 sr_play_msf(dev_t dev, caddr_t data, int flag) 24018 { 24019 struct sd_lun *un; 24020 struct uscsi_cmd *com; 24021 struct cdrom_msf msf_struct; 24022 struct cdrom_msf *msf = &msf_struct; 24023 char cdb[CDB_GROUP1]; 24024 int rval; 24025 24026 if (data == NULL) { 24027 return (EINVAL); 24028 } 24029 24030 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24031 return (ENXIO); 24032 } 24033 24034 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24035 return (EFAULT); 24036 } 24037 24038 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24039 bzero(cdb, CDB_GROUP1); 24040 cdb[0] = SCMD_PLAYAUDIO_MSF; 24041 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24042 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24043 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24044 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24045 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24046 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24047 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24048 } else { 24049 cdb[3] = msf->cdmsf_min0; 24050 cdb[4] = msf->cdmsf_sec0; 24051 cdb[5] = msf->cdmsf_frame0; 24052 cdb[6] = msf->cdmsf_min1; 24053 cdb[7] = msf->cdmsf_sec1; 24054 cdb[8] = msf->cdmsf_frame1; 24055 } 24056 com->uscsi_cdb = cdb; 24057 com->uscsi_cdblen = CDB_GROUP1; 24058 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24059 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24060 SD_PATH_STANDARD); 24061 kmem_free(com, sizeof (*com)); 24062 return (rval); 24063 } 24064 24065 24066 /* 24067 * Function: sr_play_trkind() 24068 * 24069 * Description: This routine is the driver entry point for handling CD-ROM 24070 * ioctl requests to output the audio signals at the specified 24071 * starting address and continue the audio play until the specified 24072 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24073 * format. 24074 * 24075 * Arguments: dev - the device 'dev_t' 24076 * data - pointer to user provided audio track/index structure, 24077 * specifying start/end addresses. 24078 * flag - this argument is a pass through to ddi_copyxxx() 24079 * directly from the mode argument of ioctl(). 
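 *
 * Illustrative userland usage of CDROMPLAYTRKIND (a sketch only; "fd"
 * and error handling are assumed):
 *
 *	struct cdrom_ti ti;
 *	ti.cdti_trk0 = 1; ti.cdti_ind0 = 1;	start: track 1, index 1
 *	ti.cdti_trk1 = 3; ti.cdti_ind1 = 1;	end: track 3, index 1
 *	(void) ioctl(fd, CDROMPLAYTRKIND, &ti);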
24080 * 24081 * Return Code: the code returned by sd_send_scsi_cmd() 24082 * EFAULT if ddi_copyxxx() fails 24083 * ENXIO if fail ddi_get_soft_state 24084 * EINVAL if data pointer is NULL 24085 */ 24086 24087 static int 24088 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24089 { 24090 struct cdrom_ti ti_struct; 24091 struct cdrom_ti *ti = &ti_struct; 24092 struct uscsi_cmd *com = NULL; 24093 char cdb[CDB_GROUP1]; 24094 int rval; 24095 24096 if (data == NULL) { 24097 return (EINVAL); 24098 } 24099 24100 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24101 return (EFAULT); 24102 } 24103 24104 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24105 bzero(cdb, CDB_GROUP1); 24106 cdb[0] = SCMD_PLAYAUDIO_TI; 24107 cdb[4] = ti->cdti_trk0; 24108 cdb[5] = ti->cdti_ind0; 24109 cdb[7] = ti->cdti_trk1; 24110 cdb[8] = ti->cdti_ind1; 24111 com->uscsi_cdb = cdb; 24112 com->uscsi_cdblen = CDB_GROUP1; 24113 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24114 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24115 SD_PATH_STANDARD); 24116 kmem_free(com, sizeof (*com)); 24117 return (rval); 24118 } 24119 24120 24121 /* 24122 * Function: sr_read_all_subcodes() 24123 * 24124 * Description: This routine is the driver entry point for handling CD-ROM 24125 * ioctl requests to return raw subcode data while the target is 24126 * playing audio (CDROMSUBCODE). 24127 * 24128 * Arguments: dev - the device 'dev_t' 24129 * data - pointer to user provided cdrom subcode structure, 24130 * specifying the transfer length and address. 24131 * flag - this argument is a pass through to ddi_copyxxx() 24132 * directly from the mode argument of ioctl(). 24133 * 24134 * Return Code: the code returned by sd_send_scsi_cmd() 24135 * EFAULT if ddi_copyxxx() fails 24136 * ENXIO if fail ddi_get_soft_state 24137 * EINVAL if data pointer is NULL 24138 */ 24139 24140 static int 24141 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24142 { 24143 struct sd_lun *un = NULL; 24144 struct uscsi_cmd *com = NULL; 24145 struct cdrom_subcode *subcode = NULL; 24146 int rval; 24147 size_t buflen; 24148 char cdb[CDB_GROUP5]; 24149 24150 #ifdef _MULTI_DATAMODEL 24151 /* To support ILP32 applications in an LP64 world */ 24152 struct cdrom_subcode32 cdrom_subcode32; 24153 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24154 #endif 24155 if (data == NULL) { 24156 return (EINVAL); 24157 } 24158 24159 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24160 return (ENXIO); 24161 } 24162 24163 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24164 24165 #ifdef _MULTI_DATAMODEL 24166 switch (ddi_model_convert_from(flag & FMODELS)) { 24167 case DDI_MODEL_ILP32: 24168 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24169 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24170 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24171 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24172 return (EFAULT); 24173 } 24174 /* Convert the ILP32 uscsi data from the application to LP64 */ 24175 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24176 break; 24177 case DDI_MODEL_NONE: 24178 if (ddi_copyin(data, subcode, 24179 sizeof (struct cdrom_subcode), flag)) { 24180 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24181 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24182 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24183 return (EFAULT); 24184 } 24185 break; 24186 } 24187 #else /* ! 
_MULTI_DATAMODEL */ 24188 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 24189 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24190 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24191 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24192 return (EFAULT); 24193 } 24194 #endif /* _MULTI_DATAMODEL */ 24195 24196 /* 24197 * Since MMC-2 expects max 3 bytes for length, check if the 24198 * length input is greater than 3 bytes 24199 */ 24200 if ((subcode->cdsc_length & 0xFF000000) != 0) { 24201 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24202 "sr_read_all_subcodes: " 24203 "cdrom transfer length too large: %d (limit %d)\n", 24204 subcode->cdsc_length, 0xFFFFFF); 24205 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24206 return (EINVAL); 24207 } 24208 24209 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 24210 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24211 bzero(cdb, CDB_GROUP5); 24212 24213 if (un->un_f_mmc_cap == TRUE) { 24214 cdb[0] = (char)SCMD_READ_CD; 24215 cdb[2] = (char)0xff; 24216 cdb[3] = (char)0xff; 24217 cdb[4] = (char)0xff; 24218 cdb[5] = (char)0xff; 24219 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 24220 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 24221 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 24222 cdb[10] = 1; 24223 } else { 24224 /* 24225 * Note: A vendor specific command (0xDF) is being used her to 24226 * request a read of all subcodes. 24227 */ 24228 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 24229 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 24230 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 24231 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 24232 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 24233 } 24234 com->uscsi_cdb = cdb; 24235 com->uscsi_cdblen = CDB_GROUP5; 24236 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 24237 com->uscsi_buflen = buflen; 24238 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24239 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24240 SD_PATH_STANDARD); 24241 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24242 kmem_free(com, sizeof (*com)); 24243 return (rval); 24244 } 24245 24246 24247 /* 24248 * Function: sr_read_subchannel() 24249 * 24250 * Description: This routine is the driver entry point for handling CD-ROM 24251 * ioctl requests to return the Q sub-channel data of the CD 24252 * current position block. (CDROMSUBCHNL) The data includes the 24253 * track number, index number, absolute CD-ROM address (LBA or MSF 24254 * format per the user) , track relative CD-ROM address (LBA or MSF 24255 * format per the user), control data and audio status. 24256 * 24257 * Arguments: dev - the device 'dev_t' 24258 * data - pointer to user provided cdrom sub-channel structure 24259 * flag - this argument is a pass through to ddi_copyxxx() 24260 * directly from the mode argument of ioctl(). 
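 *
 * Illustrative userland usage of CDROMSUBCHNL (a sketch only; "fd" and
 * error handling are assumed):
 *
 *	struct cdrom_subchnl sc;
 *	sc.cdsc_format = CDROM_MSF;	or CDROM_LBA
 *	if (ioctl(fd, CDROMSUBCHNL, &sc) == 0) {
 *		audio status is in sc.cdsc_audiostatus; absolute and
 *		track-relative positions are in sc.cdsc_absaddr and
 *		sc.cdsc_reladdr; track/index are in sc.cdsc_trk/sc.cdsc_ind
 *	}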
24261 * 24262 * Return Code: the code returned by sd_send_scsi_cmd() 24263 * EFAULT if ddi_copyxxx() fails 24264 * ENXIO if fail ddi_get_soft_state 24265 * EINVAL if data pointer is NULL 24266 */ 24267 24268 static int 24269 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24270 { 24271 struct sd_lun *un; 24272 struct uscsi_cmd *com; 24273 struct cdrom_subchnl subchanel; 24274 struct cdrom_subchnl *subchnl = &subchanel; 24275 char cdb[CDB_GROUP1]; 24276 caddr_t buffer; 24277 int rval; 24278 24279 if (data == NULL) { 24280 return (EINVAL); 24281 } 24282 24283 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24284 (un->un_state == SD_STATE_OFFLINE)) { 24285 return (ENXIO); 24286 } 24287 24288 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24289 return (EFAULT); 24290 } 24291 24292 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24293 bzero(cdb, CDB_GROUP1); 24294 cdb[0] = SCMD_READ_SUBCHANNEL; 24295 /* Set the MSF bit based on the user requested address format */ 24296 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24297 /* 24298 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24299 * returned 24300 */ 24301 cdb[2] = 0x40; 24302 /* 24303 * Set byte 3 to specify the return data format. A value of 0x01 24304 * indicates that the CD-ROM current position should be returned. 24305 */ 24306 cdb[3] = 0x01; 24307 cdb[8] = 0x10; 24308 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24309 com->uscsi_cdb = cdb; 24310 com->uscsi_cdblen = CDB_GROUP1; 24311 com->uscsi_bufaddr = buffer; 24312 com->uscsi_buflen = 16; 24313 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24314 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24315 SD_PATH_STANDARD); 24316 if (rval != 0) { 24317 kmem_free(buffer, 16); 24318 kmem_free(com, sizeof (*com)); 24319 return (rval); 24320 } 24321 24322 /* Process the returned Q sub-channel data */ 24323 subchnl->cdsc_audiostatus = buffer[1]; 24324 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24325 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24326 subchnl->cdsc_trk = buffer[6]; 24327 subchnl->cdsc_ind = buffer[7]; 24328 if (subchnl->cdsc_format & CDROM_LBA) { 24329 subchnl->cdsc_absaddr.lba = 24330 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24331 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24332 subchnl->cdsc_reladdr.lba = 24333 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24334 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24335 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24336 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24337 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24338 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24339 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24340 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24341 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24342 } else { 24343 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24344 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24345 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24346 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24347 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24348 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24349 } 24350 kmem_free(buffer, 16); 24351 kmem_free(com, sizeof (*com)); 24352 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24353 != 0) { 24354 return (EFAULT); 24355 } 24356 return (rval); 24357 } 24358 24359 24360 /* 24361 * Function: sr_read_tocentry() 24362 * 
24363 * Description: This routine is the driver entry point for handling CD-ROM 24364 * ioctl requests to read from the Table of Contents (TOC) 24365 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 24366 * fields, the starting address (LBA or MSF format per the user) 24367 * and the data mode if the user specified track is a data track. 24368 * 24369 * Note: The READ HEADER (0x44) command used in this routine is 24370 * obsolete per the SCSI MMC spec but still supported in the 24371 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 24372 * therefore the command is still implemented in this routine. 24373 * 24374 * Arguments: dev - the device 'dev_t' 24375 * data - pointer to user provided toc entry structure, 24376 * specifying the track # and the address format 24377 * (LBA or MSF). 24378 * flag - this argument is a pass through to ddi_copyxxx() 24379 * directly from the mode argument of ioctl(). 24380 * 24381 * Return Code: the code returned by sd_send_scsi_cmd() 24382 * EFAULT if ddi_copyxxx() fails 24383 * ENXIO if fail ddi_get_soft_state 24384 * EINVAL if data pointer is NULL 24385 */ 24386 24387 static int 24388 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 24389 { 24390 struct sd_lun *un = NULL; 24391 struct uscsi_cmd *com; 24392 struct cdrom_tocentry toc_entry; 24393 struct cdrom_tocentry *entry = &toc_entry; 24394 caddr_t buffer; 24395 int rval; 24396 char cdb[CDB_GROUP1]; 24397 24398 if (data == NULL) { 24399 return (EINVAL); 24400 } 24401 24402 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24403 (un->un_state == SD_STATE_OFFLINE)) { 24404 return (ENXIO); 24405 } 24406 24407 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 24408 return (EFAULT); 24409 } 24410 24411 /* Validate the requested track and address format */ 24412 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 24413 return (EINVAL); 24414 } 24415 24416 if (entry->cdte_track == 0) { 24417 return (EINVAL); 24418 } 24419 24420 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 24421 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24422 bzero(cdb, CDB_GROUP1); 24423 24424 cdb[0] = SCMD_READ_TOC; 24425 /* Set the MSF bit based on the user requested address format */ 24426 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 24427 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 24428 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 24429 } else { 24430 cdb[6] = entry->cdte_track; 24431 } 24432 24433 /* 24434 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
24435 * (4 byte TOC response header + 8 byte track descriptor) 24436 */ 24437 cdb[8] = 12; 24438 com->uscsi_cdb = cdb; 24439 com->uscsi_cdblen = CDB_GROUP1; 24440 com->uscsi_bufaddr = buffer; 24441 com->uscsi_buflen = 0x0C; 24442 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 24443 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24444 SD_PATH_STANDARD); 24445 if (rval != 0) { 24446 kmem_free(buffer, 12); 24447 kmem_free(com, sizeof (*com)); 24448 return (rval); 24449 } 24450 24451 /* Process the toc entry */ 24452 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 24453 entry->cdte_ctrl = (buffer[5] & 0x0F); 24454 if (entry->cdte_format & CDROM_LBA) { 24455 entry->cdte_addr.lba = 24456 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24457 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24458 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 24459 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 24460 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 24461 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 24462 /* 24463 * Send a READ TOC command using the LBA address format to get 24464 * the LBA for the track requested so it can be used in the 24465 * READ HEADER request 24466 * 24467 * Note: The MSF bit of the READ HEADER command specifies the 24468 * output format. The block address specified in that command 24469 * must be in LBA format. 24470 */ 24471 cdb[1] = 0; 24472 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24473 SD_PATH_STANDARD); 24474 if (rval != 0) { 24475 kmem_free(buffer, 12); 24476 kmem_free(com, sizeof (*com)); 24477 return (rval); 24478 } 24479 } else { 24480 entry->cdte_addr.msf.minute = buffer[9]; 24481 entry->cdte_addr.msf.second = buffer[10]; 24482 entry->cdte_addr.msf.frame = buffer[11]; 24483 /* 24484 * Send a READ TOC command using the LBA address format to get 24485 * the LBA for the track requested so it can be used in the 24486 * READ HEADER request 24487 * 24488 * Note: The MSF bit of the READ HEADER command specifies the 24489 * output format. The block address specified in that command 24490 * must be in LBA format. 24491 */ 24492 cdb[1] = 0; 24493 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24494 SD_PATH_STANDARD); 24495 if (rval != 0) { 24496 kmem_free(buffer, 12); 24497 kmem_free(com, sizeof (*com)); 24498 return (rval); 24499 } 24500 } 24501 24502 /* 24503 * Build and send the READ HEADER command to determine the data mode of 24504 * the user specified track. 24505 */ 24506 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 24507 (entry->cdte_track != CDROM_LEADOUT)) { 24508 bzero(cdb, CDB_GROUP1); 24509 cdb[0] = SCMD_READ_HEADER; 24510 cdb[2] = buffer[8]; 24511 cdb[3] = buffer[9]; 24512 cdb[4] = buffer[10]; 24513 cdb[5] = buffer[11]; 24514 cdb[8] = 0x08; 24515 com->uscsi_buflen = 0x08; 24516 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24517 SD_PATH_STANDARD); 24518 if (rval == 0) { 24519 entry->cdte_datamode = buffer[0]; 24520 } else { 24521 /* 24522 * The READ HEADER command failed; since this command is 24523 * obsoleted in one spec, it is better to return 24524 * -1 for an invalid track so that we can still 24525 * receive the rest of the TOC data.
24526 */ 24527 entry->cdte_datamode = (uchar_t)-1; 24528 } 24529 } else { 24530 entry->cdte_datamode = (uchar_t)-1; 24531 } 24532 24533 kmem_free(buffer, 12); 24534 kmem_free(com, sizeof (*com)); 24535 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 24536 return (EFAULT); 24537 24538 return (rval); 24539 } 24540 24541 24542 /* 24543 * Function: sr_read_tochdr() 24544 * 24545 * Description: This routine is the driver entry point for handling CD-ROM 24546 * ioctl requests to read the Table of Contents (TOC) header 24547 * (CDROMREADTOHDR). The TOC header consists of the disk starting 24548 * and ending track numbers 24549 * 24550 * Arguments: dev - the device 'dev_t' 24551 * data - pointer to user provided toc header structure, 24552 * specifying the starting and ending track numbers. 24553 * flag - this argument is a pass through to ddi_copyxxx() 24554 * directly from the mode argument of ioctl(). 24555 * 24556 * Return Code: the code returned by sd_send_scsi_cmd() 24557 * EFAULT if ddi_copyxxx() fails 24558 * ENXIO if fail ddi_get_soft_state 24559 * EINVAL if data pointer is NULL 24560 */ 24561 24562 static int 24563 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 24564 { 24565 struct sd_lun *un; 24566 struct uscsi_cmd *com; 24567 struct cdrom_tochdr toc_header; 24568 struct cdrom_tochdr *hdr = &toc_header; 24569 char cdb[CDB_GROUP1]; 24570 int rval; 24571 caddr_t buffer; 24572 24573 if (data == NULL) { 24574 return (EINVAL); 24575 } 24576 24577 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24578 (un->un_state == SD_STATE_OFFLINE)) { 24579 return (ENXIO); 24580 } 24581 24582 buffer = kmem_zalloc(4, KM_SLEEP); 24583 bzero(cdb, CDB_GROUP1); 24584 cdb[0] = SCMD_READ_TOC; 24585 /* 24586 * Specifying a track number of 0x00 in the READ TOC command indicates 24587 * that the TOC header should be returned 24588 */ 24589 cdb[6] = 0x00; 24590 /* 24591 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 24592 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 24593 */ 24594 cdb[8] = 0x04; 24595 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24596 com->uscsi_cdb = cdb; 24597 com->uscsi_cdblen = CDB_GROUP1; 24598 com->uscsi_bufaddr = buffer; 24599 com->uscsi_buflen = 0x04; 24600 com->uscsi_timeout = 300; 24601 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24602 24603 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24604 SD_PATH_STANDARD); 24605 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 24606 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 24607 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 24608 } else { 24609 hdr->cdth_trk0 = buffer[2]; 24610 hdr->cdth_trk1 = buffer[3]; 24611 } 24612 kmem_free(buffer, 4); 24613 kmem_free(com, sizeof (*com)); 24614 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 24615 return (EFAULT); 24616 } 24617 return (rval); 24618 } 24619 24620 24621 /* 24622 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 24623 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 24624 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 24625 * digital audio and extended architecture digital audio. These modes are 24626 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 24627 * MMC specs. 
24628 * 24629 * In addition to support for the various data formats these routines also 24630 * include support for devices that implement only the direct access READ 24631 * commands (0x08, 0x28), devices that implement the READ_CD commands 24632 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 24633 * READ CDXA commands (0xD8, 0xDB) 24634 */ 24635 24636 /* 24637 * Function: sr_read_mode1() 24638 * 24639 * Description: This routine is the driver entry point for handling CD-ROM 24640 * ioctl read mode1 requests (CDROMREADMODE1). 24641 * 24642 * Arguments: dev - the device 'dev_t' 24643 * data - pointer to user provided cd read structure specifying 24644 * the lba buffer address and length. 24645 * flag - this argument is a pass through to ddi_copyxxx() 24646 * directly from the mode argument of ioctl(). 24647 * 24648 * Return Code: the code returned by sd_send_scsi_cmd() 24649 * EFAULT if ddi_copyxxx() fails 24650 * ENXIO if fail ddi_get_soft_state 24651 * EINVAL if data pointer is NULL 24652 */ 24653 24654 static int 24655 sr_read_mode1(dev_t dev, caddr_t data, int flag) 24656 { 24657 struct sd_lun *un; 24658 struct cdrom_read mode1_struct; 24659 struct cdrom_read *mode1 = &mode1_struct; 24660 int rval; 24661 #ifdef _MULTI_DATAMODEL 24662 /* To support ILP32 applications in an LP64 world */ 24663 struct cdrom_read32 cdrom_read32; 24664 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24665 #endif /* _MULTI_DATAMODEL */ 24666 24667 if (data == NULL) { 24668 return (EINVAL); 24669 } 24670 24671 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24672 (un->un_state == SD_STATE_OFFLINE)) { 24673 return (ENXIO); 24674 } 24675 24676 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24677 "sd_read_mode1: entry: un:0x%p\n", un); 24678 24679 #ifdef _MULTI_DATAMODEL 24680 switch (ddi_model_convert_from(flag & FMODELS)) { 24681 case DDI_MODEL_ILP32: 24682 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24683 return (EFAULT); 24684 } 24685 /* Convert the ILP32 uscsi data from the application to LP64 */ 24686 cdrom_read32tocdrom_read(cdrd32, mode1); 24687 break; 24688 case DDI_MODEL_NONE: 24689 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24690 return (EFAULT); 24691 } 24692 } 24693 #else /* ! _MULTI_DATAMODEL */ 24694 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24695 return (EFAULT); 24696 } 24697 #endif /* _MULTI_DATAMODEL */ 24698 24699 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 24700 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 24701 24702 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24703 "sd_read_mode1: exit: un:0x%p\n", un); 24704 24705 return (rval); 24706 } 24707 24708 24709 /* 24710 * Function: sr_read_cd_mode2() 24711 * 24712 * Description: This routine is the driver entry point for handling CD-ROM 24713 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24714 * support the READ CD (0xBE) command or the 1st generation 24715 * READ CD (0xD4) command. 24716 * 24717 * Arguments: dev - the device 'dev_t' 24718 * data - pointer to user provided cd read structure specifying 24719 * the lba buffer address and length. 24720 * flag - this argument is a pass through to ddi_copyxxx() 24721 * directly from the mode argument of ioctl(). 
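 *
 * Illustrative userland usage of CDROMREADMODE2 (a sketch only; "fd",
 * the destination buffer, and error handling are assumed; see cdio(7I)
 * for the expected units of cdread_lba):
 *
 *	struct cdrom_read cr;
 *	cr.cdread_lba = start_addr;	starting address for the read
 *	cr.cdread_bufaddr = buf;	caller supplied buffer
 *	cr.cdread_buflen = 2336;	one mode 2 sector
 *	(void) ioctl(fd, CDROMREADMODE2, &cr);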
24722 * 24723 * Return Code: the code returned by sd_send_scsi_cmd() 24724 * EFAULT if ddi_copyxxx() fails 24725 * ENXIO if fail ddi_get_soft_state 24726 * EINVAL if data pointer is NULL 24727 */ 24728 24729 static int 24730 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 24731 { 24732 struct sd_lun *un; 24733 struct uscsi_cmd *com; 24734 struct cdrom_read mode2_struct; 24735 struct cdrom_read *mode2 = &mode2_struct; 24736 uchar_t cdb[CDB_GROUP5]; 24737 int nblocks; 24738 int rval; 24739 #ifdef _MULTI_DATAMODEL 24740 /* To support ILP32 applications in an LP64 world */ 24741 struct cdrom_read32 cdrom_read32; 24742 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24743 #endif /* _MULTI_DATAMODEL */ 24744 24745 if (data == NULL) { 24746 return (EINVAL); 24747 } 24748 24749 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24750 (un->un_state == SD_STATE_OFFLINE)) { 24751 return (ENXIO); 24752 } 24753 24754 #ifdef _MULTI_DATAMODEL 24755 switch (ddi_model_convert_from(flag & FMODELS)) { 24756 case DDI_MODEL_ILP32: 24757 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24758 return (EFAULT); 24759 } 24760 /* Convert the ILP32 uscsi data from the application to LP64 */ 24761 cdrom_read32tocdrom_read(cdrd32, mode2); 24762 break; 24763 case DDI_MODEL_NONE: 24764 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24765 return (EFAULT); 24766 } 24767 break; 24768 } 24769 24770 #else /* ! _MULTI_DATAMODEL */ 24771 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24772 return (EFAULT); 24773 } 24774 #endif /* _MULTI_DATAMODEL */ 24775 24776 bzero(cdb, sizeof (cdb)); 24777 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 24778 /* Read command supported by 1st generation atapi drives */ 24779 cdb[0] = SCMD_READ_CDD4; 24780 } else { 24781 /* Universal CD Access Command */ 24782 cdb[0] = SCMD_READ_CD; 24783 } 24784 24785 /* 24786 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 24787 */ 24788 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 24789 24790 /* set the start address */ 24791 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 24792 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 24793 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 24794 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 24795 24796 /* set the transfer length */ 24797 nblocks = mode2->cdread_buflen / 2336; 24798 cdb[6] = (uchar_t)(nblocks >> 16); 24799 cdb[7] = (uchar_t)(nblocks >> 8); 24800 cdb[8] = (uchar_t)nblocks; 24801 24802 /* set the filter bits */ 24803 cdb[9] = CDROM_READ_CD_USERDATA; 24804 24805 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24806 com->uscsi_cdb = (caddr_t)cdb; 24807 com->uscsi_cdblen = sizeof (cdb); 24808 com->uscsi_bufaddr = mode2->cdread_bufaddr; 24809 com->uscsi_buflen = mode2->cdread_buflen; 24810 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24811 24812 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24813 SD_PATH_STANDARD); 24814 kmem_free(com, sizeof (*com)); 24815 return (rval); 24816 } 24817 24818 24819 /* 24820 * Function: sr_read_mode2() 24821 * 24822 * Description: This routine is the driver entry point for handling CD-ROM 24823 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24824 * do not support the READ CD (0xBE) command. 24825 * 24826 * Arguments: dev - the device 'dev_t' 24827 * data - pointer to user provided cd read structure specifying 24828 * the lba buffer address and length. 24829 * flag - this argument is a pass through to ddi_copyxxx() 24830 * directly from the mode argument of ioctl(). 
24831 * 24832 * Return Code: the code returned by sd_send_scsi_cmd() 24833 * EFAULT if ddi_copyxxx() fails 24834 * ENXIO if fail ddi_get_soft_state 24835 * EINVAL if data pointer is NULL 24836 * EIO if fail to reset block size 24837 * EAGAIN if commands are in progress in the driver 24838 */ 24839 24840 static int 24841 sr_read_mode2(dev_t dev, caddr_t data, int flag) 24842 { 24843 struct sd_lun *un; 24844 struct cdrom_read mode2_struct; 24845 struct cdrom_read *mode2 = &mode2_struct; 24846 int rval; 24847 uint32_t restore_blksize; 24848 struct uscsi_cmd *com; 24849 uchar_t cdb[CDB_GROUP0]; 24850 int nblocks; 24851 24852 #ifdef _MULTI_DATAMODEL 24853 /* To support ILP32 applications in an LP64 world */ 24854 struct cdrom_read32 cdrom_read32; 24855 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24856 #endif /* _MULTI_DATAMODEL */ 24857 24858 if (data == NULL) { 24859 return (EINVAL); 24860 } 24861 24862 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24863 (un->un_state == SD_STATE_OFFLINE)) { 24864 return (ENXIO); 24865 } 24866 24867 /* 24868 * Because this routine will update the device and driver block size 24869 * being used we want to make sure there are no commands in progress. 24870 * If commands are in progress the user will have to try again. 24871 * 24872 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 24873 * in sdioctl to protect commands from sdioctl through to the top of 24874 * sd_uscsi_strategy. See sdioctl for details. 24875 */ 24876 mutex_enter(SD_MUTEX(un)); 24877 if (un->un_ncmds_in_driver != 1) { 24878 mutex_exit(SD_MUTEX(un)); 24879 return (EAGAIN); 24880 } 24881 mutex_exit(SD_MUTEX(un)); 24882 24883 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24884 "sd_read_mode2: entry: un:0x%p\n", un); 24885 24886 #ifdef _MULTI_DATAMODEL 24887 switch (ddi_model_convert_from(flag & FMODELS)) { 24888 case DDI_MODEL_ILP32: 24889 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24890 return (EFAULT); 24891 } 24892 /* Convert the ILP32 uscsi data from the application to LP64 */ 24893 cdrom_read32tocdrom_read(cdrd32, mode2); 24894 break; 24895 case DDI_MODEL_NONE: 24896 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24897 return (EFAULT); 24898 } 24899 break; 24900 } 24901 #else /* ! 
_MULTI_DATAMODEL */ 24902 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 24903 return (EFAULT); 24904 } 24905 #endif /* _MULTI_DATAMODEL */ 24906 24907 /* Store the current target block size for restoration later */ 24908 restore_blksize = un->un_tgt_blocksize; 24909 24910 /* Change the device and soft state target block size to 2336 */ 24911 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 24912 rval = EIO; 24913 goto done; 24914 } 24915 24916 24917 bzero(cdb, sizeof (cdb)); 24918 24919 /* set READ operation */ 24920 cdb[0] = SCMD_READ; 24921 24922 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 24923 mode2->cdread_lba >>= 2; 24924 24925 /* set the start address */ 24926 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 24927 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 24928 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 24929 24930 /* set the transfer length */ 24931 nblocks = mode2->cdread_buflen / 2336; 24932 cdb[4] = (uchar_t)nblocks & 0xFF; 24933 24934 /* build command */ 24935 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24936 com->uscsi_cdb = (caddr_t)cdb; 24937 com->uscsi_cdblen = sizeof (cdb); 24938 com->uscsi_bufaddr = mode2->cdread_bufaddr; 24939 com->uscsi_buflen = mode2->cdread_buflen; 24940 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24941 24942 /* 24943 * Issue SCSI command with user space address for read buffer. 24944 * 24945 * This sends the command through main channel in the driver. 24946 * 24947 * Since this is accessed via an IOCTL call, we go through the 24948 * standard path, so that if the device was powered down, then 24949 * it would be 'awakened' to handle the command. 24950 */ 24951 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24952 SD_PATH_STANDARD); 24953 24954 kmem_free(com, sizeof (*com)); 24955 24956 /* Restore the device and soft state target block size */ 24957 if (sr_sector_mode(dev, restore_blksize) != 0) { 24958 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24959 "can't do switch back to mode 1\n"); 24960 /* 24961 * If sd_send_scsi_READ succeeded we still need to report 24962 * an error because we failed to reset the block size 24963 */ 24964 if (rval == 0) { 24965 rval = EIO; 24966 } 24967 } 24968 24969 done: 24970 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24971 "sd_read_mode2: exit: un:0x%p\n", un); 24972 24973 return (rval); 24974 } 24975 24976 24977 /* 24978 * Function: sr_sector_mode() 24979 * 24980 * Description: This utility function is used by sr_read_mode2 to set the target 24981 * block size based on the user specified size. This is a legacy 24982 * implementation based upon a vendor specific mode page 24983 * 24984 * Arguments: dev - the device 'dev_t' 24985 * data - flag indicating if block size is being set to 2336 or 24986 * 512. 
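 *
 * Note: sr_read_mode2() above uses this routine in a save/switch/restore
 * pattern: it saves un->un_tgt_blocksize, calls
 * sr_sector_mode(dev, SD_MODE2_BLKSIZE) to switch the drive to 2336 byte
 * sectors for the transfer, and then calls this routine again with the
 * saved size to restore the original block size.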
24987 * 24988 * Return Code: the code returned by sd_send_scsi_cmd() 24989 * EFAULT if ddi_copyxxx() fails 24990 * ENXIO if fail ddi_get_soft_state 24991 * EINVAL if data pointer is NULL 24992 */ 24993 24994 static int 24995 sr_sector_mode(dev_t dev, uint32_t blksize) 24996 { 24997 struct sd_lun *un; 24998 uchar_t *sense; 24999 uchar_t *select; 25000 int rval; 25001 25002 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25003 (un->un_state == SD_STATE_OFFLINE)) { 25004 return (ENXIO); 25005 } 25006 25007 sense = kmem_zalloc(20, KM_SLEEP); 25008 25009 /* Note: This is a vendor specific mode page (0x81) */ 25010 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 25011 SD_PATH_STANDARD)) != 0) { 25012 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25013 "sr_sector_mode: Mode Sense failed\n"); 25014 kmem_free(sense, 20); 25015 return (rval); 25016 } 25017 select = kmem_zalloc(20, KM_SLEEP); 25018 select[3] = 0x08; 25019 select[10] = ((blksize >> 8) & 0xff); 25020 select[11] = (blksize & 0xff); 25021 select[12] = 0x01; 25022 select[13] = 0x06; 25023 select[14] = sense[14]; 25024 select[15] = sense[15]; 25025 if (blksize == SD_MODE2_BLKSIZE) { 25026 select[14] |= 0x01; 25027 } 25028 25029 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 25030 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 25031 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25032 "sr_sector_mode: Mode Select failed\n"); 25033 } else { 25034 /* 25035 * Only update the softstate block size if we successfully 25036 * changed the device block mode. 25037 */ 25038 mutex_enter(SD_MUTEX(un)); 25039 sd_update_block_info(un, blksize, 0); 25040 mutex_exit(SD_MUTEX(un)); 25041 } 25042 kmem_free(sense, 20); 25043 kmem_free(select, 20); 25044 return (rval); 25045 } 25046 25047 25048 /* 25049 * Function: sr_read_cdda() 25050 * 25051 * Description: This routine is the driver entry point for handling CD-ROM 25052 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 25053 * the target supports CDDA these requests are handled via a vendor 25054 * specific command (0xD8) If the target does not support CDDA 25055 * these requests are handled via the READ CD command (0xBE). 25056 * 25057 * Arguments: dev - the device 'dev_t' 25058 * data - pointer to user provided CD-DA structure specifying 25059 * the track starting address, transfer length, and 25060 * subcode options. 25061 * flag - this argument is a pass through to ddi_copyxxx() 25062 * directly from the mode argument of ioctl(). 
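 *
 * Illustrative userland usage of CDROMCDDA (a sketch only; "fd", the
 * buffer sizing, and error handling are assumed):
 *
 *	struct cdrom_cdda cdda;
 *	cdda.cdda_addr = start_blk;		starting disk address
 *	cdda.cdda_length = nblks;		number of blocks to read
 *	cdda.cdda_subcode = CDROM_DA_NO_SUBCODE;	2352 bytes per block
 *	cdda.cdda_data = buf;		points to an nblks * 2352 byte buffer
 *	(void) ioctl(fd, CDROMCDDA, &cdda);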
25063 * 25064 * Return Code: the code returned by sd_send_scsi_cmd() 25065 * EFAULT if ddi_copyxxx() fails 25066 * ENXIO if fail ddi_get_soft_state 25067 * EINVAL if invalid arguments are provided 25068 * ENOTTY 25069 */ 25070 25071 static int 25072 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25073 { 25074 struct sd_lun *un; 25075 struct uscsi_cmd *com; 25076 struct cdrom_cdda *cdda; 25077 int rval; 25078 size_t buflen; 25079 char cdb[CDB_GROUP5]; 25080 25081 #ifdef _MULTI_DATAMODEL 25082 /* To support ILP32 applications in an LP64 world */ 25083 struct cdrom_cdda32 cdrom_cdda32; 25084 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25085 #endif /* _MULTI_DATAMODEL */ 25086 25087 if (data == NULL) { 25088 return (EINVAL); 25089 } 25090 25091 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25092 return (ENXIO); 25093 } 25094 25095 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25096 25097 #ifdef _MULTI_DATAMODEL 25098 switch (ddi_model_convert_from(flag & FMODELS)) { 25099 case DDI_MODEL_ILP32: 25100 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25101 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25102 "sr_read_cdda: ddi_copyin Failed\n"); 25103 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25104 return (EFAULT); 25105 } 25106 /* Convert the ILP32 uscsi data from the application to LP64 */ 25107 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25108 break; 25109 case DDI_MODEL_NONE: 25110 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25111 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25112 "sr_read_cdda: ddi_copyin Failed\n"); 25113 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25114 return (EFAULT); 25115 } 25116 break; 25117 } 25118 #else /* ! _MULTI_DATAMODEL */ 25119 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25120 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25121 "sr_read_cdda: ddi_copyin Failed\n"); 25122 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25123 return (EFAULT); 25124 } 25125 #endif /* _MULTI_DATAMODEL */ 25126 25127 /* 25128 * Since MMC-2 expects max 3 bytes for length, check if the 25129 * length input is greater than 3 bytes 25130 */ 25131 if ((cdda->cdda_length & 0xFF000000) != 0) { 25132 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25133 "cdrom transfer length too large: %d (limit %d)\n", 25134 cdda->cdda_length, 0xFFFFFF); 25135 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25136 return (EINVAL); 25137 } 25138 25139 switch (cdda->cdda_subcode) { 25140 case CDROM_DA_NO_SUBCODE: 25141 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25142 break; 25143 case CDROM_DA_SUBQ: 25144 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25145 break; 25146 case CDROM_DA_ALL_SUBCODE: 25147 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25148 break; 25149 case CDROM_DA_SUBCODE_ONLY: 25150 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25151 break; 25152 default: 25153 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25154 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25155 cdda->cdda_subcode); 25156 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25157 return (EINVAL); 25158 } 25159 25160 /* Build and send the command */ 25161 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25162 bzero(cdb, CDB_GROUP5); 25163 25164 if (un->un_f_cfg_cdda == TRUE) { 25165 cdb[0] = (char)SCMD_READ_CD; 25166 cdb[1] = 0x04; 25167 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25168 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25169 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25170 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25171 
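		/*
		 * In the MMC READ CD (0xBE) CDB being assembled here,
		 * cdb[1] (0x04) selects CD-DA sectors, cdb[2]-cdb[5] hold
		 * the starting address from cdda_addr, cdb[6]-cdb[8] hold
		 * the transfer length in blocks from cdda_length, cdb[9]
		 * (0x10) requests the user data, and cdb[10] selects how
		 * much sub-channel data to interleave.
		 */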
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25172 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25173 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25174 cdb[9] = 0x10; 25175 switch (cdda->cdda_subcode) { 25176 case CDROM_DA_NO_SUBCODE : 25177 cdb[10] = 0x0; 25178 break; 25179 case CDROM_DA_SUBQ : 25180 cdb[10] = 0x2; 25181 break; 25182 case CDROM_DA_ALL_SUBCODE : 25183 cdb[10] = 0x1; 25184 break; 25185 case CDROM_DA_SUBCODE_ONLY : 25186 /* FALLTHROUGH */ 25187 default : 25188 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25189 kmem_free(com, sizeof (*com)); 25190 return (ENOTTY); 25191 } 25192 } else { 25193 cdb[0] = (char)SCMD_READ_CDDA; 25194 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25195 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25196 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25197 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25198 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25199 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25200 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25201 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25202 cdb[10] = cdda->cdda_subcode; 25203 } 25204 25205 com->uscsi_cdb = cdb; 25206 com->uscsi_cdblen = CDB_GROUP5; 25207 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25208 com->uscsi_buflen = buflen; 25209 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25210 25211 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25212 SD_PATH_STANDARD); 25213 25214 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25215 kmem_free(com, sizeof (*com)); 25216 return (rval); 25217 } 25218 25219 25220 /* 25221 * Function: sr_read_cdxa() 25222 * 25223 * Description: This routine is the driver entry point for handling CD-ROM 25224 * ioctl requests to return CD-XA (Extended Architecture) data. 25225 * (CDROMCDXA). 25226 * 25227 * Arguments: dev - the device 'dev_t' 25228 * data - pointer to user provided CD-XA structure specifying 25229 * the data starting address, transfer length, and format 25230 * flag - this argument is a pass through to ddi_copyxxx() 25231 * directly from the mode argument of ioctl(). 25232 * 25233 * Return Code: the code returned by sd_send_scsi_cmd() 25234 * EFAULT if ddi_copyxxx() fails 25235 * ENXIO if fail ddi_get_soft_state 25236 * EINVAL if data pointer is NULL 25237 */ 25238 25239 static int 25240 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25241 { 25242 struct sd_lun *un; 25243 struct uscsi_cmd *com; 25244 struct cdrom_cdxa *cdxa; 25245 int rval; 25246 size_t buflen; 25247 char cdb[CDB_GROUP5]; 25248 uchar_t read_flags; 25249 25250 #ifdef _MULTI_DATAMODEL 25251 /* To support ILP32 applications in an LP64 world */ 25252 struct cdrom_cdxa32 cdrom_cdxa32; 25253 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25254 #endif /* _MULTI_DATAMODEL */ 25255 25256 if (data == NULL) { 25257 return (EINVAL); 25258 } 25259 25260 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25261 return (ENXIO); 25262 } 25263 25264 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25265 25266 #ifdef _MULTI_DATAMODEL 25267 switch (ddi_model_convert_from(flag & FMODELS)) { 25268 case DDI_MODEL_ILP32: 25269 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25270 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25271 return (EFAULT); 25272 } 25273 /* 25274 * Convert the ILP32 uscsi data from the 25275 * application to LP64 for internal use. 
25276 */ 25277 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 25278 break; 25279 case DDI_MODEL_NONE: 25280 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 25281 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25282 return (EFAULT); 25283 } 25284 break; 25285 } 25286 #else /* ! _MULTI_DATAMODEL */ 25287 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 25288 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25289 return (EFAULT); 25290 } 25291 #endif /* _MULTI_DATAMODEL */ 25292 25293 /* 25294 * Since MMC-2 expects max 3 bytes for length, check if the 25295 * length input is greater than 3 bytes 25296 */ 25297 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 25298 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 25299 "cdrom transfer length too large: %d (limit %d)\n", 25300 cdxa->cdxa_length, 0xFFFFFF); 25301 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25302 return (EINVAL); 25303 } 25304 25305 switch (cdxa->cdxa_format) { 25306 case CDROM_XA_DATA: 25307 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 25308 read_flags = 0x10; 25309 break; 25310 case CDROM_XA_SECTOR_DATA: 25311 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 25312 read_flags = 0xf8; 25313 break; 25314 case CDROM_XA_DATA_W_ERROR: 25315 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 25316 read_flags = 0xfc; 25317 break; 25318 default: 25319 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25320 "sr_read_cdxa: Format '0x%x' Not Supported\n", 25321 cdxa->cdxa_format); 25322 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25323 return (EINVAL); 25324 } 25325 25326 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25327 bzero(cdb, CDB_GROUP5); 25328 if (un->un_f_mmc_cap == TRUE) { 25329 cdb[0] = (char)SCMD_READ_CD; 25330 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25331 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25332 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25333 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25334 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25335 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25336 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 25337 cdb[9] = (char)read_flags; 25338 } else { 25339 /* 25340 * Note: A vendor specific command (0xDB) is being used her to 25341 * request a read of all subcodes. 
25342 */ 25343 cdb[0] = (char)SCMD_READ_CDXA; 25344 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25345 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25346 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25347 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25348 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25349 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25350 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25351 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25352 cdb[10] = cdxa->cdxa_format; 25353 } 25354 com->uscsi_cdb = cdb; 25355 com->uscsi_cdblen = CDB_GROUP5; 25356 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25357 com->uscsi_buflen = buflen; 25358 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25359 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25360 SD_PATH_STANDARD); 25361 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25362 kmem_free(com, sizeof (*com)); 25363 return (rval); 25364 } 25365 25366 25367 /* 25368 * Function: sr_eject() 25369 * 25370 * Description: This routine is the driver entry point for handling CD-ROM 25371 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25372 * 25373 * Arguments: dev - the device 'dev_t' 25374 * 25375 * Return Code: the code returned by sd_send_scsi_cmd() 25376 */ 25377 25378 static int 25379 sr_eject(dev_t dev) 25380 { 25381 struct sd_lun *un; 25382 int rval; 25383 25384 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25385 (un->un_state == SD_STATE_OFFLINE)) { 25386 return (ENXIO); 25387 } 25388 25389 /* 25390 * To prevent race conditions with the eject 25391 * command, keep track of an eject command as 25392 * it progresses. If we are already handling 25393 * an eject command in the driver for the given 25394 * unit and another request to eject is received 25395 * immediately return EAGAIN so we don't lose 25396 * the command if the current eject command fails. 25397 */ 25398 mutex_enter(SD_MUTEX(un)); 25399 if (un->un_f_ejecting == TRUE) { 25400 mutex_exit(SD_MUTEX(un)); 25401 return (EAGAIN); 25402 } 25403 un->un_f_ejecting = TRUE; 25404 mutex_exit(SD_MUTEX(un)); 25405 25406 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25407 SD_PATH_STANDARD)) != 0) { 25408 mutex_enter(SD_MUTEX(un)); 25409 un->un_f_ejecting = FALSE; 25410 mutex_exit(SD_MUTEX(un)); 25411 return (rval); 25412 } 25413 25414 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25415 SD_PATH_STANDARD); 25416 25417 if (rval == 0) { 25418 mutex_enter(SD_MUTEX(un)); 25419 sr_ejected(un); 25420 un->un_mediastate = DKIO_EJECTED; 25421 un->un_f_ejecting = FALSE; 25422 cv_broadcast(&un->un_state_cv); 25423 mutex_exit(SD_MUTEX(un)); 25424 } else { 25425 mutex_enter(SD_MUTEX(un)); 25426 un->un_f_ejecting = FALSE; 25427 mutex_exit(SD_MUTEX(un)); 25428 } 25429 return (rval); 25430 } 25431 25432 25433 /* 25434 * Function: sr_ejected() 25435 * 25436 * Description: This routine updates the soft state structure to invalidate the 25437 * geometry information after the media has been ejected or a 25438 * media eject has been detected. 
25439 * 25440 * Arguments: un - driver soft state (unit) structure 25441 */ 25442 25443 static void 25444 sr_ejected(struct sd_lun *un) 25445 { 25446 struct sd_errstats *stp; 25447 25448 ASSERT(un != NULL); 25449 ASSERT(mutex_owned(SD_MUTEX(un))); 25450 25451 un->un_f_blockcount_is_valid = FALSE; 25452 un->un_f_tgt_blocksize_is_valid = FALSE; 25453 mutex_exit(SD_MUTEX(un)); 25454 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 25455 mutex_enter(SD_MUTEX(un)); 25456 25457 if (un->un_errstats != NULL) { 25458 stp = (struct sd_errstats *)un->un_errstats->ks_data; 25459 stp->sd_capacity.value.ui64 = 0; 25460 } 25461 25462 /* remove "capacity-of-device" properties */ 25463 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 25464 "device-nblocks"); 25465 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 25466 "device-blksize"); 25467 } 25468 25469 25470 /* 25471 * Function: sr_check_wp() 25472 * 25473 * Description: This routine checks the write protection of a removable 25474 * media disk and hotpluggable devices via the write protect bit of 25475 * the Mode Page Header device specific field. Some devices choke 25476 * on unsupported mode page. In order to workaround this issue, 25477 * this routine has been implemented to use 0x3f mode page(request 25478 * for all pages) for all device types. 25479 * 25480 * Arguments: dev - the device 'dev_t' 25481 * 25482 * Return Code: int indicating if the device is write protected (1) or not (0) 25483 * 25484 * Context: Kernel thread. 25485 * 25486 */ 25487 25488 static int 25489 sr_check_wp(dev_t dev) 25490 { 25491 struct sd_lun *un; 25492 uchar_t device_specific; 25493 uchar_t *sense; 25494 int hdrlen; 25495 int rval = FALSE; 25496 25497 /* 25498 * Note: The return codes for this routine should be reworked to 25499 * properly handle the case of a NULL softstate. 25500 */ 25501 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25502 return (FALSE); 25503 } 25504 25505 if (un->un_f_cfg_is_atapi == TRUE) { 25506 /* 25507 * The mode page contents are not required; set the allocation 25508 * length for the mode page header only 25509 */ 25510 hdrlen = MODE_HEADER_LENGTH_GRP2; 25511 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25512 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 25513 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25514 goto err_exit; 25515 device_specific = 25516 ((struct mode_header_grp2 *)sense)->device_specific; 25517 } else { 25518 hdrlen = MODE_HEADER_LENGTH; 25519 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25520 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 25521 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25522 goto err_exit; 25523 device_specific = 25524 ((struct mode_header *)sense)->device_specific; 25525 } 25526 25527 /* 25528 * Write protect mode sense failed; not all disks 25529 * understand this query. Return FALSE assuming that 25530 * these devices are not writable. 25531 */ 25532 if (device_specific & WRITE_PROTECT) { 25533 rval = TRUE; 25534 } 25535 25536 err_exit: 25537 kmem_free(sense, hdrlen); 25538 return (rval); 25539 } 25540 25541 /* 25542 * Function: sr_volume_ctrl() 25543 * 25544 * Description: This routine is the driver entry point for handling CD-ROM 25545 * audio output volume ioctl requests. (CDROMVOLCTRL) 25546 * 25547 * Arguments: dev - the device 'dev_t' 25548 * data - pointer to user audio volume control structure 25549 * flag - this argument is a pass through to ddi_copyxxx() 25550 * directly from the mode argument of ioctl(). 
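 *
 * Illustrative userland usage of CDROMVOLCTRL (a sketch only; "fd" and
 * error handling are assumed):
 *
 *	struct cdrom_volctrl vc;
 *	vc.channel0 = 200;	output port 0 volume (0-255)
 *	vc.channel1 = 200;	output port 1 volume (0-255)
 *	(void) ioctl(fd, CDROMVOLCTRL, &vc);
 *
 * Channels 2 and 3 are left at their current settings by this routine.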
25551 * 25552 * Return Code: the code returned by sd_send_scsi_cmd() 25553 * EFAULT if ddi_copyxxx() fails 25554 * ENXIO if fail ddi_get_soft_state 25555 * EINVAL if data pointer is NULL 25556 * 25557 */ 25558 25559 static int 25560 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25561 { 25562 struct sd_lun *un; 25563 struct cdrom_volctrl volume; 25564 struct cdrom_volctrl *vol = &volume; 25565 uchar_t *sense_page; 25566 uchar_t *select_page; 25567 uchar_t *sense; 25568 uchar_t *select; 25569 int sense_buflen; 25570 int select_buflen; 25571 int rval; 25572 25573 if (data == NULL) { 25574 return (EINVAL); 25575 } 25576 25577 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25578 (un->un_state == SD_STATE_OFFLINE)) { 25579 return (ENXIO); 25580 } 25581 25582 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25583 return (EFAULT); 25584 } 25585 25586 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25587 struct mode_header_grp2 *sense_mhp; 25588 struct mode_header_grp2 *select_mhp; 25589 int bd_len; 25590 25591 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 25592 select_buflen = MODE_HEADER_LENGTH_GRP2 + 25593 MODEPAGE_AUDIO_CTRL_LEN; 25594 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25595 select = kmem_zalloc(select_buflen, KM_SLEEP); 25596 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 25597 sense_buflen, MODEPAGE_AUDIO_CTRL, 25598 SD_PATH_STANDARD)) != 0) { 25599 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25600 "sr_volume_ctrl: Mode Sense Failed\n"); 25601 kmem_free(sense, sense_buflen); 25602 kmem_free(select, select_buflen); 25603 return (rval); 25604 } 25605 sense_mhp = (struct mode_header_grp2 *)sense; 25606 select_mhp = (struct mode_header_grp2 *)select; 25607 bd_len = (sense_mhp->bdesc_length_hi << 8) | 25608 sense_mhp->bdesc_length_lo; 25609 if (bd_len > MODE_BLK_DESC_LENGTH) { 25610 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25611 "sr_volume_ctrl: Mode Sense returned invalid " 25612 "block descriptor length\n"); 25613 kmem_free(sense, sense_buflen); 25614 kmem_free(select, select_buflen); 25615 return (EIO); 25616 } 25617 sense_page = (uchar_t *) 25618 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 25619 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 25620 select_mhp->length_msb = 0; 25621 select_mhp->length_lsb = 0; 25622 select_mhp->bdesc_length_hi = 0; 25623 select_mhp->bdesc_length_lo = 0; 25624 } else { 25625 struct mode_header *sense_mhp, *select_mhp; 25626 25627 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25628 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25629 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25630 select = kmem_zalloc(select_buflen, KM_SLEEP); 25631 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 25632 sense_buflen, MODEPAGE_AUDIO_CTRL, 25633 SD_PATH_STANDARD)) != 0) { 25634 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25635 "sr_volume_ctrl: Mode Sense Failed\n"); 25636 kmem_free(sense, sense_buflen); 25637 kmem_free(select, select_buflen); 25638 return (rval); 25639 } 25640 sense_mhp = (struct mode_header *)sense; 25641 select_mhp = (struct mode_header *)select; 25642 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 25643 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25644 "sr_volume_ctrl: Mode Sense returned invalid " 25645 "block descriptor length\n"); 25646 kmem_free(sense, sense_buflen); 25647 kmem_free(select, select_buflen); 25648 return (EIO); 25649 } 25650 sense_page = (uchar_t *) 25651 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 25652 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 25653 select_mhp->length = 0; 25654 select_mhp->bdesc_length = 0; 25655 } 25656 /* 25657 * Note: An audio control data structure could be created and overlayed 25658 * on the following in place of the array indexing method implemented. 25659 */ 25660 25661 /* Build the select data for the user volume data */ 25662 select_page[0] = MODEPAGE_AUDIO_CTRL; 25663 select_page[1] = 0xE; 25664 /* Set the immediate bit */ 25665 select_page[2] = 0x04; 25666 /* Zero out reserved fields */ 25667 select_page[3] = 0x00; 25668 select_page[4] = 0x00; 25669 /* Return sense data for fields not to be modified */ 25670 select_page[5] = sense_page[5]; 25671 select_page[6] = sense_page[6]; 25672 select_page[7] = sense_page[7]; 25673 /* Set the user specified volume levels for channel 0 and 1 */ 25674 select_page[8] = 0x01; 25675 select_page[9] = vol->channel0; 25676 select_page[10] = 0x02; 25677 select_page[11] = vol->channel1; 25678 /* Channel 2 and 3 are currently unsupported so return the sense data */ 25679 select_page[12] = sense_page[12]; 25680 select_page[13] = sense_page[13]; 25681 select_page[14] = sense_page[14]; 25682 select_page[15] = sense_page[15]; 25683 25684 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25685 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 25686 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25687 } else { 25688 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 25689 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25690 } 25691 25692 kmem_free(sense, sense_buflen); 25693 kmem_free(select, select_buflen); 25694 return (rval); 25695 } 25696 25697 25698 /* 25699 * Function: sr_read_sony_session_offset() 25700 * 25701 * Description: This routine is the driver entry point for handling CD-ROM 25702 * ioctl requests for session offset information. (CDROMREADOFFSET) 25703 * The address of the first track in the last session of a 25704 * multi-session CD-ROM is returned 25705 * 25706 * Note: This routine uses a vendor specific key value in the 25707 * command control field without implementing any vendor check here 25708 * or in the ioctl routine. 25709 * 25710 * Arguments: dev - the device 'dev_t' 25711 * data - pointer to an int to hold the requested address 25712 * flag - this argument is a pass through to ddi_copyxxx() 25713 * directly from the mode argument of ioctl(). 25714 * 25715 * Return Code: the code returned by sd_send_scsi_cmd() 25716 * EFAULT if ddi_copyxxx() fails 25717 * ENXIO if fail ddi_get_soft_state 25718 * EINVAL if data pointer is NULL 25719 */ 25720 25721 static int 25722 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 25723 { 25724 struct sd_lun *un; 25725 struct uscsi_cmd *com; 25726 caddr_t buffer; 25727 char cdb[CDB_GROUP1]; 25728 int session_offset = 0; 25729 int rval; 25730 25731 if (data == NULL) { 25732 return (EINVAL); 25733 } 25734 25735 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25736 (un->un_state == SD_STATE_OFFLINE)) { 25737 return (ENXIO); 25738 } 25739 25740 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 25741 bzero(cdb, CDB_GROUP1); 25742 cdb[0] = SCMD_READ_TOC; 25743 /* 25744 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 25745 * (4 byte TOC response header + 8 byte response data) 25746 */ 25747 cdb[8] = SONY_SESSION_OFFSET_LEN; 25748 /* Byte 9 is the control byte. 
A vendor specific value is used */ 25749 cdb[9] = SONY_SESSION_OFFSET_KEY; 25750 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25751 com->uscsi_cdb = cdb; 25752 com->uscsi_cdblen = CDB_GROUP1; 25753 com->uscsi_bufaddr = buffer; 25754 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 25755 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25756 25757 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25758 SD_PATH_STANDARD); 25759 if (rval != 0) { 25760 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 25761 kmem_free(com, sizeof (*com)); 25762 return (rval); 25763 } 25764 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 25765 session_offset = 25766 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 25767 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 25768 /* 25769 * Offset returned offset in current lbasize block's. Convert to 25770 * 2k block's to return to the user 25771 */ 25772 if (un->un_tgt_blocksize == CDROM_BLK_512) { 25773 session_offset >>= 2; 25774 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 25775 session_offset >>= 1; 25776 } 25777 } 25778 25779 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 25780 rval = EFAULT; 25781 } 25782 25783 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 25784 kmem_free(com, sizeof (*com)); 25785 return (rval); 25786 } 25787 25788 25789 /* 25790 * Function: sd_wm_cache_constructor() 25791 * 25792 * Description: Cache Constructor for the wmap cache for the read/modify/write 25793 * devices. 25794 * 25795 * Arguments: wm - A pointer to the sd_w_map to be initialized. 25796 * un - sd_lun structure for the device. 25797 * flag - the km flags passed to constructor 25798 * 25799 * Return Code: 0 on success. 25800 * -1 on failure. 25801 */ 25802 25803 /*ARGSUSED*/ 25804 static int 25805 sd_wm_cache_constructor(void *wm, void *un, int flags) 25806 { 25807 bzero(wm, sizeof (struct sd_w_map)); 25808 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 25809 return (0); 25810 } 25811 25812 25813 /* 25814 * Function: sd_wm_cache_destructor() 25815 * 25816 * Description: Cache destructor for the wmap cache for the read/modify/write 25817 * devices. 25818 * 25819 * Arguments: wm - A pointer to the sd_w_map to be initialized. 25820 * un - sd_lun structure for the device. 25821 */ 25822 /*ARGSUSED*/ 25823 static void 25824 sd_wm_cache_destructor(void *wm, void *un) 25825 { 25826 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 25827 } 25828 25829 25830 /* 25831 * Function: sd_range_lock() 25832 * 25833 * Description: Lock the range of blocks specified as parameter to ensure 25834 * that read, modify write is atomic and no other i/o writes 25835 * to the same location. The range is specified in terms 25836 * of start and end blocks. Block numbers are the actual 25837 * media block numbers and not system. 25838 * 25839 * Arguments: un - sd_lun structure for the device. 25840 * startb - The starting block number 25841 * endb - The end block number 25842 * typ - type of i/o - simple/read_modify_write 25843 * 25844 * Return Code: wm - pointer to the wmap structure. 25845 * 25846 * Context: This routine can sleep. 
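 *
 * An illustrative (not verbatim from this file) pairing with
 * sd_range_unlock(); the block numbers are caller-supplied:
 *
 *	wm = sd_range_lock(un, start_blk, end_blk, SD_WTYPE_RMW);
 *	... perform the read, modify and write phases for the range ...
 *	sd_range_unlock(un, wm);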
25847 */ 25848 25849 static struct sd_w_map * 25850 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 25851 { 25852 struct sd_w_map *wmp = NULL; 25853 struct sd_w_map *sl_wmp = NULL; 25854 struct sd_w_map *tmp_wmp; 25855 wm_state state = SD_WM_CHK_LIST; 25856 25857 25858 ASSERT(un != NULL); 25859 ASSERT(!mutex_owned(SD_MUTEX(un))); 25860 25861 mutex_enter(SD_MUTEX(un)); 25862 25863 while (state != SD_WM_DONE) { 25864 25865 switch (state) { 25866 case SD_WM_CHK_LIST: 25867 /* 25868 * This is the starting state. Check the wmap list 25869 * to see if the range is currently available. 25870 */ 25871 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 25872 /* 25873 * If this is a simple write and no rmw 25874 * i/o is pending then try to lock the 25875 * range as the range should be available. 25876 */ 25877 state = SD_WM_LOCK_RANGE; 25878 } else { 25879 tmp_wmp = sd_get_range(un, startb, endb); 25880 if (tmp_wmp != NULL) { 25881 if ((wmp != NULL) && ONLIST(un, wmp)) { 25882 /* 25883 * Should not keep onlist wmps 25884 * while waiting this macro 25885 * will also do wmp = NULL; 25886 */ 25887 FREE_ONLIST_WMAP(un, wmp); 25888 } 25889 /* 25890 * sl_wmp is the wmap on which wait 25891 * is done, since the tmp_wmp points 25892 * to the inuse wmap, set sl_wmp to 25893 * tmp_wmp and change the state to sleep 25894 */ 25895 sl_wmp = tmp_wmp; 25896 state = SD_WM_WAIT_MAP; 25897 } else { 25898 state = SD_WM_LOCK_RANGE; 25899 } 25900 25901 } 25902 break; 25903 25904 case SD_WM_LOCK_RANGE: 25905 ASSERT(un->un_wm_cache); 25906 /* 25907 * The range need to be locked, try to get a wmap. 25908 * First attempt it with NO_SLEEP, want to avoid a sleep 25909 * if possible as we will have to release the sd mutex 25910 * if we have to sleep. 25911 */ 25912 if (wmp == NULL) 25913 wmp = kmem_cache_alloc(un->un_wm_cache, 25914 KM_NOSLEEP); 25915 if (wmp == NULL) { 25916 mutex_exit(SD_MUTEX(un)); 25917 _NOTE(DATA_READABLE_WITHOUT_LOCK 25918 (sd_lun::un_wm_cache)) 25919 wmp = kmem_cache_alloc(un->un_wm_cache, 25920 KM_SLEEP); 25921 mutex_enter(SD_MUTEX(un)); 25922 /* 25923 * we released the mutex so recheck and go to 25924 * check list state. 25925 */ 25926 state = SD_WM_CHK_LIST; 25927 } else { 25928 /* 25929 * We exit out of state machine since we 25930 * have the wmap. Do the housekeeping first. 25931 * place the wmap on the wmap list if it is not 25932 * on it already and then set the state to done. 25933 */ 25934 wmp->wm_start = startb; 25935 wmp->wm_end = endb; 25936 wmp->wm_flags = typ | SD_WM_BUSY; 25937 if (typ & SD_WTYPE_RMW) { 25938 un->un_rmw_count++; 25939 } 25940 /* 25941 * If not already on the list then link 25942 */ 25943 if (!ONLIST(un, wmp)) { 25944 wmp->wm_next = un->un_wm; 25945 wmp->wm_prev = NULL; 25946 if (wmp->wm_next) 25947 wmp->wm_next->wm_prev = wmp; 25948 un->un_wm = wmp; 25949 } 25950 state = SD_WM_DONE; 25951 } 25952 break; 25953 25954 case SD_WM_WAIT_MAP: 25955 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 25956 /* 25957 * Wait is done on sl_wmp, which is set in the 25958 * check_list state. 25959 */ 25960 sl_wmp->wm_wanted_count++; 25961 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 25962 sl_wmp->wm_wanted_count--; 25963 /* 25964 * We can reuse the memory from the completed sl_wmp 25965 * lock range for our new lock, but only if noone is 25966 * waiting for it. 
25967 */ 25968 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 25969 if (sl_wmp->wm_wanted_count == 0) { 25970 if (wmp != NULL) 25971 CHK_N_FREEWMP(un, wmp); 25972 wmp = sl_wmp; 25973 } 25974 sl_wmp = NULL; 25975 /* 25976 * After waking up, need to recheck for availability of 25977 * range. 25978 */ 25979 state = SD_WM_CHK_LIST; 25980 break; 25981 25982 default: 25983 panic("sd_range_lock: " 25984 "Unknown state %d in sd_range_lock", state); 25985 /*NOTREACHED*/ 25986 } /* switch(state) */ 25987 25988 } /* while(state != SD_WM_DONE) */ 25989 25990 mutex_exit(SD_MUTEX(un)); 25991 25992 ASSERT(wmp != NULL); 25993 25994 return (wmp); 25995 } 25996 25997 25998 /* 25999 * Function: sd_get_range() 26000 * 26001 * Description: Find if there any overlapping I/O to this one 26002 * Returns the write-map of 1st such I/O, NULL otherwise. 26003 * 26004 * Arguments: un - sd_lun structure for the device. 26005 * startb - The starting block number 26006 * endb - The end block number 26007 * 26008 * Return Code: wm - pointer to the wmap structure. 26009 */ 26010 26011 static struct sd_w_map * 26012 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 26013 { 26014 struct sd_w_map *wmp; 26015 26016 ASSERT(un != NULL); 26017 26018 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 26019 if (!(wmp->wm_flags & SD_WM_BUSY)) { 26020 continue; 26021 } 26022 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 26023 break; 26024 } 26025 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 26026 break; 26027 } 26028 } 26029 26030 return (wmp); 26031 } 26032 26033 26034 /* 26035 * Function: sd_free_inlist_wmap() 26036 * 26037 * Description: Unlink and free a write map struct. 26038 * 26039 * Arguments: un - sd_lun structure for the device. 26040 * wmp - sd_w_map which needs to be unlinked. 26041 */ 26042 26043 static void 26044 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 26045 { 26046 ASSERT(un != NULL); 26047 26048 if (un->un_wm == wmp) { 26049 un->un_wm = wmp->wm_next; 26050 } else { 26051 wmp->wm_prev->wm_next = wmp->wm_next; 26052 } 26053 26054 if (wmp->wm_next) { 26055 wmp->wm_next->wm_prev = wmp->wm_prev; 26056 } 26057 26058 wmp->wm_next = wmp->wm_prev = NULL; 26059 26060 kmem_cache_free(un->un_wm_cache, wmp); 26061 } 26062 26063 26064 /* 26065 * Function: sd_range_unlock() 26066 * 26067 * Description: Unlock the range locked by wm. 26068 * Free write map if nobody else is waiting on it. 26069 * 26070 * Arguments: un - sd_lun structure for the device. 26071 * wmp - sd_w_map which needs to be unlinked. 26072 */ 26073 26074 static void 26075 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 26076 { 26077 ASSERT(un != NULL); 26078 ASSERT(wm != NULL); 26079 ASSERT(!mutex_owned(SD_MUTEX(un))); 26080 26081 mutex_enter(SD_MUTEX(un)); 26082 26083 if (wm->wm_flags & SD_WTYPE_RMW) { 26084 un->un_rmw_count--; 26085 } 26086 26087 if (wm->wm_wanted_count) { 26088 wm->wm_flags = 0; 26089 /* 26090 * Broadcast that the wmap is available now. 26091 */ 26092 cv_broadcast(&wm->wm_avail); 26093 } else { 26094 /* 26095 * If no one is waiting on the map, it should be free'ed. 26096 */ 26097 sd_free_inlist_wmap(un, wm); 26098 } 26099 26100 mutex_exit(SD_MUTEX(un)); 26101 } 26102 26103 26104 /* 26105 * Function: sd_read_modify_write_task 26106 * 26107 * Description: Called from a taskq thread to initiate the write phase of 26108 * a read-modify-write request. This is used for targets where 26109 * un->un_sys_blocksize != un->un_tgt_blocksize. 
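 *		For example (sizes are illustrative), a 512-byte system
 *		block write to a device with a 2048-byte target block size
 *		requires the containing 2048-byte block to be read, the new
 *		512 bytes merged in, and the full target block written back;
 *		this routine initiates that final write.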
26110 * 26111 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26112 * 26113 * Context: Called under taskq thread context. 26114 */ 26115 26116 static void 26117 sd_read_modify_write_task(void *arg) 26118 { 26119 struct sd_mapblocksize_info *bsp; 26120 struct buf *bp; 26121 struct sd_xbuf *xp; 26122 struct sd_lun *un; 26123 26124 bp = arg; /* The bp is given in arg */ 26125 ASSERT(bp != NULL); 26126 26127 /* Get the pointer to the layer-private data struct */ 26128 xp = SD_GET_XBUF(bp); 26129 ASSERT(xp != NULL); 26130 bsp = xp->xb_private; 26131 ASSERT(bsp != NULL); 26132 26133 un = SD_GET_UN(bp); 26134 ASSERT(un != NULL); 26135 ASSERT(!mutex_owned(SD_MUTEX(un))); 26136 26137 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26138 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26139 26140 /* 26141 * This is the write phase of a read-modify-write request, called 26142 * under the context of a taskq thread in response to the completion 26143 * of the read portion of the rmw request completing under interrupt 26144 * context. The write request must be sent from here down the iostart 26145 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26146 * we use the layer index saved in the layer-private data area. 26147 */ 26148 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26149 26150 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26151 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26152 } 26153 26154 26155 /* 26156 * Function: sddump_do_read_of_rmw() 26157 * 26158 * Description: This routine will be called from sddump, If sddump is called 26159 * with an I/O which not aligned on device blocksize boundary 26160 * then the write has to be converted to read-modify-write. 26161 * Do the read part here in order to keep sddump simple. 26162 * Note - That the sd_mutex is held across the call to this 26163 * routine. 26164 * 26165 * Arguments: un - sd_lun 26166 * blkno - block number in terms of media block size. 26167 * nblk - number of blocks. 26168 * bpp - pointer to pointer to the buf structure. On return 26169 * from this function, *bpp points to the valid buffer 26170 * to which the write has to be done. 26171 * 26172 * Return Code: 0 for success or errno-type return code 26173 */ 26174 26175 static int 26176 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 26177 struct buf **bpp) 26178 { 26179 int err; 26180 int i; 26181 int rval; 26182 struct buf *bp; 26183 struct scsi_pkt *pkt = NULL; 26184 uint32_t target_blocksize; 26185 26186 ASSERT(un != NULL); 26187 ASSERT(mutex_owned(SD_MUTEX(un))); 26188 26189 target_blocksize = un->un_tgt_blocksize; 26190 26191 mutex_exit(SD_MUTEX(un)); 26192 26193 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 26194 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 26195 if (bp == NULL) { 26196 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26197 "no resources for dumping; giving up"); 26198 err = ENOMEM; 26199 goto done; 26200 } 26201 26202 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 26203 blkno, nblk); 26204 if (rval != 0) { 26205 scsi_free_consistent_buf(bp); 26206 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26207 "no resources for dumping; giving up"); 26208 err = ENOMEM; 26209 goto done; 26210 } 26211 26212 pkt->pkt_flags |= FLAG_NOINTR; 26213 26214 err = EIO; 26215 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26216 26217 /* 26218 * Scsi_poll returns 0 (success) if the command completes and 26219 * the status block is STATUS_GOOD. 
We should only check 26220 * errors if this condition is not true. Even then we should 26221 * send our own request sense packet only if we have a check 26222 * condition and auto request sense has not been performed by 26223 * the hba. 26224 */ 26225 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26226 26227 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26228 err = 0; 26229 break; 26230 } 26231 26232 /* 26233 * Check CMD_DEV_GONE 1st, give up if device is gone, 26234 * no need to read RQS data. 26235 */ 26236 if (pkt->pkt_reason == CMD_DEV_GONE) { 26237 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26238 "Device is gone\n"); 26239 break; 26240 } 26241 26242 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26243 SD_INFO(SD_LOG_DUMP, un, 26244 "sddump: read failed with CHECK, try # %d\n", i); 26245 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26246 (void) sd_send_polled_RQS(un); 26247 } 26248 26249 continue; 26250 } 26251 26252 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26253 int reset_retval = 0; 26254 26255 SD_INFO(SD_LOG_DUMP, un, 26256 "sddump: read failed with BUSY, try # %d\n", i); 26257 26258 if (un->un_f_lun_reset_enabled == TRUE) { 26259 reset_retval = scsi_reset(SD_ADDRESS(un), 26260 RESET_LUN); 26261 } 26262 if (reset_retval == 0) { 26263 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26264 } 26265 (void) sd_send_polled_RQS(un); 26266 26267 } else { 26268 SD_INFO(SD_LOG_DUMP, un, 26269 "sddump: read failed with 0x%x, try # %d\n", 26270 SD_GET_PKT_STATUS(pkt), i); 26271 mutex_enter(SD_MUTEX(un)); 26272 sd_reset_target(un, pkt); 26273 mutex_exit(SD_MUTEX(un)); 26274 } 26275 26276 /* 26277 * If we are not getting anywhere with lun/target resets, 26278 * let's reset the bus. 26279 */ 26280 if (i > SD_NDUMP_RETRIES/2) { 26281 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26282 (void) sd_send_polled_RQS(un); 26283 } 26284 26285 } 26286 scsi_destroy_pkt(pkt); 26287 26288 if (err != 0) { 26289 scsi_free_consistent_buf(bp); 26290 *bpp = NULL; 26291 } else { 26292 *bpp = bp; 26293 } 26294 26295 done: 26296 mutex_enter(SD_MUTEX(un)); 26297 return (err); 26298 } 26299 26300 26301 /* 26302 * Function: sd_failfast_flushq 26303 * 26304 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26305 * in b_flags and move them onto the failfast queue, then kick 26306 * off a thread to return all bp's on the failfast queue to 26307 * their owners with an error set. 26308 * 26309 * Arguments: un - pointer to the soft state struct for the instance. 26310 * 26311 * Context: may execute in interrupt context. 26312 */ 26313 26314 static void 26315 sd_failfast_flushq(struct sd_lun *un) 26316 { 26317 struct buf *bp; 26318 struct buf *next_waitq_bp; 26319 struct buf *prev_waitq_bp = NULL; 26320 26321 ASSERT(un != NULL); 26322 ASSERT(mutex_owned(SD_MUTEX(un))); 26323 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26324 ASSERT(un->un_failfast_bp == NULL); 26325 26326 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26327 "sd_failfast_flushq: entry: un:0x%p\n", un); 26328 26329 /* 26330 * Check if we should flush all bufs when entering failfast state, or 26331 * just those with B_FAILFAST set. 26332 */ 26333 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26334 /* 26335 * Move *all* bp's on the wait queue to the failfast flush 26336 * queue, including those that do NOT have B_FAILFAST set. 
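		 * (The wait queue is spliced onto the tail of the failfast
		 * queue as a whole below, rather than moving each buf
		 * individually.)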
26337 */ 26338 if (un->un_failfast_headp == NULL) { 26339 ASSERT(un->un_failfast_tailp == NULL); 26340 un->un_failfast_headp = un->un_waitq_headp; 26341 } else { 26342 ASSERT(un->un_failfast_tailp != NULL); 26343 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26344 } 26345 26346 un->un_failfast_tailp = un->un_waitq_tailp; 26347 26348 /* update kstat for each bp moved out of the waitq */ 26349 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26350 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26351 } 26352 26353 /* empty the waitq */ 26354 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26355 26356 } else { 26357 /* 26358 * Go thru the wait queue, pick off all entries with 26359 * B_FAILFAST set, and move these onto the failfast queue. 26360 */ 26361 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26362 /* 26363 * Save the pointer to the next bp on the wait queue, 26364 * so we get to it on the next iteration of this loop. 26365 */ 26366 next_waitq_bp = bp->av_forw; 26367 26368 /* 26369 * If this bp from the wait queue does NOT have 26370 * B_FAILFAST set, just move on to the next element 26371 * in the wait queue. Note, this is the only place 26372 * where it is correct to set prev_waitq_bp. 26373 */ 26374 if ((bp->b_flags & B_FAILFAST) == 0) { 26375 prev_waitq_bp = bp; 26376 continue; 26377 } 26378 26379 /* 26380 * Remove the bp from the wait queue. 26381 */ 26382 if (bp == un->un_waitq_headp) { 26383 /* The bp is the first element of the waitq. */ 26384 un->un_waitq_headp = next_waitq_bp; 26385 if (un->un_waitq_headp == NULL) { 26386 /* The wait queue is now empty */ 26387 un->un_waitq_tailp = NULL; 26388 } 26389 } else { 26390 /* 26391 * The bp is either somewhere in the middle 26392 * or at the end of the wait queue. 26393 */ 26394 ASSERT(un->un_waitq_headp != NULL); 26395 ASSERT(prev_waitq_bp != NULL); 26396 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 26397 == 0); 26398 if (bp == un->un_waitq_tailp) { 26399 /* bp is the last entry on the waitq. */ 26400 ASSERT(next_waitq_bp == NULL); 26401 un->un_waitq_tailp = prev_waitq_bp; 26402 } 26403 prev_waitq_bp->av_forw = next_waitq_bp; 26404 } 26405 bp->av_forw = NULL; 26406 26407 /* 26408 * update kstat since the bp is moved out of 26409 * the waitq 26410 */ 26411 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26412 26413 /* 26414 * Now put the bp onto the failfast queue. 26415 */ 26416 if (un->un_failfast_headp == NULL) { 26417 /* failfast queue is currently empty */ 26418 ASSERT(un->un_failfast_tailp == NULL); 26419 un->un_failfast_headp = 26420 un->un_failfast_tailp = bp; 26421 } else { 26422 /* Add the bp to the end of the failfast q */ 26423 ASSERT(un->un_failfast_tailp != NULL); 26424 ASSERT(un->un_failfast_tailp->b_flags & 26425 B_FAILFAST); 26426 un->un_failfast_tailp->av_forw = bp; 26427 un->un_failfast_tailp = bp; 26428 } 26429 } 26430 } 26431 26432 /* 26433 * Now return all bp's on the failfast queue to their owners. 26434 */ 26435 while ((bp = un->un_failfast_headp) != NULL) { 26436 26437 un->un_failfast_headp = bp->av_forw; 26438 if (un->un_failfast_headp == NULL) { 26439 un->un_failfast_tailp = NULL; 26440 } 26441 26442 /* 26443 * We want to return the bp with a failure error code, but 26444 * we do not want a call to sd_start_cmds() to occur here, 26445 * so use sd_return_failed_command_no_restart() instead of 26446 * sd_return_failed_command(). 26447 */ 26448 sd_return_failed_command_no_restart(un, bp, EIO); 26449 } 26450 26451 /* Flush the xbuf queues if required. 
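	 * ddi_xbuf_flushq() applies sd_failfast_flushq_callback() to each
	 * queued xbuf to decide which ones should be flushed.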
*/ 26452 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 26453 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 26454 } 26455 26456 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26457 "sd_failfast_flushq: exit: un:0x%p\n", un); 26458 } 26459 26460 26461 /* 26462 * Function: sd_failfast_flushq_callback 26463 * 26464 * Description: Return TRUE if the given bp meets the criteria for failfast 26465 * flushing. Used with ddi_xbuf_flushq(9F). 26466 * 26467 * Arguments: bp - ptr to buf struct to be examined. 26468 * 26469 * Context: Any 26470 */ 26471 26472 static int 26473 sd_failfast_flushq_callback(struct buf *bp) 26474 { 26475 /* 26476 * Return TRUE if (1) we want to flush ALL bufs when the failfast 26477 * state is entered; OR (2) the given bp has B_FAILFAST set. 26478 */ 26479 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 26480 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 26481 } 26482 26483 26484 26485 #if defined(__i386) || defined(__amd64) 26486 /* 26487 * Function: sd_setup_next_xfer 26488 * 26489 * Description: Prepare next I/O operation using DMA_PARTIAL 26490 * 26491 */ 26492 26493 static int 26494 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 26495 struct scsi_pkt *pkt, struct sd_xbuf *xp) 26496 { 26497 ssize_t num_blks_not_xfered; 26498 daddr_t strt_blk_num; 26499 ssize_t bytes_not_xfered; 26500 int rval; 26501 26502 ASSERT(pkt->pkt_resid == 0); 26503 26504 /* 26505 * Calculate next block number and amount to be transferred. 26506 * 26507 * How much data NOT transfered to the HBA yet. 26508 */ 26509 bytes_not_xfered = xp->xb_dma_resid; 26510 26511 /* 26512 * figure how many blocks NOT transfered to the HBA yet. 26513 */ 26514 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 26515 26516 /* 26517 * set starting block number to the end of what WAS transfered. 26518 */ 26519 strt_blk_num = xp->xb_blkno + 26520 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 26521 26522 /* 26523 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 26524 * will call scsi_initpkt with NULL_FUNC so we do not have to release 26525 * the disk mutex here. 26526 */ 26527 rval = sd_setup_next_rw_pkt(un, pkt, bp, 26528 strt_blk_num, num_blks_not_xfered); 26529 26530 if (rval == 0) { 26531 26532 /* 26533 * Success. 26534 * 26535 * Adjust things if there are still more blocks to be 26536 * transfered. 26537 */ 26538 xp->xb_dma_resid = pkt->pkt_resid; 26539 pkt->pkt_resid = 0; 26540 26541 return (1); 26542 } 26543 26544 /* 26545 * There's really only one possible return value from 26546 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 26547 * returns NULL. 26548 */ 26549 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 26550 26551 bp->b_resid = bp->b_bcount; 26552 bp->b_flags |= B_ERROR; 26553 26554 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26555 "Error setting up next portion of DMA transfer\n"); 26556 26557 return (0); 26558 } 26559 #endif 26560 26561 /* 26562 * Function: sd_panic_for_res_conflict 26563 * 26564 * Description: Call panic with a string formatted with "Reservation Conflict" 26565 * and a human readable identifier indicating the SD instance 26566 * that experienced the reservation conflict. 26567 * 26568 * Arguments: un - pointer to the soft state struct for the instance. 26569 * 26570 * Context: may execute in interrupt context. 
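 *
 * Note: the resulting panic string has the form shown below; the device
 * path is only an example:
 *
 *	Reservation Conflict
 *	Disk: /pci@1f,4000/scsi@3/sd@1,0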
26571 */ 26572 26573 #define SD_RESV_CONFLICT_FMT_LEN 40 26574 void 26575 sd_panic_for_res_conflict(struct sd_lun *un) 26576 { 26577 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 26578 char path_str[MAXPATHLEN]; 26579 26580 (void) snprintf(panic_str, sizeof (panic_str), 26581 "Reservation Conflict\nDisk: %s", 26582 ddi_pathname(SD_DEVINFO(un), path_str)); 26583 26584 panic(panic_str); 26585 } 26586 26587 /* 26588 * Note: The following sd_faultinjection_ioctl( ) routines implement 26589 * driver support for handling fault injection for error analysis 26590 * causing faults in multiple layers of the driver. 26591 * 26592 */ 26593 26594 #ifdef SD_FAULT_INJECTION 26595 static uint_t sd_fault_injection_on = 0; 26596 26597 /* 26598 * Function: sd_faultinjection_ioctl() 26599 * 26600 * Description: This routine is the driver entry point for handling 26601 * faultinjection ioctls to inject errors into the 26602 * layer model 26603 * 26604 * Arguments: cmd - the ioctl cmd received 26605 * arg - the arguments from user and returns 26606 */ 26607 26608 static void 26609 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 26610 26611 uint_t i; 26612 uint_t rval; 26613 26614 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 26615 26616 mutex_enter(SD_MUTEX(un)); 26617 26618 switch (cmd) { 26619 case SDIOCRUN: 26620 /* Allow pushed faults to be injected */ 26621 SD_INFO(SD_LOG_SDTEST, un, 26622 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 26623 26624 sd_fault_injection_on = 1; 26625 26626 SD_INFO(SD_LOG_IOERR, un, 26627 "sd_faultinjection_ioctl: run finished\n"); 26628 break; 26629 26630 case SDIOCSTART: 26631 /* Start Injection Session */ 26632 SD_INFO(SD_LOG_SDTEST, un, 26633 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 26634 26635 sd_fault_injection_on = 0; 26636 un->sd_injection_mask = 0xFFFFFFFF; 26637 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 26638 un->sd_fi_fifo_pkt[i] = NULL; 26639 un->sd_fi_fifo_xb[i] = NULL; 26640 un->sd_fi_fifo_un[i] = NULL; 26641 un->sd_fi_fifo_arq[i] = NULL; 26642 } 26643 un->sd_fi_fifo_start = 0; 26644 un->sd_fi_fifo_end = 0; 26645 26646 mutex_enter(&(un->un_fi_mutex)); 26647 un->sd_fi_log[0] = '\0'; 26648 un->sd_fi_buf_len = 0; 26649 mutex_exit(&(un->un_fi_mutex)); 26650 26651 SD_INFO(SD_LOG_IOERR, un, 26652 "sd_faultinjection_ioctl: start finished\n"); 26653 break; 26654 26655 case SDIOCSTOP: 26656 /* Stop Injection Session */ 26657 SD_INFO(SD_LOG_SDTEST, un, 26658 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 26659 sd_fault_injection_on = 0; 26660 un->sd_injection_mask = 0x0; 26661 26662 /* Empty stray or unuseds structs from fifo */ 26663 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 26664 if (un->sd_fi_fifo_pkt[i] != NULL) { 26665 kmem_free(un->sd_fi_fifo_pkt[i], 26666 sizeof (struct sd_fi_pkt)); 26667 } 26668 if (un->sd_fi_fifo_xb[i] != NULL) { 26669 kmem_free(un->sd_fi_fifo_xb[i], 26670 sizeof (struct sd_fi_xb)); 26671 } 26672 if (un->sd_fi_fifo_un[i] != NULL) { 26673 kmem_free(un->sd_fi_fifo_un[i], 26674 sizeof (struct sd_fi_un)); 26675 } 26676 if (un->sd_fi_fifo_arq[i] != NULL) { 26677 kmem_free(un->sd_fi_fifo_arq[i], 26678 sizeof (struct sd_fi_arq)); 26679 } 26680 un->sd_fi_fifo_pkt[i] = NULL; 26681 un->sd_fi_fifo_un[i] = NULL; 26682 un->sd_fi_fifo_xb[i] = NULL; 26683 un->sd_fi_fifo_arq[i] = NULL; 26684 } 26685 un->sd_fi_fifo_start = 0; 26686 un->sd_fi_fifo_end = 0; 26687 26688 SD_INFO(SD_LOG_IOERR, un, 26689 "sd_faultinjection_ioctl: stop finished\n"); 26690 break; 26691 26692 case SDIOCINSERTPKT: 26693 /* Store a 
packet struct to be pushed onto fifo */ 26694 SD_INFO(SD_LOG_SDTEST, un, 26695 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 26696 26697 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 26698 26699 sd_fault_injection_on = 0; 26700 26701 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 26702 if (un->sd_fi_fifo_pkt[i] != NULL) { 26703 kmem_free(un->sd_fi_fifo_pkt[i], 26704 sizeof (struct sd_fi_pkt)); 26705 } 26706 if (arg != NULL) { 26707 un->sd_fi_fifo_pkt[i] = 26708 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 26709 if (un->sd_fi_fifo_pkt[i] == NULL) { 26710 /* Alloc failed don't store anything */ 26711 break; 26712 } 26713 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 26714 sizeof (struct sd_fi_pkt), 0); 26715 if (rval == -1) { 26716 kmem_free(un->sd_fi_fifo_pkt[i], 26717 sizeof (struct sd_fi_pkt)); 26718 un->sd_fi_fifo_pkt[i] = NULL; 26719 } 26720 } else { 26721 SD_INFO(SD_LOG_IOERR, un, 26722 "sd_faultinjection_ioctl: pkt null\n"); 26723 } 26724 break; 26725 26726 case SDIOCINSERTXB: 26727 /* Store a xb struct to be pushed onto fifo */ 26728 SD_INFO(SD_LOG_SDTEST, un, 26729 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 26730 26731 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 26732 26733 sd_fault_injection_on = 0; 26734 26735 if (un->sd_fi_fifo_xb[i] != NULL) { 26736 kmem_free(un->sd_fi_fifo_xb[i], 26737 sizeof (struct sd_fi_xb)); 26738 un->sd_fi_fifo_xb[i] = NULL; 26739 } 26740 if (arg != NULL) { 26741 un->sd_fi_fifo_xb[i] = 26742 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 26743 if (un->sd_fi_fifo_xb[i] == NULL) { 26744 /* Alloc failed don't store anything */ 26745 break; 26746 } 26747 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 26748 sizeof (struct sd_fi_xb), 0); 26749 26750 if (rval == -1) { 26751 kmem_free(un->sd_fi_fifo_xb[i], 26752 sizeof (struct sd_fi_xb)); 26753 un->sd_fi_fifo_xb[i] = NULL; 26754 } 26755 } else { 26756 SD_INFO(SD_LOG_IOERR, un, 26757 "sd_faultinjection_ioctl: xb null\n"); 26758 } 26759 break; 26760 26761 case SDIOCINSERTUN: 26762 /* Store a un struct to be pushed onto fifo */ 26763 SD_INFO(SD_LOG_SDTEST, un, 26764 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 26765 26766 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 26767 26768 sd_fault_injection_on = 0; 26769 26770 if (un->sd_fi_fifo_un[i] != NULL) { 26771 kmem_free(un->sd_fi_fifo_un[i], 26772 sizeof (struct sd_fi_un)); 26773 un->sd_fi_fifo_un[i] = NULL; 26774 } 26775 if (arg != NULL) { 26776 un->sd_fi_fifo_un[i] = 26777 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 26778 if (un->sd_fi_fifo_un[i] == NULL) { 26779 /* Alloc failed don't store anything */ 26780 break; 26781 } 26782 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 26783 sizeof (struct sd_fi_un), 0); 26784 if (rval == -1) { 26785 kmem_free(un->sd_fi_fifo_un[i], 26786 sizeof (struct sd_fi_un)); 26787 un->sd_fi_fifo_un[i] = NULL; 26788 } 26789 26790 } else { 26791 SD_INFO(SD_LOG_IOERR, un, 26792 "sd_faultinjection_ioctl: un null\n"); 26793 } 26794 26795 break; 26796 26797 case SDIOCINSERTARQ: 26798 /* Store a arq struct to be pushed onto fifo */ 26799 SD_INFO(SD_LOG_SDTEST, un, 26800 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 26801 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 26802 26803 sd_fault_injection_on = 0; 26804 26805 if (un->sd_fi_fifo_arq[i] != NULL) { 26806 kmem_free(un->sd_fi_fifo_arq[i], 26807 sizeof (struct sd_fi_arq)); 26808 un->sd_fi_fifo_arq[i] = NULL; 26809 } 26810 if (arg != NULL) { 26811 un->sd_fi_fifo_arq[i] = 26812 kmem_alloc(sizeof (struct sd_fi_arq), 
KM_NOSLEEP); 26813 if (un->sd_fi_fifo_arq[i] == NULL) { 26814 /* Alloc failed don't store anything */ 26815 break; 26816 } 26817 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 26818 sizeof (struct sd_fi_arq), 0); 26819 if (rval == -1) { 26820 kmem_free(un->sd_fi_fifo_arq[i], 26821 sizeof (struct sd_fi_arq)); 26822 un->sd_fi_fifo_arq[i] = NULL; 26823 } 26824 26825 } else { 26826 SD_INFO(SD_LOG_IOERR, un, 26827 "sd_faultinjection_ioctl: arq null\n"); 26828 } 26829 26830 break; 26831 26832 case SDIOCPUSH: 26833 /* Push stored xb, pkt, un, and arq onto fifo */ 26834 sd_fault_injection_on = 0; 26835 26836 if (arg != NULL) { 26837 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 26838 if (rval != -1 && 26839 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 26840 un->sd_fi_fifo_end += i; 26841 } 26842 } else { 26843 SD_INFO(SD_LOG_IOERR, un, 26844 "sd_faultinjection_ioctl: push arg null\n"); 26845 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 26846 un->sd_fi_fifo_end++; 26847 } 26848 } 26849 SD_INFO(SD_LOG_IOERR, un, 26850 "sd_faultinjection_ioctl: push to end=%d\n", 26851 un->sd_fi_fifo_end); 26852 break; 26853 26854 case SDIOCRETRIEVE: 26855 /* Return buffer of log from Injection session */ 26856 SD_INFO(SD_LOG_SDTEST, un, 26857 "sd_faultinjection_ioctl: Injecting Fault Retreive"); 26858 26859 sd_fault_injection_on = 0; 26860 26861 mutex_enter(&(un->un_fi_mutex)); 26862 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 26863 un->sd_fi_buf_len+1, 0); 26864 mutex_exit(&(un->un_fi_mutex)); 26865 26866 if (rval == -1) { 26867 /* 26868 * arg is possibly invalid setting 26869 * it to NULL for return 26870 */ 26871 arg = NULL; 26872 } 26873 break; 26874 } 26875 26876 mutex_exit(SD_MUTEX(un)); 26877 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 26878 " exit\n"); 26879 } 26880 26881 26882 /* 26883 * Function: sd_injection_log() 26884 * 26885 * Description: This routine adds buff to the already existing injection log 26886 * for retrieval via faultinjection_ioctl for use in fault 26887 * detection and recovery 26888 * 26889 * Arguments: buf - the string to add to the log 26890 */ 26891 26892 static void 26893 sd_injection_log(char *buf, struct sd_lun *un) 26894 { 26895 uint_t len; 26896 26897 ASSERT(un != NULL); 26898 ASSERT(buf != NULL); 26899 26900 mutex_enter(&(un->un_fi_mutex)); 26901 26902 len = min(strlen(buf), 255); 26903 /* Add logged value to Injection log to be returned later */ 26904 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 26905 uint_t offset = strlen((char *)un->sd_fi_log); 26906 char *destp = (char *)un->sd_fi_log + offset; 26907 int i; 26908 for (i = 0; i < len; i++) { 26909 *destp++ = *buf++; 26910 } 26911 un->sd_fi_buf_len += len; 26912 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 26913 } 26914 26915 mutex_exit(&(un->un_fi_mutex)); 26916 } 26917 26918 26919 /* 26920 * Function: sd_faultinjection() 26921 * 26922 * Description: This routine takes the pkt and changes its 26923 * content based on error injection scenerio. 
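 *		For example, field values supplied in a queued sd_fi_pkt
 *		entry (such as pkt_reason) are conditionally copied over the
 *		corresponding fields of the real packet by the SD_CONDSET()
 *		calls below, so a successfully completed command can be made
 *		to look like a transport failure.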
26924 * 26925 * Arguments: pktp - packet to be changed 26926 */ 26927 26928 static void 26929 sd_faultinjection(struct scsi_pkt *pktp) 26930 { 26931 uint_t i; 26932 struct sd_fi_pkt *fi_pkt; 26933 struct sd_fi_xb *fi_xb; 26934 struct sd_fi_un *fi_un; 26935 struct sd_fi_arq *fi_arq; 26936 struct buf *bp; 26937 struct sd_xbuf *xb; 26938 struct sd_lun *un; 26939 26940 ASSERT(pktp != NULL); 26941 26942 /* pull bp xb and un from pktp */ 26943 bp = (struct buf *)pktp->pkt_private; 26944 xb = SD_GET_XBUF(bp); 26945 un = SD_GET_UN(bp); 26946 26947 ASSERT(un != NULL); 26948 26949 mutex_enter(SD_MUTEX(un)); 26950 26951 SD_TRACE(SD_LOG_SDTEST, un, 26952 "sd_faultinjection: entry Injection from sdintr\n"); 26953 26954 /* if injection is off return */ 26955 if (sd_fault_injection_on == 0 || 26956 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 26957 mutex_exit(SD_MUTEX(un)); 26958 return; 26959 } 26960 26961 26962 /* take next set off fifo */ 26963 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 26964 26965 fi_pkt = un->sd_fi_fifo_pkt[i]; 26966 fi_xb = un->sd_fi_fifo_xb[i]; 26967 fi_un = un->sd_fi_fifo_un[i]; 26968 fi_arq = un->sd_fi_fifo_arq[i]; 26969 26970 26971 /* set variables accordingly */ 26972 /* set pkt if it was on fifo */ 26973 if (fi_pkt != NULL) { 26974 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 26975 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 26976 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 26977 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 26978 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 26979 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 26980 26981 } 26982 26983 /* set xb if it was on fifo */ 26984 if (fi_xb != NULL) { 26985 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 26986 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 26987 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 26988 SD_CONDSET(xb, xb, xb_victim_retry_count, 26989 "xb_victim_retry_count"); 26990 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 26991 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 26992 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 26993 26994 /* copy in block data from sense */ 26995 if (fi_xb->xb_sense_data[0] != -1) { 26996 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 26997 SENSE_LENGTH); 26998 } 26999 27000 /* copy in extended sense codes */ 27001 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code, 27002 "es_code"); 27003 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key, 27004 "es_key"); 27005 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code, 27006 "es_add_code"); 27007 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, 27008 es_qual_code, "es_qual_code"); 27009 } 27010 27011 /* set un if it was on fifo */ 27012 if (fi_un != NULL) { 27013 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 27014 SD_CONDSET(un, un, un_ctype, "un_ctype"); 27015 SD_CONDSET(un, un, un_reset_retry_count, 27016 "un_reset_retry_count"); 27017 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 27018 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 27019 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled"); 27020 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 27021 "un_f_allow_bus_device_reset"); 27022 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 27023 27024 } 27025 27026 /* copy in auto request sense if it was on fifo */ 27027 if (fi_arq != NULL) { 27028 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 27029 } 27030 27031 /* free structs */ 27032 if (un->sd_fi_fifo_pkt[i] != NULL) { 27033 
kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 27034 } 27035 if (un->sd_fi_fifo_xb[i] != NULL) { 27036 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 27037 } 27038 if (un->sd_fi_fifo_un[i] != NULL) { 27039 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 27040 } 27041 if (un->sd_fi_fifo_arq[i] != NULL) { 27042 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 27043 } 27044 27045 /* 27046 * kmem_free does not gurantee to set to NULL 27047 * since we uses these to determine if we set 27048 * values or not lets confirm they are always 27049 * NULL after free 27050 */ 27051 un->sd_fi_fifo_pkt[i] = NULL; 27052 un->sd_fi_fifo_un[i] = NULL; 27053 un->sd_fi_fifo_xb[i] = NULL; 27054 un->sd_fi_fifo_arq[i] = NULL; 27055 27056 un->sd_fi_fifo_start++; 27057 27058 mutex_exit(SD_MUTEX(un)); 27059 27060 SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 27061 } 27062 27063 #endif /* SD_FAULT_INJECTION */ 27064 27065 /* 27066 * This routine is invoked in sd_unit_attach(). Before calling it, the 27067 * properties in conf file should be processed already, and "hotpluggable" 27068 * property was processed also. 27069 * 27070 * The sd driver distinguishes 3 different type of devices: removable media, 27071 * non-removable media, and hotpluggable. Below the differences are defined: 27072 * 27073 * 1. Device ID 27074 * 27075 * The device ID of a device is used to identify this device. Refer to 27076 * ddi_devid_register(9F). 27077 * 27078 * For a non-removable media disk device which can provide 0x80 or 0x83 27079 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique 27080 * device ID is created to identify this device. For other non-removable 27081 * media devices, a default device ID is created only if this device has 27082 * at least 2 alter cylinders. Otherwise, this device has no devid. 27083 * 27084 * ------------------------------------------------------- 27085 * removable media hotpluggable | Can Have Device ID 27086 * ------------------------------------------------------- 27087 * false false | Yes 27088 * false true | Yes 27089 * true x | No 27090 * ------------------------------------------------------ 27091 * 27092 * 27093 * 2. SCSI group 4 commands 27094 * 27095 * In SCSI specs, only some commands in group 4 command set can use 27096 * 8-byte addresses that can be used to access >2TB storage spaces. 27097 * Other commands have no such capability. Without supporting group4, 27098 * it is impossible to make full use of storage spaces of a disk with 27099 * capacity larger than 2TB. 27100 * 27101 * ----------------------------------------------- 27102 * removable media hotpluggable LP64 | Group 27103 * ----------------------------------------------- 27104 * false false false | 1 27105 * false false true | 4 27106 * false true false | 1 27107 * false true true | 4 27108 * true x x | 5 27109 * ----------------------------------------------- 27110 * 27111 * 27112 * 3. Check for VTOC Label 27113 * 27114 * If a direct-access disk has no EFI label, sd will check if it has a 27115 * valid VTOC label. Now, sd also does that check for removable media 27116 * and hotpluggable devices. 
27117 * 27118 * -------------------------------------------------------------- 27119 * Direct-Access removable media hotpluggable | Check Label 27120 * ------------------------------------------------------------- 27121 * false false false | No 27122 * false false true | No 27123 * false true false | Yes 27124 * false true true | Yes 27125 * true x x | Yes 27126 * -------------------------------------------------------------- 27127 * 27128 * 27129 * 4. Building default VTOC label 27130 * 27131 * As section 3 says, sd checks if some kinds of devices have VTOC label. 27132 * If those devices have no valid VTOC label, sd(7d) will attempt to 27133 * create default VTOC for them. Currently sd creates default VTOC label 27134 * for all devices on x86 platform (VTOC_16), but only for removable 27135 * media devices on SPARC (VTOC_8). 27136 * 27137 * ----------------------------------------------------------- 27138 * removable media hotpluggable platform | Default Label 27139 * ----------------------------------------------------------- 27140 * false false sparc | No 27141 * false true x86 | Yes 27142 * false true sparc | Yes 27143 * true x x | Yes 27144 * ---------------------------------------------------------- 27145 * 27146 * 27147 * 5. Supported blocksizes of target devices 27148 * 27149 * Sd supports non-512-byte blocksize for removable media devices only. 27150 * For other devices, only 512-byte blocksize is supported. This may be 27151 * changed in near future because some RAID devices require non-512-byte 27152 * blocksize 27153 * 27154 * ----------------------------------------------------------- 27155 * removable media hotpluggable | non-512-byte blocksize 27156 * ----------------------------------------------------------- 27157 * false false | No 27158 * false true | No 27159 * true x | Yes 27160 * ----------------------------------------------------------- 27161 * 27162 * 27163 * 6. Automatic mount & unmount 27164 * 27165 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query 27166 * if a device is removable media device. It return 1 for removable media 27167 * devices, and 0 for others. 27168 * 27169 * The automatic mounting subsystem should distinguish between the types 27170 * of devices and apply automounting policies to each. 27171 * 27172 * 27173 * 7. fdisk partition management 27174 * 27175 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver 27176 * just supports fdisk partitions on x86 platform. On sparc platform, sd 27177 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 27178 * fdisk partitions on both x86 and SPARC platform. 27179 * 27180 * ----------------------------------------------------------- 27181 * platform removable media USB/1394 | fdisk supported 27182 * ----------------------------------------------------------- 27183 * x86 X X | true 27184 * ------------------------------------------------------------ 27185 * sparc X X | false 27186 * ------------------------------------------------------------ 27187 * 27188 * 27189 * 8. MBOOT/MBR 27190 * 27191 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support 27192 * read/write mboot for removable media devices on sparc platform. 
27193 * 27194 * ----------------------------------------------------------- 27195 * platform removable media USB/1394 | mboot supported 27196 * ----------------------------------------------------------- 27197 * x86 X X | true 27198 * ------------------------------------------------------------ 27199 * sparc false false | false 27200 * sparc false true | true 27201 * sparc true false | true 27202 * sparc true true | true 27203 * ------------------------------------------------------------ 27204 * 27205 * 27206 * 9. error handling during opening device 27207 * 27208 * If failed to open a disk device, an errno is returned. For some kinds 27209 * of errors, different errno is returned depending on if this device is 27210 * a removable media device. This brings USB/1394 hard disks in line with 27211 * expected hard disk behavior. It is not expected that this breaks any 27212 * application. 27213 * 27214 * ------------------------------------------------------ 27215 * removable media hotpluggable | errno 27216 * ------------------------------------------------------ 27217 * false false | EIO 27218 * false true | EIO 27219 * true x | ENXIO 27220 * ------------------------------------------------------ 27221 * 27222 * 27223 * 11. ioctls: DKIOCEJECT, CDROMEJECT 27224 * 27225 * These IOCTLs are applicable only to removable media devices. 27226 * 27227 * ----------------------------------------------------------- 27228 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT 27229 * ----------------------------------------------------------- 27230 * false false | No 27231 * false true | No 27232 * true x | Yes 27233 * ----------------------------------------------------------- 27234 * 27235 * 27236 * 12. Kstats for partitions 27237 * 27238 * sd creates partition kstat for non-removable media devices. USB and 27239 * Firewire hard disks now have partition kstats 27240 * 27241 * ------------------------------------------------------ 27242 * removable media hotpluggable | kstat 27243 * ------------------------------------------------------ 27244 * false false | Yes 27245 * false true | Yes 27246 * true x | No 27247 * ------------------------------------------------------ 27248 * 27249 * 27250 * 13. Removable media & hotpluggable properties 27251 * 27252 * Sd driver creates a "removable-media" property for removable media 27253 * devices. Parent nexus drivers create a "hotpluggable" property if 27254 * it supports hotplugging. 27255 * 27256 * --------------------------------------------------------------------- 27257 * removable media hotpluggable | "removable-media" " hotpluggable" 27258 * --------------------------------------------------------------------- 27259 * false false | No No 27260 * false true | No Yes 27261 * true false | Yes No 27262 * true true | Yes Yes 27263 * --------------------------------------------------------------------- 27264 * 27265 * 27266 * 14. Power Management 27267 * 27268 * sd only power manages removable media devices or devices that support 27269 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250) 27270 * 27271 * A parent nexus that supports hotplugging can also set "pm-capable" 27272 * if the disk can be power managed. 
 *
 * ------------------------------------------------------------
 *  removable media   hotpluggable   pm-capable  |  power manage
 * ------------------------------------------------------------
 *   false             false          false      |     No
 *   false             false          true       |     Yes
 *   false             true           false      |     No
 *   false             true           true       |     Yes
 *   true              x              x          |     Yes
 * ------------------------------------------------------------
 *
 * USB and FireWire hard disks can now be power managed independently
 * of the framebuffer.
 *
 *
 * 15. Support for USB disks with capacity larger than 1TB
 *
 * Currently, sd doesn't permit a fixed disk device with a capacity
 * larger than 1TB to be used in a 32-bit operating system environment.
 * However, sd doesn't enforce this for removable media devices; instead,
 * it assumes that removable media devices cannot have a capacity larger
 * than 1TB. Therefore, using such devices on a 32-bit system is only
 * partially supported, which can cause unexpected results.
 *
 * ---------------------------------------------------------------------
 *  removable media   USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
 * ---------------------------------------------------------------------
 *   false             false    |      true        |        No
 *   false             true     |      true        |        No
 *   true              false    |      true        |        Yes
 *   true              true     |      true        |        Yes
 * ---------------------------------------------------------------------
 *
 *
 * 16. Check write-protection at open time
 *
 * When a removable media device is opened for writing without the NDELAY
 * flag, sd checks whether the device is writable. If the device is
 * write-protected, the open fails.
 *
 * ------------------------------------------------------------
 *  removable media   USB/1394   |     WP Check
 * ------------------------------------------------------------
 *   false             false     |       No
 *   false             true      |       No
 *   true              false     |       Yes
 *   true              true      |       Yes
 * ------------------------------------------------------------
 *
 *
 * 17. syslog when corrupted VTOC is encountered
 *
 * Currently, if an invalid VTOC is encountered, sd only prints a syslog
 * message for fixed SCSI disks.
 *
 * ------------------------------------------------------------
 *  removable media   USB/1394   |   print syslog
 * ------------------------------------------------------------
 *   false             false     |      Yes
 *   false             true      |      No
 *   true              false     |      No
 *   true              true      |      No
 * ------------------------------------------------------------
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and the medium can
		 * be changed after the device has been opened, so this
		 * operation must be supported.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte block sizes for removable media
		 * devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY flag
		 * when there is no media in the drive; in that case we don't
		 * care whether the device is writable. Without the NDELAY
		 * flag, we must check whether the media is write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor media state;
		 * when media is inserted or ejected, notify syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command,
		 * so check whether a device supports it before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 *	FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable media devices don't support
		 * LOG_SENSE, we cannot use that command to check whether
		 * a removable media device supports power management.
		 * We assume that such devices support power management via
		 * the START_STOP_UNIT command and can be spun up and down
		 * without limitation.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Create a zero-length (Boolean) "removable-media" property
		 * for removable media devices. The return value is not
		 * checked: if the property cannot be created we do not want
		 * the attach to fail, consistent with other property
		 * creation during attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable media devices once they are attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * formats: fixed format and descriptor format. At present,
		 * we don't support descriptor format sense data for
		 * removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * Partition kstats are created only for non-removable media
		 * devices.
		 *
		 * Set "enable-partition-kstats" to 0 in sd.conf to disable
		 * them. The default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check whether the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0)
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist then pm_capable_prop will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1).
		 * In this case, sd will check the start/stop cycle count
		 * log sense page and power manage the device if the cycle
		 * count limit has not been exceeded. (A standalone sketch of
		 * this decision follows sd_tg_getinfo() below.)
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The pm-capable property exists.
			 *
			 * Convert "TRUE" values of pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make them easier to
			 * check later. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since that is
		 * the only way for userland applications to detect hot
		 * removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}

/*
 * sd_tg_rdwr:
 *	Provides read/write access for cmlb via sd_tgops. start_block is
 *	in units of the system block size; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t buffer_size = reqlength;
	int rval;
	diskaddr_t cap;
	uint32_t lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block number
		 * and save the byte offset to the beginning of dk_label.
		 * (See the illustrative sketch following sd_tg_getinfo()
		 * below.)
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to a multiple of the target block */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be inaccurate by a
	 * bounded amount (in the interest of response latency). As a
	 * result, failed READs are commonplace (due to the reading of
	 * metadata and not data). Depending on the per-vendor/drive sense
	 * data, the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			/*
			 * Unaligned write: read the covering target blocks
			 * first, merge the caller's data into the bounce
			 * buffer, then write the whole buffer back.
			 */
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t cap;
	uint32_t lbasize;
	int path_flag = (int)(uintptr_t)tg_cookie;
	int ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
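/*
 * Illustrative sketch (not part of the driver): a standalone restatement of
 * how the "pm-capable" property steers the power-management flags set in
 * sd_set_unit_attributes() above. The helper name and the two output
 * parameters are hypothetical stand-ins for un_f_log_sense_supported and
 * un_f_pm_supported; the driver itself uses the SD_PM_CAPABLE_* constants.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
#define	EX_PM_CAPABLE_UNDEFINED	(-1)	/* property not present */
#define	EX_PM_CAPABLE_FALSE	0	/* property present, value 0 */

static void
ex_pm_capable_to_flags(int pm_capable_prop, int *use_log_sense, int *pm_ok)
{
	*use_log_sense = 0;
	*pm_ok = 0;

	if (pm_capable_prop == EX_PM_CAPABLE_UNDEFINED) {
		/* No property: consult the start/stop cycle log sense page. */
		*use_log_sense = 1;
	} else if (pm_capable_prop != EX_PM_CAPABLE_FALSE) {
		/* Any other value: power manage without checking log sense. */
		*pm_ok = 1;
	}
	/* EX_PM_CAPABLE_FALSE: leave both flags clear; do not power manage. */
}
#endif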
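/*
 * Illustrative sketch (not part of the driver): the arithmetic below mirrors
 * the readjustment that sd_tg_rdwr() performs on its NOT_DEVBSIZE() path
 * when the system block size differs from the target block size. The struct
 * and function names are hypothetical; the driver itself uses
 * SD_SYSBLOCKS2BYTES(), SD_TGTBYTEOFFSET() and fields of struct sd_lun.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct ex_blk_xlate {
	uint64_t tgt_start;	/* first target block covered by the request */
	size_t alloc_size;	/* bounce buffer size, multiple of tgt bsize */
	size_t byte_offset;	/* offset of requested data in that buffer */
	int aligned;		/* nonzero if no bounce buffer is needed */
};

static struct ex_blk_xlate
ex_xlate_request(uint64_t start_block, size_t reqlength,
    uint32_t sys_bsize, uint32_t tgt_bsize)
{
	struct ex_blk_xlate x;
	uint64_t first_byte = start_block * (uint64_t)sys_bsize;
	uint64_t end_block;

	/* first target block touched by the request */
	x.tgt_start = first_byte / tgt_bsize;

	/* round the end of the transfer up to a target-block boundary */
	end_block = (first_byte + reqlength + tgt_bsize - 1) / tgt_bsize;
	x.alloc_size = (size_t)(end_block - x.tgt_start) * tgt_bsize;

	/* where the caller's data sits inside the covering target blocks */
	x.byte_offset = (size_t)(first_byte % tgt_bsize);

	x.aligned = ((first_byte % tgt_bsize) == 0) &&
	    ((reqlength % tgt_bsize) == 0);
	return (x);
}

/*
 * Worked example: reading 512 bytes at system block 3 from a device with a
 * 2048-byte target block size maps to target block 0, a 2048-byte bounce
 * buffer, and a byte offset of 1536 within it. The request is unaligned,
 * so sd_tg_rdwr() would allocate the bounce buffer, READ into it, and
 * bcopy() the requested bytes out to the caller.
 */
#endif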