/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 *
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
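
/*
 * Illustrative sketch (not part of the driver): the check described above
 * could look roughly like the following, assuming the HBA exposes the value
 * through scsi_ifgetcap(9F); a return of -1 would indicate the capability is
 * not implemented, in which case the compiled-in default applies.
 *
 *	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", 1)) {
 *	case INTERCONNECT_FIBRE:
 *	case INTERCONNECT_SSA:
 *	case INTERCONNECT_FABRIC:
 *		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
 *		break;
 *	default:
 *		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
 *		break;
 *	}
 */
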
/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif
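
/*
 * For reference, a driver.conf(4) fragment using the property named above
 * pairs a vid/pid string with the name of a data property (the device and
 * property names below are hypothetical):
 *
 *	sd-config-list = "ACME    SuperDisk", "acme-disk-data";
 *	acme-disk-data = 1, 0x1, 32;
 *
 * where the data list begins with a version number, followed by a flags
 * word and the tunable values the flags select (see sd_chk_vers1_data()
 * and sd_set_vers1_properties() below).
 */
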
/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif
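
/*
 * With __fibre defined, each reference below to (for example) sd_state thus
 * compiles to ssd_state, so the sd and ssd modules each export a distinctly
 * named copy of every global even though they are built from the same source.
 */
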

#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void *sd_state				= NULL;
int sd_io_time				= SD_IO_TIME;
int sd_failfast_enable			= 1;
int sd_ua_retry_count			= SD_UA_RETRY_COUNT;
int sd_report_pfa			= 1;
int sd_max_throttle			= SD_MAX_THROTTLE;
int sd_min_throttle			= SD_MIN_THROTTLE;
int sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable		= TRUE;

int sd_retry_on_reservation_conflict	= 1;
int sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;
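
/*
 * Example (illustrative): debug printing can be enabled at boot time by
 * setting the masks above from /etc/system, e.g.:
 *
 *	set sd:sd_component_mask = 0xffffffff
 *	set sd:sd_level_mask = 0xffffffff
 *
 * (for the fibre channel module the variables are the ssd_-prefixed
 * equivalents).  Formatted messages are staged in sd_log_buf under
 * sd_log_mutex before being emitted.
 */
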
/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
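
/*
 * Illustrative sketch (not the actual implementation): a cached probe
 * lookup conceptually walks the list keyed by the parent dip, indexes by
 * target, and only performs a real probe when the cache does not already
 * record a non-responding target:
 *
 *	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
 *		if (cp->pdip == pdip)
 *			break;			(entry for this HBA)
 *	}
 *	if (cp->cache[tgt] == SCSIPROBE_NORESP)
 *		return (SCSIPROBE_NORESP);	(skip the slow probe)
 *	cp->cache[tgt] = (*fn)(devp, NULL_FUNC);
 *	return (cp->cache[tgt]);
 */
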

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif
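
/*
 * As can be inferred from the initializers above, the positional fields of
 * sd_tunables are, in order: throttle value, ctype, not-ready retry count,
 * busy retry count, reset retry count, reserve release time, minimum
 * throttle value, disksort-disabled flag, and lun-reset-enabled flag.
 * Entries leave a field zero when the corresponding SD_CONF_BSET_* flag is
 * not set for the device.
 */
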
/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.)  device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure.  Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure.  Device type is searched as far as the device_id string is
 * defined.  Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
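
/*
 * Example: given the matching rules above, the entry
 * " NEC CD-ROM DRIVE:260 " matches inquiry data reading
 * "NEC CD-ROM DRIVE:260" regardless of how many blanks separate the words,
 * and "*CSM100_*" matches any device whose 16-byte PID field contains
 * "CSM100_" anywhere, regardless of vendor.
 */
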


#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

/*
 * CDB group table: each row pairs a CDB size with its SCSI group code, the
 * largest LBA addressable with that CDB, and the maximum transfer length
 * (in blocks) it can express.
 */
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;
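
/*
 * Usage sketch for the state macros above: New_state() saves the current
 * state in un_last_state before installing the new one, so a transient
 * transition can later be undone, e.g.:
 *
 *	New_state(un, SD_STATE_RWAIT);	(previous state is remembered)
 *	...
 *	Restore_state(un);		(swaps back via un_last_state)
 *
 * Note that Restore_state() itself goes through New_state(), so the two
 * states are exchanged rather than popped from a stack.
 */
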
/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(struct sd_lun *un);
static int sd_get_serialnum(struct sd_lun *un, uchar_t *wwn, int *len);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static dev_t sd_make_device(dev_info_t *devi);
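
/*
 * Usage note for sd_cache_control() above: the rcd_flag (read cache
 * disable) and wce_flag (write cache enable) arguments take the SD_CACHE_*
 * values independently, so enabling the write cache while leaving the read
 * cache setting untouched would look like:
 *
 *	(void) sd_cache_control(un, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */
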
static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);
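
/*
 * The iostart and iodone functions above are not called directly; they are
 * linked into per-command chains (sd_iostart_chain/sd_iodone_chain) and
 * invoked by index, each layer passing control to the next entry on the
 * way down (iostart) and back up in reverse order on completion (iodone).
 * The "index" argument identifies the caller's position in the chain.
 */
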
/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
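
/*
 * sd_decode_sense() dispatches on the sense key in the returned sense data
 * to the matching sd_sense_key_*() handler above; keys without a dedicated
 * handler fall through to sd_sense_key_default().
 */
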
static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,	\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
	int flag);
static int sd_mhdioc_release(dev_t dev);
static int sd_mhdioc_register_devid(dev_t dev);
static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);

/*
 * SCSI removable prototypes
 */
static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_pause_resume(dev_t dev, int mode);
static int sr_play_msf(dev_t dev, caddr_t data, int flag);
static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
static int sr_sector_mode(dev_t dev, uint32_t blksize);
static int sr_eject(dev_t dev);
static void sr_ejected(register struct sd_lun *un);
static int sr_check_wp(dev_t dev);
static int sd_check_media(dev_t dev, enum dkio_state state);
static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_delayed_cv_broadcast(void *arg);
static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);

static int sd_log_page_supported(struct sd_lun *un, int log_page);

/*
 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
 */
static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag);
static int sd_wm_cache_constructor(void *wm, void *un, int flags);
static void sd_wm_cache_destructor(void *wm, void *un);
static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
	daddr_t endb, ushort_t typ);
static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
	daddr_t endb);
static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
static void sd_read_modify_write_task(void *arg);
static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
	struct buf **bpp);


/*
 * Function prototypes for failfast support.
 */
static void sd_failfast_flushq(struct sd_lun *un);
static int sd_failfast_flushq_callback(struct buf *bp);

/*
 * Function prototypes to check for lsi devices
 */
static void sd_is_lsi(struct sd_lun *un);

/*
 * Function prototypes for x86 support
 */
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
	struct scsi_pkt *pkt, struct sd_xbuf *xp);
#endif


/* Function prototypes for cmlb */
static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
	diskaddr_t start_block, size_t reqlength, void *tg_cookie);

static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);

/*
 * Constants for failfast support:
 *
 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
 * failfast processing being performed.
 *
 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
 * failfast processing on all bufs with B_FAILFAST set.
 */

#define	SD_FAILFAST_INACTIVE		0
#define	SD_FAILFAST_ACTIVE		1

/*
 * Bitmask to control behavior of buf(9S) flushes when a transition to
 * the failfast state occurs. Optional bits include:
 *
 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
 * be flushed.
 *
 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
 * driver, in addition to the regular wait queue. This includes the xbuf
 * queues. When clear, only the driver's wait queue will be flushed.
 */
#define	SD_FAILFAST_FLUSH_ALL_BUFS	0x01
#define	SD_FAILFAST_FLUSH_ALL_QUEUES	0x02

/*
 * The default behavior is to only flush bufs that have B_FAILFAST set, but
 * to flush all queues within the driver.
 */
static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
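
/*
 * For example, to also flush bufs that do NOT have B_FAILFAST set, both
 * bits may be turned on, e.g. via /etc/system (the variable is named
 * ssd_failfast_flushctl when this source is built as the ssd module):
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * where 0x3 is SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES.
 */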


/*
 * SD Testing Fault Injection
 */
#ifdef SD_FAULT_INJECTION
static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
static void sd_faultinjection(struct scsi_pkt *pktp);
static void sd_injection_log(char *buf, struct sd_lun *un);
#endif

/*
 * Device driver ops vector
 */
static struct cb_ops sd_cb_ops = {
	sdopen,			/* open */
	sdclose,		/* close */
	sdstrategy,		/* strategy */
	nodev,			/* print */
	sddump,			/* dump */
	sdread,			/* read */
	sdwrite,		/* write */
	sdioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	sd_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flags */
	CB_REV,			/* cb_rev */
	sdaread,		/* async I/O read entry point */
	sdawrite		/* async I/O write entry point */
};

static struct dev_ops sd_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	sdinfo,			/* info */
	nulldev,		/* identify */
	sdprobe,		/* probe */
	sdattach,		/* attach */
	sddetach,		/* detach */
	nodev,			/* reset */
	&sd_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	sdpower			/* power */
};


/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	SD_MODULE_NAME,		/* Module name. */
	&sd_ops			/* driver ops */
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static cmlb_tg_ops_t sd_tgops = {
	TG_DK_OPS_VERSION_1,
	sd_tg_rdwr,
	sd_tg_getinfo
};

static struct scsi_asq_key_strings sd_additional_codes[] = {
	0x81, 0, "Logical Unit is Reserved",
	0x85, 0, "Audio Address Not Valid",
	0xb6, 0, "Media Load Mechanism Failed",
	0xB9, 0, "Audio Play Operation Aborted",
	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
	0x53, 2, "Medium removal prevented",
	0x6f, 0, "Authentication failed during key exchange",
	0x6f, 1, "Key not present",
	0x6f, 2, "Key not established",
	0x6f, 3, "Read without proper authentication",
	0x6f, 4, "Mismatched region to this logical unit",
	0x6f, 5, "Region reset count error",
	0xffff, 0x0, NULL
};


/*
 * Struct for passing printing information for sense data messages
 */
struct sd_sense_info {
	int	ssi_severity;
	int	ssi_pfa_flag;
};

/*
 * Table of function pointers for iostart-side routines. Separate "chains"
 * of layered function calls are formed by placing the function pointers
 * sequentially in the desired order. Functions are called according to an
 * incrementing table index ordering. The last function in each chain must
 * be sd_core_iostart(). The corresponding iodone-side routines are expected
 * in the sd_iodone_chain[] array.
 *
 * Note: It may seem more natural to organize both the iostart and iodone
 * functions together, into an array of structures (or some similar
 * organization) with a common index, rather than two separate arrays which
 * must be maintained in synchronization. The purpose of this division is
 * to achieve improved performance: individual arrays allow for more
 * effective cache line utilization on certain platforms.
 */

typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);


static sd_chain_t sd_iostart_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 0 */
	sd_pm_iostart,			/* Index: 1 */
	sd_core_iostart,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 3 */
	sd_core_iostart,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 5 */
	sd_mapblocksize_iostart,	/* Index: 6 */
	sd_pm_iostart,			/* Index: 7 */
	sd_core_iostart,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 9 */
	sd_mapblocksize_iostart,	/* Index: 10 */
	sd_core_iostart,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 12 */
	sd_checksum_iostart,		/* Index: 13 */
	sd_pm_iostart,			/* Index: 14 */
	sd_core_iostart,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 16 */
	sd_checksum_iostart,		/* Index: 17 */
	sd_core_iostart,		/* Index: 18 */

	/* Chain for USCSI commands (all targets) */
	sd_pm_iostart,			/* Index: 19 */
	sd_core_iostart,		/* Index: 20 */

	/* Chain for checksumming USCSI commands (all targets) */
	sd_checksum_uscsi_iostart,	/* Index: 21 */
	sd_pm_iostart,			/* Index: 22 */
	sd_core_iostart,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 25 */
};

/*
 * Macros to locate the first function of each iostart chain in the
 * sd_iostart_chain[] array. These are located by the index in the array.
 */
#define	SD_CHAIN_DISK_IOSTART			0
#define	SD_CHAIN_DISK_IOSTART_NO_PM		3
#define	SD_CHAIN_RMMEDIA_IOSTART		5
#define	SD_CHAIN_RMMEDIA_IOSTART_NO_PM		9
#define	SD_CHAIN_CHKSUM_IOSTART			12
#define	SD_CHAIN_CHKSUM_IOSTART_NO_PM		16
#define	SD_CHAIN_USCSI_CMD_IOSTART		19
#define	SD_CHAIN_USCSI_CHKSUM_IOSTART		21
#define	SD_CHAIN_DIRECT_CMD_IOSTART		24
#define	SD_CHAIN_PRIORITY_CMD_IOSTART		25
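
/*
 * For example, a buf entering the removable-media chain starts at
 * SD_CHAIN_RMMEDIA_IOSTART (index 5, sd_mapblockaddr_iostart), is passed
 * down through index 6 (sd_mapblocksize_iostart) and index 7
 * (sd_pm_iostart), and terminates at index 8 (sd_core_iostart).
 */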

/*
 * Table of function pointers for the iodone-side routines for the driver-
 * internal layering mechanism. The calling sequence for iodone routines
 * uses a decrementing table index, so the last routine called in a chain
 * must be at the lowest array index location for that chain. The last
 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
 * of the functions in an iodone side chain must correspond to the ordering
 * of the iostart routines for that chain. Note that there is no iodone
 * side routine that corresponds to sd_core_iostart(), so there is no
 * entry in the table for this.
 */

static sd_chain_t sd_iodone_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_buf_iodone,			/* Index: 0 */
	sd_mapblockaddr_iodone,		/* Index: 1 */
	sd_pm_iodone,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_buf_iodone,			/* Index: 3 */
	sd_mapblockaddr_iodone,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_buf_iodone,			/* Index: 5 */
	sd_mapblockaddr_iodone,		/* Index: 6 */
	sd_mapblocksize_iodone,		/* Index: 7 */
	sd_pm_iodone,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_buf_iodone,			/* Index: 9 */
	sd_mapblockaddr_iodone,		/* Index: 10 */
	sd_mapblocksize_iodone,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_buf_iodone,			/* Index: 12 */
	sd_mapblockaddr_iodone,		/* Index: 13 */
	sd_checksum_iodone,		/* Index: 14 */
	sd_pm_iodone,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_buf_iodone,			/* Index: 16 */
	sd_mapblockaddr_iodone,		/* Index: 17 */
	sd_checksum_iodone,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_uscsi_iodone,		/* Index: 19 */
	sd_pm_iodone,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_uscsi_iodone,		/* Index: 21 */
	sd_checksum_uscsi_iodone,	/* Index: 22 */
	sd_pm_iodone,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 25 */
};


/*
 * Macros to locate the "first" function in the sd_iodone_chain[] array for
 * each iodone-side chain. These are located by the array index, but as the
 * iodone side functions are called in a decrementing-index order, the
 * highest index number in each chain must be specified (as these correspond
 * to the first function in the iodone chain that will be called by the core
 * at IO completion time).
 */

#define	SD_CHAIN_DISK_IODONE			2
#define	SD_CHAIN_DISK_IODONE_NO_PM		4
#define	SD_CHAIN_RMMEDIA_IODONE			8
#define	SD_CHAIN_RMMEDIA_IODONE_NO_PM		11
#define	SD_CHAIN_CHKSUM_IODONE			15
#define	SD_CHAIN_CHKSUM_IODONE_NO_PM		18
#define	SD_CHAIN_USCSI_CMD_IODONE		20
#define	SD_CHAIN_USCSI_CHKSUM_IODONE		23
#define	SD_CHAIN_DIRECT_CMD_IODONE		24
#define	SD_CHAIN_PRIORITY_CMD_IODONE		25
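
/*
 * For example, when a removable-media buf IO completes, the core begins
 * at SD_CHAIN_RMMEDIA_IODONE (index 8, sd_pm_iodone) and works down
 * through index 7 (sd_mapblocksize_iodone) and index 6
 * (sd_mapblockaddr_iodone), finishing at index 5 (sd_buf_iodone).
 */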




/*
 * Array to map a layering chain index to the appropriate initpkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);

static sd_initpkt_t sd_initpkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 0 */
	sd_initpkt_for_buf,		/* Index: 1 */
	sd_initpkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 3 */
	sd_initpkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 5 */
	sd_initpkt_for_buf,		/* Index: 6 */
	sd_initpkt_for_buf,		/* Index: 7 */
	sd_initpkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 9 */
	sd_initpkt_for_buf,		/* Index: 10 */
	sd_initpkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 12 */
	sd_initpkt_for_buf,		/* Index: 13 */
	sd_initpkt_for_buf,		/* Index: 14 */
	sd_initpkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 16 */
	sd_initpkt_for_buf,		/* Index: 17 */
	sd_initpkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 19 */
	sd_initpkt_for_uscsi,		/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 21 */
	sd_initpkt_for_uscsi,		/* Index: 22 */
	sd_initpkt_for_uscsi,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 25 */

};
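
/*
 * Illustrative sketch only (not a verbatim fragment of this driver): since
 * sd_initpkt_map[] parallels sd_iostart_chain[], the core can select the
 * proper initpkt routine directly from an xbuf's iostart chain index,
 * e.g. assuming xp and pktp have already been set up:
 *
 *	rval = (*sd_initpkt_map[xp->xb_chain_iostart])(bp, &pktp);
 */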


/*
 * Array to map a layering chain index to the appropriate destroypkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef void (*sd_destroypkt_t)(struct buf *);

static sd_destroypkt_t sd_destroypkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 0 */
	sd_destroypkt_for_buf,		/* Index: 1 */
	sd_destroypkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 3 */
	sd_destroypkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 5 */
	sd_destroypkt_for_buf,		/* Index: 6 */
	sd_destroypkt_for_buf,		/* Index: 7 */
	sd_destroypkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 9 */
	sd_destroypkt_for_buf,		/* Index: 10 */
	sd_destroypkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 12 */
	sd_destroypkt_for_buf,		/* Index: 13 */
	sd_destroypkt_for_buf,		/* Index: 14 */
	sd_destroypkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 16 */
	sd_destroypkt_for_buf,		/* Index: 17 */
	sd_destroypkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 19 */
	sd_destroypkt_for_uscsi,	/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 21 */
	sd_destroypkt_for_uscsi,	/* Index: 22 */
	sd_destroypkt_for_uscsi,	/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 25 */

};



/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */

#define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
#define	SD_CHAIN_BUFIO			1	/* regular buf IO */
#define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
#define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
#define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
						/* (for error recovery) */

static int sd_chain_type_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 0 */
	SD_CHAIN_BUFIO,			/* Index: 1 */
	SD_CHAIN_BUFIO,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 3 */
	SD_CHAIN_BUFIO,			/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 5 */
	SD_CHAIN_BUFIO,			/* Index: 6 */
	SD_CHAIN_BUFIO,			/* Index: 7 */
	SD_CHAIN_BUFIO,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 9 */
	SD_CHAIN_BUFIO,			/* Index: 10 */
	SD_CHAIN_BUFIO,			/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 12 */
	SD_CHAIN_BUFIO,			/* Index: 13 */
	SD_CHAIN_BUFIO,			/* Index: 14 */
	SD_CHAIN_BUFIO,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 16 */
	SD_CHAIN_BUFIO,			/* Index: 17 */
	SD_CHAIN_BUFIO,			/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 19 */
	SD_CHAIN_USCSI,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 21 */
	SD_CHAIN_USCSI,			/* Index: 22 */
	SD_CHAIN_USCSI,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	SD_CHAIN_DIRECT,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
};


/* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
#define	SD_IS_BUFIO(xp)	\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)

/* Macro to return TRUE if the IO has come from the "direct priority" chain. */
#define	SD_IS_DIRECT_PRIORITY(xp)	\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
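
/*
 * For example (illustrative only, assuming xp points to a valid sd_xbuf),
 * completion-side code can branch on the origin of an IO without carrying
 * any extra state:
 *
 *	if (SD_IS_BUFIO(xp)) {
 *		... handling specific to buf(9S) IO from sdstrategy() ...
 *	}
 */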


/*
 * Struct, array, and macros to map a specific chain to the appropriate
 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
 *
 * The sd_chain_index_map[] array is used at attach time to set the various
 * un_xxx_chain type members of the sd_lun softstate to the specific layering
 * chain to be used with the instance. This allows different instances to use
 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
 * values at sd_xbuf init time, this allows (1) layering chains to be changed
 * dynamically and without the use of locking; and (2) a layer to update the
 * xb_chain_io[start|done] member in a given xbuf with its current index value,
 * to allow for deferred processing of an IO within the same chain from a
 * different execution context.
 */

struct sd_chain_index {
	int	sci_iostart_index;
	int	sci_iodone_index;
};

static struct sd_chain_index sd_chain_index_map[] = {
	{ SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
	{ SD_CHAIN_DISK_IOSTART_NO_PM,		SD_CHAIN_DISK_IODONE_NO_PM },
	{ SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
	{ SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
	{ SD_CHAIN_USCSI_CMD_IOSTART,		SD_CHAIN_USCSI_CMD_IODONE },
	{ SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
	{ SD_CHAIN_DIRECT_CMD_IOSTART,		SD_CHAIN_DIRECT_CMD_IODONE },
	{ SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
};


/*
 * The following are indexes into the sd_chain_index_map[] array.
 */

/* un->un_buf_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DISK		0
#define	SD_CHAIN_INFO_DISK_NO_PM	1
#define	SD_CHAIN_INFO_RMMEDIA		2
#define	SD_CHAIN_INFO_RMMEDIA_NO_PM	3
#define	SD_CHAIN_INFO_CHKSUM		4
#define	SD_CHAIN_INFO_CHKSUM_NO_PM	5

/* un->un_uscsi_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_USCSI_CMD		6
/* USCSI with PM disabled is the same as DIRECT */
#define	SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
#define	SD_CHAIN_INFO_USCSI_CHKSUM	7

/* un->un_direct_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DIRECT_CMD	8

/* un->un_priority_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_PRIORITY_CMD	9

/* size for devid inquiries */
#define	MAX_INQUIRY_SIZE		0xF0

/*
 * Macros used by functions to pass a given buf(9S) struct along to the
 * next function in the layering chain for further processing.
 *
 * In the following macros, passing more than three arguments to the called
 * routines causes the optimizer for the SPARC compiler to stop doing tail
 * call elimination, which results in significant performance degradation.
 */
#define	SD_BEGIN_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[index]))(index, un, bp))

#define	SD_BEGIN_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[index]))(index, un, bp))

#define	SD_NEXT_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))

#define	SD_NEXT_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
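
/*
 * Illustrative sketch only (sd_example_iostart and example_prepare are
 * hypothetical names, not part of this driver): the general shape of a
 * layered iostart routine. A layer does its work and passes the buf to
 * the next layer with SD_NEXT_IOSTART(); on an error it can unwind by
 * starting the iodone chain at its own index with SD_BEGIN_IODONE():
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		if (example_prepare(un, bp) != 0) {
 *			bioerror(bp, EIO);
 *			SD_BEGIN_IODONE(index, un, bp);
 *			return;
 *		}
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}
 */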

/*
 * Function: _init
 *
 * Description: This is the driver _init(9E) entry point.
 *
 * Return Code: Returns the value from mod_install(9F) or
 *		ddi_soft_state_init(9F) as appropriate.
 *
 * Context: Called when driver module loaded.
 */

int
_init(void)
{
	int err;

	/* establish driver name from module name */
	sd_label = mod_modname(&modlinkage);

	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
	    SD_MAXUNIT);

	if (err != 0) {
		return (err);
	}

	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);

	/*
	 * it's ok to init here even for fibre devices
	 */
	sd_scsi_probe_cache_init();

	sd_scsi_target_lun_init();

	/*
	 * Creating taskq before mod_install ensures that all callers (threads)
	 * that enter the module after a successful mod_install encounter
	 * a valid taskq.
	 */
	sd_taskq_create();

	err = mod_install(&modlinkage);
	if (err != 0) {
		/* delete taskq if install fails */
		sd_taskq_delete();

		mutex_destroy(&sd_detach_mutex);
		mutex_destroy(&sd_log_mutex);
		mutex_destroy(&sd_label_mutex);

		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
		cv_destroy(&sd_tr.srq_inprocess_cv);

		sd_scsi_probe_cache_fini();

		sd_scsi_target_lun_fini();

		ddi_soft_state_fini(&sd_state);
		return (err);
	}

	return (err);
}


/*
 * Function: _fini
 *
 * Description: This is the driver _fini(9E) entry point.
 *
 * Return Code: Returns the value from mod_remove(9F)
 *
 * Context: Called when driver module is unloaded.
 */

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) != 0) {
		return (err);
	}

	sd_taskq_delete();

	mutex_destroy(&sd_detach_mutex);
	mutex_destroy(&sd_log_mutex);
	mutex_destroy(&sd_label_mutex);
	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);

	sd_scsi_probe_cache_fini();

	sd_scsi_target_lun_fini();

	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
	cv_destroy(&sd_tr.srq_inprocess_cv);

	ddi_soft_state_fini(&sd_state);

	return (err);
}


/*
 * Function: _info
 *
 * Description: This is the driver _info(9E) entry point.
 *
 * Arguments: modinfop - pointer to the driver modinfo structure
 *
 * Return Code: Returns the value from mod_info(9F).
 *
 * Context: Kernel thread context
 */

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/*
 * The following routines implement the driver message logging facility.
 * They provide component- and level- based debug output filtering.
 * Output may also be restricted to messages for a single instance by
 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
 * to NULL, then messages for all instances are printed.
 *
 * These routines have been cloned from each other due to the language
 * constraints of macros and variable argument list processing.
 */
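
/*
 * For example, debug output can be enabled by patching the filter globals,
 * e.g. from /etc/system (0xffffffff simply enables all components and
 * levels; the individual SD_LOGMASK_* and component bit values are defined
 * in sddef.h):
 *
 *	set sd:sd_component_mask = 0xffffffff
 *	set sd:sd_level_mask = 0xffffffff
 *
 * To restrict output to a single instance, sd_debug_un can then be set at
 * run time (e.g. with mdb -kw) to that instance's soft state pointer.
 */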
/*
 * Function: sd_log_err
 *
 * Description: This routine is called by the SD_ERROR macro for debug
 *		logging of error conditions.
 *
 * Arguments: comp - driver component being logged
 *	      un - pointer to driver soft state (unit) structure
 *	      fmt - error string and format to be logged
 */

static void
sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & comp) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_info
 *
 * Description: This routine is called by the SD_INFO macro for debug
 *		logging of general purpose informational conditions.
 *
 * Arguments: component - driver component being logged
 *	      un - pointer to driver soft state (unit) structure
 *	      fmt - info string and format to be logged
 */

static void
sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_INFO) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_trace
 *
 * Description: This routine is called by the SD_TRACE macro for debug
 *		logging of trace conditions (i.e. function entry/exit).
 *
 * Arguments: component - driver component being logged
 *	      un - pointer to driver soft state (unit) structure
 *	      fmt - trace string and format to be logged
 */

static void
sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_TRACE) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sdprobe
 *
 * Description: This is the driver probe(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *
 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
 *		DDI_PROBE_FAILURE: If the probe failed.
 *		DDI_PROBE_PARTIAL: If the instance is not present now,
 *		but may be present in the future.
 */

static int
sdprobe(dev_info_t *devi)
{
	struct scsi_device	*devp;
	int			rval;
	int			instance;

	/*
	 * if it wasn't for pln, sdprobe could actually be nulldev
	 * in the "__fibre" case.
	 */
	if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
		return (DDI_PROBE_DONTCARE);
	}

	devp = ddi_get_driver_private(devi);

	if (devp == NULL) {
		/* Oops... nexus driver is mis-configured... */
		return (DDI_PROBE_FAILURE);
	}

	instance = ddi_get_instance(devi);

	if (ddi_get_soft_state(sd_state, instance) != NULL) {
		return (DDI_PROBE_PARTIAL);
	}

	/*
	 * Call the SCSA utility probe routine to see if we actually
	 * have a target at this SCSI nexus.
	 */
	switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
	case SCSIPROBE_EXISTS:
		switch (devp->sd_inq->inq_dtype) {
		case DTYPE_DIRECT:
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_RODIRECT:
			/* CDs etc. Can be removable media */
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_OPTICAL:
			/*
			 * Rewritable optical driver HP115AA
			 * Can also be removable media
			 */

			/*
			 * Do not attempt to bind to DTYPE_OPTICAL if
			 * pre solaris 9 sparc sd behavior is required
			 *
			 * If first time through and sd_dtype_optical_bind
			 * has not been set in /etc/system check properties
			 */

			if (sd_dtype_optical_bind < 0) {
				sd_dtype_optical_bind = ddi_prop_get_int
				    (DDI_DEV_T_ANY, devi, 0,
				    "optical-device-bind", 1);
			}

			if (sd_dtype_optical_bind == 0) {
				rval = DDI_PROBE_FAILURE;
			} else {
				rval = DDI_PROBE_SUCCESS;
			}
			break;

		case DTYPE_NOTPRESENT:
		default:
			rval = DDI_PROBE_FAILURE;
			break;
		}
		break;
	default:
		rval = DDI_PROBE_PARTIAL;
		break;
	}

	/*
	 * This routine checks for resource allocation prior to freeing,
	 * so it will take care of the "smart probing" case where a
	 * scsi_probe() may or may not have been issued and will *not*
	 * free previously-freed resources.
	 */
	scsi_unprobe(devp);
	return (rval);
}
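
/*
 * Note: per the DTYPE_OPTICAL handling in sdprobe() above, binding to
 * optical devices can be suppressed either with an "optical-device-bind"
 * property or by presetting the global in /etc/system:
 *
 *	set sd:sd_dtype_optical_bind = 0
 */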

/*
 * Function: sdinfo
 *
 * Description: This is the driver getinfo(9e) entry point function.
 *		Given the device number, return the devinfo pointer from
 *		the scsi_device structure or the instance number
 *		associated with the dev_t.
 *
 * Arguments: dip - pointer to device info structure
 *	      infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
 *		DDI_INFO_DEVT2INSTANCE)
 *	      arg - driver dev_t
 *	      result - user buffer for request response
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 */
/* ARGSUSED */
static int
sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	struct sd_lun	*un;
	dev_t		dev;
	int		instance;
	int		error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		dev = (dev_t)arg;
		instance = SDUNIT(dev);
		if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
			return (DDI_FAILURE);
		}
		*result = (void *) SD_DEVINFO(un);
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		dev = (dev_t)arg;
		instance = SDUNIT(dev);
		*result = (void *)(uintptr_t)instance;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

/*
 * Function: sd_prop_op
 *
 * Description: This is the driver prop_op(9e) entry point function.
 *		Return the number of blocks for the partition in question
 *		or forward the request to the property facilities.
 *
 * Arguments: dev - device number
 *	      dip - pointer to device info structure
 *	      prop_op - property operator
 *	      mod_flags - DDI_PROP_DONTPASS, don't pass to parent
 *	      name - pointer to property name
 *	      valuep - pointer or address of the user buffer
 *	      lengthp - property length
 *
 * Return Code: DDI_PROP_SUCCESS
 *		DDI_PROP_NOT_FOUND
 *		DDI_PROP_UNDEFINED
 *		DDI_PROP_NO_MEMORY
 *		DDI_PROP_BUF_TOO_SMALL
 */

static int
sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
	char *name, caddr_t valuep, int *lengthp)
{
	int		instance = ddi_get_instance(dip);
	struct sd_lun	*un;
	uint64_t	nblocks64;

	/*
	 * Our dynamic properties are all device specific and size oriented.
	 * Requests issued under conditions where size is valid are passed
	 * to ddi_prop_op_nblocks with the size information, otherwise the
	 * request is passed to ddi_prop_op. Size depends on valid geometry.
	 */
	un = ddi_get_soft_state(sd_state, instance);
	if ((dev == DDI_DEV_T_ANY) || (un == NULL) ||
	    !SD_IS_VALID_LABEL(un)) {
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	} else {
		/* get nblocks value */
		ASSERT(!mutex_owned(SD_MUTEX(un)));

		(void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev),
		    (diskaddr_t *)&nblocks64, NULL, NULL, NULL,
		    (void *)SD_PATH_DIRECT);

		return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp, nblocks64));
	}
}

/*
 * The following functions are for smart probing:
 * sd_scsi_probe_cache_init()
 * sd_scsi_probe_cache_fini()
 * sd_scsi_clear_probe_cache()
 * sd_scsi_probe_with_cache()
 */

/*
 * Function: sd_scsi_probe_cache_init
 *
 * Description: Initializes the probe response cache mutex and head pointer.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_probe_cache_init(void)
{
	mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
	sd_scsi_probe_cache_head = NULL;
}


/*
 * Function: sd_scsi_probe_cache_fini
 *
 * Description: Frees all resources associated with the probe response cache.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_probe_cache_fini(void)
{
	struct sd_scsi_probe_cache *cp;
	struct sd_scsi_probe_cache *ncp;

	/* Clean up our smart probing linked list */
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
		ncp = cp->next;
		kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
	}
	sd_scsi_probe_cache_head = NULL;
	mutex_destroy(&sd_scsi_probe_cache_mutex);
}


/*
 * Function: sd_scsi_clear_probe_cache
 *
 * Description: This routine clears the probe response cache. This is
 *		done when open() returns ENXIO so that when deferred
 *		attach is attempted (possibly after a device has been
 *		turned on) we will retry the probe. Since we don't know
 *		which target we failed to open, we just clear the
 *		entire cache.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_clear_probe_cache(void)
{
	struct sd_scsi_probe_cache	*cp;
	int				i;

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		/*
		 * Reset all entries to SCSIPROBE_EXISTS. This will
		 * force probing to be performed the next time
		 * sd_scsi_probe_with_cache is called.
		 */
		for (i = 0; i < NTARGETS_WIDE; i++) {
			cp->cache[i] = SCSIPROBE_EXISTS;
		}
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);
}
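
/*
 * Back-of-the-envelope: with the cache in effect, a target that gave no
 * response on LUN 0 is not probed again on its nonzero LUNs, avoiding
 * roughly 7 x 250ms (about 1.75 seconds) of selection timeouts for a
 * target scanned with 8 LUNs; see sd_scsi_probe_with_cache() below.
 */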

/*
 * Function: sd_scsi_probe_with_cache
 *
 * Description: This routine implements support for a scsi device probe
 *		with cache. The driver maintains a cache of the target
 *		responses to scsi probes. If we get no response from a
 *		target during a probe inquiry, we remember that, and we
 *		avoid additional calls to scsi_probe on non-zero LUNs
 *		on the same target until the cache is cleared. By doing
 *		so we avoid the 1/4 sec selection timeout for nonzero
 *		LUNs. lun0 of a target is always probed.
 *
 * Arguments: devp - Pointer to a scsi_device(9S) structure
 *	      waitfunc - indicates what the allocator routines should
 *		do when resources are not available. This value
 *		is passed on to scsi_probe() when that routine
 *		is called.
 *
 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache;
 *		otherwise the value returned by scsi_probe(9F).
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
{
	struct sd_scsi_probe_cache	*cp;
	dev_info_t			*pdip = ddi_get_parent(devp->sd_dev);
	int				lun, tgt;

	lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_LUN, 0);
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/* Make sure caching is enabled and the target is in range */
	if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
		/* do it the old way (no cache) */
		return (scsi_probe(devp, waitfn));
	}

	mutex_enter(&sd_scsi_probe_cache_mutex);

	/* Find the cache for this scsi bus instance */
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip) {
			break;
		}
	}

	/* If we can't find a cache for this pdip, create one */
	if (cp == NULL) {
		int i;

		cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
		    KM_SLEEP);
		cp->pdip = pdip;
		cp->next = sd_scsi_probe_cache_head;
		sd_scsi_probe_cache_head = cp;
		for (i = 0; i < NTARGETS_WIDE; i++) {
			cp->cache[i] = SCSIPROBE_EXISTS;
		}
	}

	mutex_exit(&sd_scsi_probe_cache_mutex);

	/* Recompute the cache for this target if LUN zero */
	if (lun == 0) {
		cp->cache[tgt] = SCSIPROBE_EXISTS;
	}

	/* Don't probe if cache remembers a NORESP from a previous LUN. */
	if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
		return (SCSIPROBE_NORESP);
	}

	/* Do the actual probe; save & return the result */
	return (cp->cache[tgt] = scsi_probe(devp, waitfn));
}


/*
 * Function: sd_scsi_target_lun_init
 *
 * Description: Initializes the attached lun chain mutex and head pointer.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_target_lun_init(void)
{
	mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
	sd_scsi_target_lun_head = NULL;
}


/*
 * Function: sd_scsi_target_lun_fini
 *
 * Description: Frees all resources associated with the attached lun
 *		chain.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_target_lun_fini(void)
{
	struct sd_scsi_hba_tgt_lun	*cp;
	struct sd_scsi_hba_tgt_lun	*ncp;

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
		ncp = cp->next;
		kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
	}
	sd_scsi_target_lun_head = NULL;
	mutex_destroy(&sd_scsi_target_lun_mutex);
}
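
/*
 * For example, if luns 0, 1 and 2 of target 3 have attached under a given
 * HBA, that HBA's chain entry has nlun[3] == 3 and
 * sd_scsi_get_target_lun_count(dip, 3) (below) returns 3; a count of 0
 * would indicate that no luns from that target have attached yet, i.e.
 * that the once-per-target capability setup still needs to be performed.
 */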

/*
 * Function: sd_scsi_get_target_lun_count
 *
 * Description: This routine will check in the attached lun chain to see
 *		how many luns are attached on the required SCSI controller
 *		and target. Currently, some capabilities, like tagged
 *		queueing, are supported per target by the HBA, so all luns
 *		in a target have the same capabilities. Based on this
 *		assumption, sd should only set these capabilities once per
 *		target. This function is called when sd needs to decide how
 *		many luns are already attached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *		controller device.
 *	      target - The target ID on the controller's SCSI bus.
 *
 * Return Code: The number of luns attached on the required target and
 *		controller.
 *		-1 if target ID is not in parallel SCSI scope or the given
 *		dip is not in the chain.
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
{
	struct sd_scsi_hba_tgt_lun	*cp;

	if ((target < 0) || (target >= NTARGETS_WIDE)) {
		return (-1);
	}

	mutex_enter(&sd_scsi_target_lun_mutex);

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == dip) {
			break;
		}
	}

	mutex_exit(&sd_scsi_target_lun_mutex);

	if (cp == NULL) {
		return (-1);
	}

	return (cp->nlun[target]);
}


/*
 * Function: sd_scsi_update_lun_on_target
 *
 * Description: This routine is used to update the attached lun chain when a
 *		lun is attached or detached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *		controller device.
 *	      target - The target ID on the controller's SCSI bus.
 *	      flag - Indicates whether the lun is attached or detached.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
{
	struct sd_scsi_hba_tgt_lun	*cp;

	mutex_enter(&sd_scsi_target_lun_mutex);

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == dip) {
			break;
		}
	}

	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
		    KM_SLEEP);
		cp->pdip = dip;
		cp->next = sd_scsi_target_lun_head;
		sd_scsi_target_lun_head = cp;
	}

	mutex_exit(&sd_scsi_target_lun_mutex);

	if (cp != NULL) {
		if (flag == SD_SCSI_LUN_ATTACH) {
			cp->nlun[target]++;
		} else {
			cp->nlun[target]--;
		}
	}
}


/*
 * Function: sd_spin_up_unit
 *
 * Description: Issues the following commands to spin-up the device:
 *		START STOP UNIT and INQUIRY.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 - success
 *		EIO - failure
 *		EACCES - reservation conflict
 *
 * Context: Kernel thread context
 */

static int
sd_spin_up_unit(struct sd_lun *un)
{
	size_t	resid		= 0;
	int	has_conflict	= FALSE;
	uchar_t *bufaddr;

	ASSERT(un != NULL);

	/*
	 * Send a throwaway START UNIT command.
	 *
	 * If we fail on this, we don't care presently what precisely
	 * is wrong. EMC's arrays will also fail this with a check
	 * condition (0x2/0x4/0x3) if the device is "inactive," but
	 * we don't want to fail the attach because it may become
	 * "active" later.
	 */
	if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
	    == EACCES)
		has_conflict = TRUE;

	/*
	 * Send another INQUIRY command to the target. This is necessary for
	 * non-removable media direct access devices because their INQUIRY data
	 * may not be fully qualified until they are spun up (perhaps via the
	 * START command above). (Note: This seems to be needed for some
	 * legacy devices only.) The INQUIRY command should succeed even if a
	 * Reservation Conflict is present.
	 */
	bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
	if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
		kmem_free(bufaddr, SUN_INQSIZE);
		return (EIO);
	}

	/*
	 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
	 * Note that this routine does not return a failure here even if the
	 * INQUIRY command did not return any data. This is a legacy behavior.
	 */
	if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
		bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
	}

	kmem_free(bufaddr, SUN_INQSIZE);

	/* If we hit a reservation conflict above, tell the caller. */
	if (has_conflict == TRUE) {
		return (EACCES);
	}

	return (0);
}

#ifdef _LP64
/*
 * Function: sd_enable_descr_sense
 *
 * Description: This routine attempts to select descriptor sense format
 *		using the Control mode page. Devices that support 64 bit
 *		LBAs (for >2TB luns) should also implement descriptor
 *		sense data so we will call this function whenever we see
 *		a lun larger than 2TB. If for some reason the device
 *		supports 64 bit LBAs but doesn't support descriptor sense
 *		presumably the mode select will fail. Everything will
 *		continue to work normally except that we will not get
 *		complete sense data for commands that fail with an LBA
 *		larger than 32 bits.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context only
 */

static void
sd_enable_descr_sense(struct sd_lun *un)
{
	uchar_t			*header;
	struct mode_control_scsi3 *ctrl_bufp;
	size_t			buflen;
	size_t			bd_len;

	/*
	 * Read MODE SENSE page 0xA, Control Mode Page
	 */
	buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
	    sizeof (struct mode_control_scsi3);
	header = kmem_zalloc(buflen, KM_SLEEP);
	if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
	    MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_enable_descr_sense: mode sense ctrl page failed\n");
		goto eds_exit;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	bd_len = ((struct mode_header *)header)->bdesc_length;

	ctrl_bufp = (struct mode_control_scsi3 *)
	    (header + MODE_HEADER_LENGTH + bd_len);

	/*
	 * Clear PS bit for MODE SELECT
	 */
	ctrl_bufp->mode_page.ps = 0;

	/*
	 * Set D_SENSE to enable descriptor sense format.
	 */
	ctrl_bufp->d_sense = 1;

	/*
	 * Use MODE SELECT to commit the change to the D_SENSE bit
	 */
	if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header,
	    buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_enable_descr_sense: mode select ctrl page failed\n");
		goto eds_exit;
	}

eds_exit:
	kmem_free(header, buflen);
}

/*
 * Function: sd_reenable_dsense_task
 *
 * Description: Re-enable descriptor sense after device or bus reset
 *
 * Context: Executes in a taskq() thread context
 */
static void
sd_reenable_dsense_task(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	sd_enable_descr_sense(un);
}
#endif /* _LP64 */

/*
 * Function: sd_set_mmc_caps
 *
 * Description: This routine determines if the device is MMC compliant and if
 *		the device supports CDDA via a mode sense of the CDVD
 *		capabilities mode page. Also checks if the device is a
 *		dvdram writable device.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context only
 */

static void
sd_set_mmc_caps(struct sd_lun *un)
{
	struct mode_header_grp2	*sense_mhp;
	uchar_t			*sense_page;
	caddr_t			buf;
	int			bd_len;
	int			status;
	struct uscsi_cmd	com;
	int			rtn;
	uchar_t			*out_data_rw, *out_data_hd;
	uchar_t			*rqbuf_rw, *rqbuf_hd;

	ASSERT(un != NULL);

	/*
	 * The flags set in this function are: mmc compliant, dvdram
	 * writable device, and cdda support. Initialize them to FALSE;
	 * if a capability is detected, it will be set to TRUE.
	 */
	un->un_f_mmc_cap = FALSE;
	un->un_f_dvdram_writable_device = FALSE;
	un->un_f_cfg_cdda = FALSE;

	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);

	if (status != 0) {
		/* command failed; just return */
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	/*
	 * If the mode sense request for the CDROM CAPABILITIES
	 * page (0x2A) succeeds the device is assumed to be MMC.
	 */
	un->un_f_mmc_cap = TRUE;

	/* Get to the page data */
	sense_mhp = (struct mode_header_grp2 *)buf;
	bd_len = (sense_mhp->bdesc_length_hi << 8) |
	    sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/*
		 * We did not get back the expected block descriptor
		 * length so we cannot determine if the device supports
		 * CDDA. However, we still indicate the device is MMC
		 * according to the successful response to the page
		 * 0x2A mode sense request.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_set_mmc_caps: Mode Sense returned "
		    "invalid block descriptor length\n");
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/* See if read CDDA is supported */
	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
	    bd_len);
	un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;

	/* See if writing DVD RAM is supported. */
	un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ?
	    TRUE : FALSE;
	if (un->un_f_dvdram_writable_device == TRUE) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/*
	 * If the device presents DVD or CD capabilities in the mode
	 * page, we can return here since an RRD will not have
	 * these capabilities.
	 */
	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

	/*
	 * If un->un_f_dvdram_writable_device is still FALSE,
	 * check for a Removable Rigid Disk (RRD). An RRD
	 * device is identified by the features RANDOM_WRITABLE and
	 * HARDWARE_DEFECT_MANAGEMENT.
	 */
	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
	    RANDOM_WRITABLE, SD_PATH_STANDARD);
	if (rtn != 0) {
		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
		kmem_free(rqbuf_rw, SENSE_LENGTH);
		return;
	}

	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
	    HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);
	if (rtn == 0) {
		/*
		 * We have good information, check for random writable
		 * and hardware defect features.
		 */
		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
			un->un_f_dvdram_writable_device = TRUE;
		}
	}

	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_rw, SENSE_LENGTH);
	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_hd, SENSE_LENGTH);
}

/*
 * Function: sd_check_for_writable_cd
 *
 * Description: This routine determines if the media in the device is
 *		writable or not. It uses the get configuration command (0x46)
 *		to determine if the media is writable.
 *
 * Arguments: un - driver soft state (unit) structure
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct"
 *		chain and the normal command waitq, or
 *		SD_PATH_DIRECT_PRIORITY to use the USCSI
 *		"direct" chain and bypass the normal command
 *		waitq.
 *
 * Context: Never called at interrupt context.
 */

static void
sd_check_for_writable_cd(struct sd_lun *un, int path_flag)
{
	struct uscsi_cmd	com;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rtn;
	uchar_t			*out_data_rw, *out_data_hd;
	uchar_t			*rqbuf_rw, *rqbuf_hd;
	struct mode_header_grp2	*sense_mhp;
	uchar_t			*sense_page;
	caddr_t			buf;
	int			bd_len;
	int			status;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize the writable media to FALSE; only if the configuration
	 * info tells us otherwise will we set it to TRUE.
	 */
	un->un_f_mmc_writable_media = FALSE;
	mutex_exit(SD_MUTEX(un));

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
	    out_data, SD_PROFILE_HEADER_LEN, path_flag);

	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for writable DVD.
3270 */ 3271 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3272 un->un_f_mmc_writable_media = TRUE; 3273 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3274 kmem_free(rqbuf, SENSE_LENGTH); 3275 return; 3276 } 3277 } 3278 3279 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3280 kmem_free(rqbuf, SENSE_LENGTH); 3281 3282 /* 3283 * Determine if this is an RRD type device. 3284 */ 3285 mutex_exit(SD_MUTEX(un)); 3286 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3287 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3288 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3289 mutex_enter(SD_MUTEX(un)); 3290 if (status != 0) { 3291 /* command failed; just return */ 3292 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3293 return; 3294 } 3295 3296 /* Get to the page data */ 3297 sense_mhp = (struct mode_header_grp2 *)buf; 3298 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3299 if (bd_len > MODE_BLK_DESC_LENGTH) { 3300 /* 3301 * We did not get back the expected block descriptor length so 3302 * we cannot check the mode page. 3303 */ 3304 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3305 "sd_check_for_writable_cd: Mode Sense returned " 3306 "invalid block descriptor length\n"); 3307 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3308 return; 3309 } 3310 3311 /* 3312 * If the device presents DVD or CD capabilities in the mode 3313 * page, we can return here since an RRD device will not have 3314 * these capabilities. 3315 */ 3316 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3317 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3318 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3319 return; 3320 } 3321 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3322 3323 /* 3324 * If un->un_f_mmc_writable_media is still FALSE, 3325 * check for RRD type media. An RRD device is identified 3326 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3327 */ 3328 mutex_exit(SD_MUTEX(un)); 3329 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3330 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3331 3332 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3333 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3334 RANDOM_WRITABLE, path_flag); 3335 if (rtn != 0) { 3336 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3337 kmem_free(rqbuf_rw, SENSE_LENGTH); 3338 mutex_enter(SD_MUTEX(un)); 3339 return; 3340 } 3341 3342 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3343 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3344 3345 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3346 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3347 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3348 mutex_enter(SD_MUTEX(un)); 3349 if (rtn == 0) { 3350 /* 3351 * We have good information, check for random writable 3352 * and hardware defect features as current. 3353 */ 3354 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3355 (out_data_rw[10] & 0x1) && 3356 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3357 (out_data_hd[10] & 0x1)) { 3358 un->un_f_mmc_writable_media = TRUE; 3359 } 3360 } 3361 3362 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3363 kmem_free(rqbuf_rw, SENSE_LENGTH); 3364 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3365 kmem_free(rqbuf_hd, SENSE_LENGTH); 3366 } 3367 3368 /* 3369 * Function: sd_read_unit_properties 3370 * 3371 * Description: The following implements a property lookup mechanism.
3372 * Properties for particular disks (keyed on vendor, model 3373 * and rev numbers) are sought in the sd.conf file via 3374 * sd_process_sdconf_file(), and if not found there, are 3375 * looked for in a list hardcoded in this driver via 3376 * sd_process_sdconf_table(). Once located, the properties 3377 * are used to update the driver unit structure. 3378 * 3379 * Arguments: un - driver soft state (unit) structure 3380 */ 3381 3382 static void 3383 sd_read_unit_properties(struct sd_lun *un) 3384 { 3385 /* 3386 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3387 * the "sd-config-list" property (from the sd.conf file) or if 3388 * there was not a match for the inquiry vid/pid. If this event 3389 * occurs the static driver configuration table is searched for 3390 * a match. 3391 */ 3392 ASSERT(un != NULL); 3393 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3394 sd_process_sdconf_table(un); 3395 } 3396 3397 /* check for LSI device */ 3398 sd_is_lsi(un); 3399 3400 3401 } 3402 3403 3404 /* 3405 * Function: sd_process_sdconf_file 3406 * 3407 * Description: Use ddi_getlongprop to obtain the properties from the 3408 * driver's config file (i.e., sd.conf) and update the driver 3409 * soft state structure accordingly. 3410 * 3411 * Arguments: un - driver soft state (unit) structure 3412 * 3413 * Return Code: SD_SUCCESS - The properties were successfully set according 3414 * to the driver configuration file. 3415 * SD_FAILURE - The driver config list was not obtained or 3416 * there was no vid/pid match. This indicates that 3417 * the static config table should be used. 3418 * 3419 * The config file has a property, "sd-config-list", which consists of 3420 * one or more duplets as follows: 3421 * 3422 * sd-config-list= 3423 * <duplet>, 3424 * [<duplet>,] 3425 * [<duplet>]; 3426 * 3427 * The structure of each duplet is as follows: 3428 * 3429 * <duplet>:= <vid+pid>,<data-property-name-list> 3430 * 3431 * The first entry of the duplet is the device ID string (the concatenated 3432 * vid & pid; not to be confused with a device_id). This is defined in 3433 * the same way as in the sd_disk_table. 3434 * 3435 * The second part of the duplet is a string that identifies a 3436 * data-property-name-list. The data-property-name-list is defined as 3437 * follows: 3438 * 3439 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3440 * 3441 * The syntax of <data-property-name> depends on the <version> field. 3442 * 3443 * If version = SD_CONF_VERSION_1 we have the following syntax: 3444 * 3445 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3446 * 3447 * where the prop0 value will be used to set prop0 if bit0 is set in the 3448 * flags, prop1 if bit1 is set, etc.
and N = SD_CONF_MAX_ITEMS -1 3449 * 3450 */ 3451 3452 static int 3453 sd_process_sdconf_file(struct sd_lun *un) 3454 { 3455 char *config_list = NULL; 3456 int config_list_len; 3457 int len; 3458 int dupletlen = 0; 3459 char *vidptr; 3460 int vidlen; 3461 char *dnlist_ptr; 3462 char *dataname_ptr; 3463 int dnlist_len; 3464 int dataname_len; 3465 int *data_list; 3466 int data_list_len; 3467 int rval = SD_FAILURE; 3468 int i; 3469 3470 ASSERT(un != NULL); 3471 3472 /* Obtain the configuration list associated with the .conf file */ 3473 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3474 sd_config_list, (caddr_t)&config_list, &config_list_len) 3475 != DDI_PROP_SUCCESS) { 3476 return (SD_FAILURE); 3477 } 3478 3479 /* 3480 * Compare vids in each duplet to the inquiry vid - if a match is 3481 * made, get the data value and update the soft state structure 3482 * accordingly. 3483 * 3484 * Note: This algorithm is complex and difficult to maintain. It should 3485 * be replaced with a more robust implementation. 3486 */ 3487 for (len = config_list_len, vidptr = config_list; len > 0; 3488 vidptr += dupletlen, len -= dupletlen) { 3489 /* 3490 * Note: The assumption here is that each vid entry is on 3491 * a unique line from its associated duplet. 3492 */ 3493 vidlen = dupletlen = (int)strlen(vidptr); 3494 if ((vidlen == 0) || 3495 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3496 dupletlen++; 3497 continue; 3498 } 3499 3500 /* 3501 * dnlist contains 1 or more blank separated 3502 * data-property-name entries 3503 */ 3504 dnlist_ptr = vidptr + vidlen + 1; 3505 dnlist_len = (int)strlen(dnlist_ptr); 3506 dupletlen += dnlist_len + 2; 3507 3508 /* 3509 * Set a pointer for the first data-property-name 3510 * entry in the list 3511 */ 3512 dataname_ptr = dnlist_ptr; 3513 dataname_len = 0; 3514 3515 /* 3516 * Loop through all data-property-name entries in the 3517 * data-property-name-list setting the properties for each. 3518 */ 3519 while (dataname_len < dnlist_len) { 3520 int version; 3521 3522 /* 3523 * Determine the length of the current 3524 * data-property-name entry by indexing until a 3525 * blank or NULL is encountered. When the space is 3526 * encountered reset it to a NULL for compliance 3527 * with ddi_getlongprop(). 
3528 */ 3529 for (i = 0; ((dataname_ptr[i] != ' ') && 3530 (dataname_ptr[i] != '\0')); i++) { 3531 ; 3532 } 3533 3534 dataname_len += i; 3535 /* If not null terminated, make it so */ 3536 if (dataname_ptr[i] == ' ') { 3537 dataname_ptr[i] = '\0'; 3538 } 3539 dataname_len++; 3540 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3541 "sd_process_sdconf_file: disk:%s, data:%s\n", 3542 vidptr, dataname_ptr); 3543 3544 /* Get the data list */ 3545 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3546 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3547 != DDI_PROP_SUCCESS) { 3548 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3549 "sd_process_sdconf_file: data property (%s)" 3550 " has no value\n", dataname_ptr); 3551 dataname_ptr = dnlist_ptr + dataname_len; 3552 continue; 3553 } 3554 3555 version = data_list[0]; 3556 3557 if (version == SD_CONF_VERSION_1) { 3558 sd_tunables values; 3559 3560 /* Set the properties */ 3561 if (sd_chk_vers1_data(un, data_list[1], 3562 &data_list[2], data_list_len, dataname_ptr) 3563 == SD_SUCCESS) { 3564 sd_get_tunables_from_conf(un, 3565 data_list[1], &data_list[2], 3566 &values); 3567 sd_set_vers1_properties(un, 3568 data_list[1], &values); 3569 rval = SD_SUCCESS; 3570 } else { 3571 rval = SD_FAILURE; 3572 } 3573 } else { 3574 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3575 "data property %s version 0x%x is invalid.", 3576 dataname_ptr, version); 3577 rval = SD_FAILURE; 3578 } 3579 kmem_free(data_list, data_list_len); 3580 dataname_ptr = dnlist_ptr + dataname_len; 3581 } 3582 } 3583 3584 /* free up the memory allocated by ddi_getlongprop */ 3585 if (config_list) { 3586 kmem_free(config_list, config_list_len); 3587 } 3588 3589 return (rval); 3590 } 3591 3592 /* 3593 * Function: sd_get_tunables_from_conf() 3594 * 3595 * 3596 * Description: This function reads the data list from the sd.conf file, 3597 * pulls out the values that take numeric arguments, and places 3598 * them in the appropriate sd_tunables member. 3599 * Since the order of the data list members varies across platforms, 3600 * this function reads them from the data list in a platform-specific 3601 * order and places them into the correct sd_tunables member, which 3602 * is consistent across all platforms.
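 *
 * For example (hypothetical sd.conf values; SD_CONF_BSET_THROTTLE is
 * assumed here to be bit 0, as the switch below suggests): a version 1
 * entry of "1,0x1,10" yields flags = 0x1 and data_list[0] = 10, so the
 * loop below stores 10 in values->sdt_throttle and touches no other
 * member. Property values are positional by bit index, so a bit that
 * is not set simply skips its slot in the list.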
3603 */ 3604 static void 3605 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3606 sd_tunables *values) 3607 { 3608 int i; 3609 int mask; 3610 3611 bzero(values, sizeof (sd_tunables)); 3612 3613 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3614 3615 mask = 1 << i; 3616 if (mask > flags) { 3617 break; 3618 } 3619 3620 switch (mask & flags) { 3621 case 0: /* This mask bit not set in flags */ 3622 continue; 3623 case SD_CONF_BSET_THROTTLE: 3624 values->sdt_throttle = data_list[i]; 3625 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3626 "sd_get_tunables_from_conf: throttle = %d\n", 3627 values->sdt_throttle); 3628 break; 3629 case SD_CONF_BSET_CTYPE: 3630 values->sdt_ctype = data_list[i]; 3631 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3632 "sd_get_tunables_from_conf: ctype = %d\n", 3633 values->sdt_ctype); 3634 break; 3635 case SD_CONF_BSET_NRR_COUNT: 3636 values->sdt_not_rdy_retries = data_list[i]; 3637 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3638 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3639 values->sdt_not_rdy_retries); 3640 break; 3641 case SD_CONF_BSET_BSY_RETRY_COUNT: 3642 values->sdt_busy_retries = data_list[i]; 3643 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3644 "sd_get_tunables_from_conf: busy_retries = %d\n", 3645 values->sdt_busy_retries); 3646 break; 3647 case SD_CONF_BSET_RST_RETRIES: 3648 values->sdt_reset_retries = data_list[i]; 3649 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3650 "sd_get_tunables_from_conf: reset_retries = %d\n", 3651 values->sdt_reset_retries); 3652 break; 3653 case SD_CONF_BSET_RSV_REL_TIME: 3654 values->sdt_reserv_rel_time = data_list[i]; 3655 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3656 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3657 values->sdt_reserv_rel_time); 3658 break; 3659 case SD_CONF_BSET_MIN_THROTTLE: 3660 values->sdt_min_throttle = data_list[i]; 3661 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3662 "sd_get_tunables_from_conf: min_throttle = %d\n", 3663 values->sdt_min_throttle); 3664 break; 3665 case SD_CONF_BSET_DISKSORT_DISABLED: 3666 values->sdt_disk_sort_dis = data_list[i]; 3667 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3668 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3669 values->sdt_disk_sort_dis); 3670 break; 3671 case SD_CONF_BSET_LUN_RESET_ENABLED: 3672 values->sdt_lun_reset_enable = data_list[i]; 3673 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3674 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3675 "\n", values->sdt_lun_reset_enable); 3676 break; 3677 } 3678 } 3679 } 3680 3681 /* 3682 * Function: sd_process_sdconf_table 3683 * 3684 * Description: Search the static configuration table for a match on the 3685 * inquiry vid/pid and update the driver soft state structure 3686 * according to the table property values for the device. 3687 * 3688 * The form of a configuration table entry is: 3689 * <vid+pid>,<flags>,<property-data> 3690 * "SEAGATE ST42400N",1,63,0,0 (Fibre) 3691 * "SEAGATE ST42400N",1,63,0,0,0,0 (Sparc) 3692 * "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0 (Intel) 3693 * 3694 * Arguments: un - driver soft state (unit) structure 3695 */ 3696 3697 static void 3698 sd_process_sdconf_table(struct sd_lun *un) 3699 { 3700 char *id = NULL; 3701 int table_index; 3702 int idlen; 3703 3704 ASSERT(un != NULL); 3705 for (table_index = 0; table_index < sd_disk_table_size; 3706 table_index++) { 3707 id = sd_disk_table[table_index].device_id; 3708 idlen = strlen(id); 3709 if (idlen == 0) { 3710 continue; 3711 } 3712 3713 /* 3714 * The static configuration table currently does not 3715 * implement version 10 properties. 
Additionally, 3716 * multiple data-property-name entries are not 3717 * implemented in the static configuration table. 3718 */ 3719 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3720 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3721 "sd_process_sdconf_table: disk %s\n", id); 3722 sd_set_vers1_properties(un, 3723 sd_disk_table[table_index].flags, 3724 sd_disk_table[table_index].properties); 3725 break; 3726 } 3727 } 3728 } 3729 3730 3731 /* 3732 * Function: sd_sdconf_id_match 3733 * 3734 * Description: This local function implements a case insensitive vid/pid 3735 * comparison as well as the boundary cases of wild card and 3736 * multiple blanks. 3737 * 3738 * Note: An implicit assumption made here is that the scsi 3739 * inquiry structure will always keep the vid, pid and 3740 * revision strings in consecutive sequence, so they can be 3741 * read as a single string. If this assumption is not the 3742 * case, a separate string, to be used for the check, needs 3743 * to be built with these strings concatenated. 3744 * 3745 * Arguments: un - driver soft state (unit) structure 3746 * id - table or config file vid/pid 3747 * idlen - length of the vid/pid (bytes) 3748 * 3749 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3750 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3751 */ 3752 3753 static int 3754 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3755 { 3756 struct scsi_inquiry *sd_inq; 3757 int rval = SD_SUCCESS; 3758 3759 ASSERT(un != NULL); 3760 sd_inq = un->un_sd->sd_inq; 3761 ASSERT(id != NULL); 3762 3763 /* 3764 * We use the inq_vid as a pointer to a buffer containing the 3765 * vid and pid and use the entire vid/pid length of the table 3766 * entry for the comparison. This works because the inq_pid 3767 * data member follows inq_vid in the scsi_inquiry structure. 3768 */ 3769 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3770 /* 3771 * The user id string is compared to the inquiry vid/pid 3772 * using a case insensitive comparison and ignoring 3773 * multiple spaces. 3774 */ 3775 rval = sd_blank_cmp(un, id, idlen); 3776 if (rval != SD_SUCCESS) { 3777 /* 3778 * User id strings that start and end with a "*" 3779 * are a special case. These do not have a 3780 * specific vendor, and the product string can 3781 * appear anywhere in the 16 byte PID portion of 3782 * the inquiry data. This is a simple strstr() 3783 * type search for the user id in the inquiry data. 3784 */ 3785 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3786 char *pidptr = &id[1]; 3787 int i; 3788 int j; 3789 int pidstrlen = idlen - 2; 3790 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3791 pidstrlen; 3792 3793 if (j < 0) { 3794 return (SD_FAILURE); 3795 } 3796 for (i = 0; i < j; i++) { 3797 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3798 pidptr, pidstrlen) == 0) { 3799 rval = SD_SUCCESS; 3800 break; 3801 } 3802 } 3803 } 3804 } 3805 } 3806 return (rval); 3807 } 3808 3809 3810 /* 3811 * Function: sd_blank_cmp 3812 * 3813 * Description: If the id string starts and ends with a space, treat 3814 * multiple consecutive spaces as equivalent to a single 3815 * space. For example, this causes an sd_disk_table entry 3816 * of " NEC CDROM " to match a device's id string of 3817 * "NEC CDROM". 3818 * 3819 * Note: The success exit condition for this routine is if 3820 * the pointer to the table entry is '\0' and the cnt of 3821 * the inquiry length is zero.
This will happen if the inquiry 3822 * string returned by the device is padded with spaces to be 3823 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3824 * SCSI spec states that the inquiry string is to be padded with 3825 * spaces. 3826 * 3827 * Arguments: un - driver soft state (unit) structure 3828 * id - table or config file vid/pid 3829 * idlen - length of the vid/pid (bytes) 3830 * 3831 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3832 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3833 */ 3834 3835 static int 3836 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3837 { 3838 char *p1; 3839 char *p2; 3840 int cnt; 3841 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3842 sizeof (SD_INQUIRY(un)->inq_pid); 3843 3844 ASSERT(un != NULL); 3845 p2 = un->un_sd->sd_inq->inq_vid; 3846 ASSERT(id != NULL); 3847 p1 = id; 3848 3849 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3850 /* 3851 * Note: string p1 is terminated by a NUL but string p2 3852 * isn't. The end of p2 is determined by cnt. 3853 */ 3854 for (;;) { 3855 /* skip over any extra blanks in both strings */ 3856 while ((*p1 != '\0') && (*p1 == ' ')) { 3857 p1++; 3858 } 3859 while ((cnt != 0) && (*p2 == ' ')) { 3860 p2++; 3861 cnt--; 3862 } 3863 3864 /* compare the two strings */ 3865 if ((cnt == 0) || 3866 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3867 break; 3868 } 3869 while ((cnt > 0) && 3870 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3871 p1++; 3872 p2++; 3873 cnt--; 3874 } 3875 } 3876 } 3877 3878 /* return SD_SUCCESS if both strings match */ 3879 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE); 3880 } 3881 3882 3883 /* 3884 * Function: sd_chk_vers1_data 3885 * 3886 * Description: Verify the version 1 device properties provided by the 3887 * user via the configuration file 3888 * 3889 * Arguments: un - driver soft state (unit) structure 3890 * flags - integer mask indicating properties to be set 3891 * prop_list - integer list of property values 3892 * list_len - length of user provided data 3893 * 3894 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3895 * SD_FAILURE - Indicates the user provided data is invalid 3896 */ 3897 3898 static int 3899 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3900 int list_len, char *dataname_ptr) 3901 { 3902 int i; 3903 int mask = 1; 3904 int index = 0; 3905 3906 ASSERT(un != NULL); 3907 3908 /* Check for a NULL property name and list */ 3909 if (dataname_ptr == NULL) { 3910 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3911 "sd_chk_vers1_data: NULL data property name."); 3912 return (SD_FAILURE); 3913 } 3914 if (prop_list == NULL) { 3915 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3916 "sd_chk_vers1_data: %s NULL data property list.", 3917 dataname_ptr); 3918 return (SD_FAILURE); 3919 } 3920 3921 /* Display a warning if undefined bits are set in the flags */ 3922 if (flags & ~SD_CONF_BIT_MASK) { 3923 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3924 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3925 "Properties not set.", 3926 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3927 return (SD_FAILURE); 3928 } 3929 3930 /* 3931 * Verify the length of the list by identifying the highest bit set 3932 * in the flags and validating that the property list has a length 3933 * up to the index of this bit. 
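 *
 * For example (illustrating the intent of the check below): a data
 * list for flags 0x7 carries a version word, the flags word, and one
 * property word per flag bit, e.g. "1,0x7,<prop0>,<prop1>,<prop2>".
 * Lists too short to hold the properties implied by the flags are
 * rejected and no properties are set.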
3934 */ 3935 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3936 if (flags & mask) { 3937 index++; 3938 } 3939 mask = 1 << i; 3940 } 3941 if ((list_len / sizeof (int)) < (index + 2)) { 3942 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3943 "sd_chk_vers1_data: " 3944 "Data property list %s size is incorrect. " 3945 "Properties not set.", dataname_ptr); 3946 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3947 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3948 return (SD_FAILURE); 3949 } 3950 return (SD_SUCCESS); 3951 } 3952 3953 3954 /* 3955 * Function: sd_set_vers1_properties 3956 * 3957 * Description: Set version 1 device properties based on a property list 3958 * retrieved from the driver configuration file or static 3959 * configuration table. Version 1 properties have the format: 3960 * 3961 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3962 * 3963 * where the prop0 value will be used to set prop0 if bit0 3964 * is set in the flags 3965 * 3966 * Arguments: un - driver soft state (unit) structure 3967 * flags - integer mask indicating properties to be set 3968 * prop_list - integer list of property values 3969 */ 3970 3971 static void 3972 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 3973 { 3974 ASSERT(un != NULL); 3975 3976 /* 3977 * Set the flag to indicate cache is to be disabled. An attempt 3978 * to disable the cache via sd_cache_control() will be made 3979 * later during attach once the basic initialization is complete. 3980 */ 3981 if (flags & SD_CONF_BSET_NOCACHE) { 3982 un->un_f_opt_disable_cache = TRUE; 3983 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3984 "sd_set_vers1_properties: caching disabled flag set\n"); 3985 } 3986 3987 /* CD-specific configuration parameters */ 3988 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 3989 un->un_f_cfg_playmsf_bcd = TRUE; 3990 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3991 "sd_set_vers1_properties: playmsf_bcd set\n"); 3992 } 3993 if (flags & SD_CONF_BSET_READSUB_BCD) { 3994 un->un_f_cfg_readsub_bcd = TRUE; 3995 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3996 "sd_set_vers1_properties: readsub_bcd set\n"); 3997 } 3998 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 3999 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4000 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4001 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4002 } 4003 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4004 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4005 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4006 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4007 } 4008 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4009 un->un_f_cfg_no_read_header = TRUE; 4010 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4011 "sd_set_vers1_properties: no_read_header set\n"); 4012 } 4013 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4014 un->un_f_cfg_read_cd_xd4 = TRUE; 4015 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4016 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4017 } 4018 4019 /* Support for devices which do not have valid/unique serial numbers */ 4020 if (flags & SD_CONF_BSET_FAB_DEVID) { 4021 un->un_f_opt_fab_devid = TRUE; 4022 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4023 "sd_set_vers1_properties: fab_devid bit set\n"); 4024 } 4025 4026 /* Support for user throttle configuration */ 4027 if (flags & SD_CONF_BSET_THROTTLE) { 4028 ASSERT(prop_list != NULL); 4029 un->un_saved_throttle = un->un_throttle = 4030 prop_list->sdt_throttle; 4031 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4032 "sd_set_vers1_properties: throttle set to %d\n", 4033 prop_list->sdt_throttle); 4034 } 4035 4036 /* Set the per disk retry count 
according to the conf file or table. */ 4037 if (flags & SD_CONF_BSET_NRR_COUNT) { 4038 ASSERT(prop_list != NULL); 4039 if (prop_list->sdt_not_rdy_retries) { 4040 un->un_notready_retry_count = 4041 prop_list->sdt_not_rdy_retries; 4042 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4043 "sd_set_vers1_properties: not ready retry count" 4044 " set to %d\n", un->un_notready_retry_count); 4045 } 4046 } 4047 4048 /* The controller type is reported for generic disk driver ioctls */ 4049 if (flags & SD_CONF_BSET_CTYPE) { 4050 ASSERT(prop_list != NULL); 4051 switch (prop_list->sdt_ctype) { 4052 case CTYPE_CDROM: 4053 un->un_ctype = prop_list->sdt_ctype; 4054 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4055 "sd_set_vers1_properties: ctype set to " 4056 "CTYPE_CDROM\n"); 4057 break; 4058 case CTYPE_CCS: 4059 un->un_ctype = prop_list->sdt_ctype; 4060 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4061 "sd_set_vers1_properties: ctype set to " 4062 "CTYPE_CCS\n"); 4063 break; 4064 case CTYPE_ROD: /* RW optical */ 4065 un->un_ctype = prop_list->sdt_ctype; 4066 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4067 "sd_set_vers1_properties: ctype set to " 4068 "CTYPE_ROD\n"); 4069 break; 4070 default: 4071 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4072 "sd_set_vers1_properties: Could not set " 4073 "invalid ctype value (%d)", 4074 prop_list->sdt_ctype); 4075 } 4076 } 4077 4078 /* Purple failover timeout */ 4079 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4080 ASSERT(prop_list != NULL); 4081 un->un_busy_retry_count = 4082 prop_list->sdt_busy_retries; 4083 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4084 "sd_set_vers1_properties: " 4085 "busy retry count set to %d\n", 4086 un->un_busy_retry_count); 4087 } 4088 4089 /* Purple reset retry count */ 4090 if (flags & SD_CONF_BSET_RST_RETRIES) { 4091 ASSERT(prop_list != NULL); 4092 un->un_reset_retry_count = 4093 prop_list->sdt_reset_retries; 4094 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4095 "sd_set_vers1_properties: " 4096 "reset retry count set to %d\n", 4097 un->un_reset_retry_count); 4098 } 4099 4100 /* Purple reservation release timeout */ 4101 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4102 ASSERT(prop_list != NULL); 4103 un->un_reserve_release_time = 4104 prop_list->sdt_reserv_rel_time; 4105 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4106 "sd_set_vers1_properties: " 4107 "reservation release timeout set to %d\n", 4108 un->un_reserve_release_time); 4109 } 4110 4111 /* 4112 * Driver flag telling the driver to verify that no commands are pending 4113 * for a device before issuing a Test Unit Ready. This is a workaround 4114 * for a firmware bug in some Seagate eliteI drives. 4115 */ 4116 if (flags & SD_CONF_BSET_TUR_CHECK) { 4117 un->un_f_cfg_tur_check = TRUE; 4118 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4119 "sd_set_vers1_properties: tur queue check set\n"); 4120 } 4121 4122 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4123 un->un_min_throttle = prop_list->sdt_min_throttle; 4124 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4125 "sd_set_vers1_properties: min throttle set to %d\n", 4126 un->un_min_throttle); 4127 } 4128 4129 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4130 un->un_f_disksort_disabled = 4131 (prop_list->sdt_disk_sort_dis != 0) ? 4132 TRUE : FALSE; 4133 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4134 "sd_set_vers1_properties: disksort disabled " 4135 "flag set to %d\n", 4136 prop_list->sdt_disk_sort_dis); 4137 } 4138 4139 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4140 un->un_f_lun_reset_enabled = 4141 (prop_list->sdt_lun_reset_enable != 0) ? 
4142 TRUE : FALSE; 4143 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4144 "sd_set_vers1_properties: lun reset enabled " 4145 "flag set to %d\n", 4146 prop_list->sdt_lun_reset_enable); 4147 } 4148 4149 /* 4150 * Validate the throttle values. 4151 * If any of the numbers are invalid, set everything to defaults. 4152 */ 4153 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4154 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4155 (un->un_min_throttle > un->un_throttle)) { 4156 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4157 un->un_min_throttle = sd_min_throttle; 4158 } 4159 } 4160 4161 /* 4162 * Function: sd_is_lsi() 4163 * 4164 * Description: Check for LSI devices; step through the static device 4165 * table to match vid/pid. 4166 * 4167 * Args: un - ptr to sd_lun 4168 * 4169 * Notes: When a new LSI property is created, it must also be added 4170 * to this function. 4171 */ 4172 static void 4173 sd_is_lsi(struct sd_lun *un) 4174 { 4175 char *id = NULL; 4176 int table_index; 4177 int idlen; 4178 void *prop; 4179 4180 ASSERT(un != NULL); 4181 for (table_index = 0; table_index < sd_disk_table_size; 4182 table_index++) { 4183 id = sd_disk_table[table_index].device_id; 4184 idlen = strlen(id); 4185 if (idlen == 0) { 4186 continue; 4187 } 4188 4189 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4190 prop = sd_disk_table[table_index].properties; 4191 if (prop == &lsi_properties || 4192 prop == &lsi_oem_properties || 4193 prop == &lsi_properties_scsi || 4194 prop == &symbios_properties) { 4195 un->un_f_cfg_is_lsi = TRUE; 4196 } 4197 break; 4198 } 4199 } 4200 } 4201 4202 /* 4203 * Function: sd_get_physical_geometry 4204 * 4205 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4206 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4207 * target, and use this information to initialize the physical 4208 * geometry cache specified by pgeom_p. 4209 * 4210 * MODE SENSE is an optional command, so failure in this case 4211 * does not necessarily denote an error. We want to use the 4212 * MODE SENSE commands to derive the physical geometry of the 4213 * device, but if either command fails, the logical geometry is 4214 * used as the fallback for disk label geometry in cmlb. 4215 * 4216 * This requires that un->un_blockcount and un->un_tgt_blocksize 4217 * have already been initialized for the current target and 4218 * that the current values be passed as args so that we don't 4219 * end up ever trying to use -1 as a valid value. This could 4220 * happen if either value is reset while we're not holding 4221 * the mutex. 4222 * 4223 * Arguments: un - driver soft state (unit) structure 4224 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4225 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4226 * to use the USCSI "direct" chain and bypass the normal 4227 * command waitq. 4228 * 4229 * Context: Kernel thread only (can sleep).
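 *
 * (Layout note, for reference: in each MODE SENSE reply the page data
 * starts at the header plus the block descriptor length, so an ATAPI
 * device reporting bd_len == 0 has its page begin right after the
 * group 2 header, while a SCSI device typically reports
 * bd_len == MODE_BLK_DESC_LENGTH.)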
4230 */ 4231 4232 static int 4233 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4234 diskaddr_t capacity, int lbasize, int path_flag) 4235 { 4236 struct mode_format *page3p; 4237 struct mode_geometry *page4p; 4238 struct mode_header *headerp; 4239 int sector_size; 4240 int nsect; 4241 int nhead; 4242 int ncyl; 4243 int intrlv; 4244 int spc; 4245 diskaddr_t modesense_capacity; 4246 int rpm; 4247 int bd_len; 4248 int mode_header_length; 4249 uchar_t *p3bufp; 4250 uchar_t *p4bufp; 4251 int cdbsize; 4252 int ret = EIO; 4253 4254 ASSERT(un != NULL); 4255 4256 if (lbasize == 0) { 4257 if (ISCD(un)) { 4258 lbasize = 2048; 4259 } else { 4260 lbasize = un->un_sys_blocksize; 4261 } 4262 } 4263 pgeom_p->g_secsize = (unsigned short)lbasize; 4264 4265 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4266 4267 /* 4268 * Retrieve MODE SENSE page 3 - Format Device Page 4269 */ 4270 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4271 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4272 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4273 != 0) { 4274 SD_ERROR(SD_LOG_COMMON, un, 4275 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4276 goto page3_exit; 4277 } 4278 4279 /* 4280 * Determine size of Block Descriptors in order to locate the mode 4281 * page data. ATAPI devices return 0, SCSI devices should return 4282 * MODE_BLK_DESC_LENGTH. 4283 */ 4284 headerp = (struct mode_header *)p3bufp; 4285 if (un->un_f_cfg_is_atapi == TRUE) { 4286 struct mode_header_grp2 *mhp = 4287 (struct mode_header_grp2 *)headerp; 4288 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4289 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4290 } else { 4291 mode_header_length = MODE_HEADER_LENGTH; 4292 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4293 } 4294 4295 if (bd_len > MODE_BLK_DESC_LENGTH) { 4296 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4297 "received unexpected bd_len of %d, page3\n", bd_len); 4298 goto page3_exit; 4299 } 4300 4301 page3p = (struct mode_format *) 4302 ((caddr_t)headerp + mode_header_length + bd_len); 4303 4304 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4305 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4306 "mode sense pg3 code mismatch %d\n", 4307 page3p->mode_page.code); 4308 goto page3_exit; 4309 } 4310 4311 /* 4312 * Use this physical geometry data only if BOTH MODE SENSE commands 4313 * complete successfully; otherwise, revert to the logical geometry. 4314 * So, we need to save everything in temporary variables. 4315 */ 4316 sector_size = BE_16(page3p->data_bytes_sect); 4317 4318 /* 4319 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4320 */ 4321 if (sector_size == 0) { 4322 sector_size = (ISCD(un)) ? 
2048 : un->un_sys_blocksize; 4323 } else { 4324 sector_size &= ~(un->un_sys_blocksize - 1); 4325 } 4326 4327 nsect = BE_16(page3p->sect_track); 4328 intrlv = BE_16(page3p->interleave); 4329 4330 SD_INFO(SD_LOG_COMMON, un, 4331 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4332 SD_INFO(SD_LOG_COMMON, un, 4333 " mode page: %d; nsect: %d; sector size: %d;\n", 4334 page3p->mode_page.code, nsect, sector_size); 4335 SD_INFO(SD_LOG_COMMON, un, 4336 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4337 BE_16(page3p->track_skew), 4338 BE_16(page3p->cylinder_skew)); 4339 4340 4341 /* 4342 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4343 */ 4344 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4345 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4346 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4347 != 0) { 4348 SD_ERROR(SD_LOG_COMMON, un, 4349 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4350 goto page4_exit; 4351 } 4352 4353 /* 4354 * Determine size of Block Descriptors in order to locate the mode 4355 * page data. ATAPI devices return 0, SCSI devices should return 4356 * MODE_BLK_DESC_LENGTH. 4357 */ 4358 headerp = (struct mode_header *)p4bufp; 4359 if (un->un_f_cfg_is_atapi == TRUE) { 4360 struct mode_header_grp2 *mhp = 4361 (struct mode_header_grp2 *)headerp; 4362 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4363 } else { 4364 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4365 } 4366 4367 if (bd_len > MODE_BLK_DESC_LENGTH) { 4368 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4369 "received unexpected bd_len of %d, page4\n", bd_len); 4370 goto page4_exit; 4371 } 4372 4373 page4p = (struct mode_geometry *) 4374 ((caddr_t)headerp + mode_header_length + bd_len); 4375 4376 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4377 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4378 "mode sense pg4 code mismatch %d\n", 4379 page4p->mode_page.code); 4380 goto page4_exit; 4381 } 4382 4383 /* 4384 * Stash the data now, after we know that both commands completed. 4385 */ 4386 4387 4388 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4389 spc = nhead * nsect; 4390 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4391 rpm = BE_16(page4p->rpm); 4392 4393 modesense_capacity = spc * ncyl; 4394 4395 SD_INFO(SD_LOG_COMMON, un, 4396 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4397 SD_INFO(SD_LOG_COMMON, un, 4398 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4399 SD_INFO(SD_LOG_COMMON, un, 4400 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4401 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4402 (void *)pgeom_p, capacity); 4403 4404 /* 4405 * Compensate if the drive's geometry is not rectangular, i.e., 4406 * the product of C * H * S returned by MODE SENSE >= that returned 4407 * by read capacity. This is an idiosyncrasy of the original x86 4408 * disk subsystem. 
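 *
 * For example (illustrative numbers only): with nhead = 16 and
 * nsect = 63, spc = 1008; if MODE SENSE implies ncyl = 1000
 * (modesense_capacity = 1,008,000 blocks) while READ CAPACITY
 * reports 1,000,000 blocks, then
 *     g_acyl = (1008000 - 1000000 + 1007) / 1008 = 8
 *     g_ncyl = 1000 - 8 = 992
 * i.e. eight cylinders are set aside as alternates.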
4409 */ 4410 if (modesense_capacity >= capacity) { 4411 SD_INFO(SD_LOG_COMMON, un, 4412 "sd_get_physical_geometry: adjusting acyl; " 4413 "old: %d; new: %d\n", pgeom_p->g_acyl, 4414 (modesense_capacity - capacity + spc - 1) / spc); 4415 if (sector_size != 0) { 4416 /* 1243403: NEC D38x7 drives don't support sec size */ 4417 pgeom_p->g_secsize = (unsigned short)sector_size; 4418 } 4419 pgeom_p->g_nsect = (unsigned short)nsect; 4420 pgeom_p->g_nhead = (unsigned short)nhead; 4421 pgeom_p->g_capacity = capacity; 4422 pgeom_p->g_acyl = 4423 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4424 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4425 } 4426 4427 pgeom_p->g_rpm = (unsigned short)rpm; 4428 pgeom_p->g_intrlv = (unsigned short)intrlv; 4429 ret = 0; 4430 4431 SD_INFO(SD_LOG_COMMON, un, 4432 "sd_get_physical_geometry: mode sense geometry:\n"); 4433 SD_INFO(SD_LOG_COMMON, un, 4434 " nsect: %d; sector size: %d; interlv: %d\n", 4435 nsect, sector_size, intrlv); 4436 SD_INFO(SD_LOG_COMMON, un, 4437 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4438 nhead, ncyl, rpm, modesense_capacity); 4439 SD_INFO(SD_LOG_COMMON, un, 4440 "sd_get_physical_geometry: (cached)\n"); 4441 SD_INFO(SD_LOG_COMMON, un, 4442 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4443 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4444 pgeom_p->g_nhead, pgeom_p->g_nsect); 4445 SD_INFO(SD_LOG_COMMON, un, 4446 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4447 pgeom_p->g_secsize, pgeom_p->g_capacity, 4448 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4449 4450 page4_exit: 4451 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4452 page3_exit: 4453 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4454 4455 return (ret); 4456 } 4457 4458 /* 4459 * Function: sd_get_virtual_geometry 4460 * 4461 * Description: Ask the controller to tell us about the target device. 4462 * 4463 * Arguments: un - pointer to softstate 4464 * capacity - disk capacity in #blocks 4465 * lbasize - disk block size in bytes 4466 * 4467 * Context: Kernel thread only 4468 */ 4469 4470 static int 4471 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4472 diskaddr_t capacity, int lbasize) 4473 { 4474 uint_t geombuf; 4475 int spc; 4476 4477 ASSERT(un != NULL); 4478 4479 /* Set sector size, and total number of sectors */ 4480 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4481 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4482 4483 /* Let the HBA tell us its geometry */ 4484 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4485 4486 /* A value of -1 indicates an undefined "geometry" property */ 4487 if (geombuf == (-1)) { 4488 return (EINVAL); 4489 } 4490 4491 /* Initialize the logical geometry cache. */ 4492 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4493 lgeom_p->g_nsect = geombuf & 0xffff; 4494 lgeom_p->g_secsize = un->un_sys_blocksize; 4495 4496 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4497 4498 /* 4499 * Note: The driver originally converted the capacity value from 4500 * target blocks to system blocks. However, the capacity value passed 4501 * to this routine is already in terms of system blocks (this scaling 4502 * is done when the READ CAPACITY command is issued and processed). 4503 * This 'error' may have gone undetected because the usage of g_ncyl 4504 * (which is based upon g_capacity) is very limited within the driver 4505 */ 4506 lgeom_p->g_capacity = capacity; 4507 4508 /* 4509 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4510 * hba may return zero values if the device has been removed. 4511 */ 4512 if (spc == 0) { 4513 lgeom_p->g_ncyl = 0; 4514 } else { 4515 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4516 } 4517 lgeom_p->g_acyl = 0; 4518 4519 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4520 return (0); 4521 4522 } 4523 /* 4524 * Function: sd_update_block_info 4525 * 4526 * Description: Calculate a byte count to sector count bitshift value 4527 * from sector size. 4528 * 4529 * Arguments: un: unit struct. 4530 * lbasize: new target sector size 4531 * capacity: new target capacity, i.e., block count 4532 * 4533 * Context: Kernel thread context 4534 */ 4535 4536 static void 4537 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4538 { 4539 if (lbasize != 0) { 4540 un->un_tgt_blocksize = lbasize; 4541 un->un_f_tgt_blocksize_is_valid = TRUE; 4542 } 4543 4544 if (capacity != 0) { 4545 un->un_blockcount = capacity; 4546 un->un_f_blockcount_is_valid = TRUE; 4547 } 4548 } 4549 4550 4551 /* 4552 * Function: sd_register_devid 4553 * 4554 * Description: This routine will obtain the device id information from the 4555 * target, obtain the serial number, and register the device 4556 * id with the ddi framework. 4557 * 4558 * Arguments: devi - the system's dev_info_t for the device. 4559 * un - driver soft state (unit) structure 4560 * reservation_flag - indicates if a reservation conflict 4561 * occurred during attach 4562 * 4563 * Context: Kernel Thread 4564 */ 4565 static void 4566 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4567 { 4568 int rval = 0; 4569 uchar_t *inq80 = NULL; 4570 size_t inq80_len = MAX_INQUIRY_SIZE; 4571 size_t inq80_resid = 0; 4572 uchar_t *inq83 = NULL; 4573 size_t inq83_len = MAX_INQUIRY_SIZE; 4574 size_t inq83_resid = 0; 4575 4576 ASSERT(un != NULL); 4577 ASSERT(mutex_owned(SD_MUTEX(un))); 4578 ASSERT((SD_DEVINFO(un)) == devi); 4579 4580 /* 4581 * This is the case of antiquated Sun disk drives that have the 4582 * FAB_DEVID property set in the disk_table. These drives 4583 * manage the devids by storing them in the last 2 available sectors 4584 * on the drive and have them fabricated by the ddi layer by calling 4585 * ddi_devid_init and passing the DEVID_FAB flag. 4586 */ 4587 if (un->un_f_opt_fab_devid == TRUE) { 4588 /* 4589 * Depending on EINVAL isn't reliable, since a reserved disk 4590 * may result in invalid geometry, so check to make sure a 4591 * reservation conflict did not occur during attach. 4592 */ 4593 if ((sd_get_devid(un) == EINVAL) && 4594 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4595 /* 4596 * The devid is invalid AND there is no reservation 4597 * conflict. Fabricate a new devid. 4598 */ 4599 (void) sd_create_devid(un); 4600 } 4601 4602 /* Register the devid if it exists */ 4603 if (un->un_devid != NULL) { 4604 (void) ddi_devid_register(SD_DEVINFO(un), 4605 un->un_devid); 4606 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4607 "sd_register_devid: Devid Fabricated\n"); 4608 } 4609 return; 4610 } 4611 4612 /* 4613 * We check the availability of the World Wide Name (0x83) and Unit 4614 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 4615 * un_vpd_page_mask from them, we decide which way to get the WWN. If 4616 * 0x83 is available, that is the best choice. Our next choice is 4617 * 0x80. If neither is available, we munge the devid from the device 4618 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 4619 * to fabricate a devid for non-Sun qualified disks.
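 *
 * In outline (an illustrative sketch of the preference order, not
 * additional code):
 *     if (un_vpd_page_mask & SD_VPD_DEVID_WWN_PG)        use page 0x83;
 *     else if (un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) use page 0x80;
 *     else  munge vid/pid/serial# or fall back to a fabricated
 *           (DEVID_FAB) devid.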
4620 */ 4621 if (sd_check_vpd_page_support(un) == 0) { 4622 /* collect page 80 data if available */ 4623 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 4624 4625 mutex_exit(SD_MUTEX(un)); 4626 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 4627 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 4628 0x01, 0x80, &inq80_resid); 4629 4630 if (rval != 0) { 4631 kmem_free(inq80, inq80_len); 4632 inq80 = NULL; 4633 inq80_len = 0; 4634 } 4635 mutex_enter(SD_MUTEX(un)); 4636 } 4637 4638 /* collect page 83 data if available */ 4639 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 4640 mutex_exit(SD_MUTEX(un)); 4641 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 4642 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 4643 0x01, 0x83, &inq83_resid); 4644 4645 if (rval != 0) { 4646 kmem_free(inq83, inq83_len); 4647 inq83 = NULL; 4648 inq83_len = 0; 4649 } 4650 mutex_enter(SD_MUTEX(un)); 4651 } 4652 } 4653 4654 /* encode best devid possible based on data available */ 4655 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 4656 (char *)ddi_driver_name(SD_DEVINFO(un)), 4657 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 4658 inq80, inq80_len - inq80_resid, inq83, inq83_len - 4659 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 4660 4661 /* devid successfully encoded, register devid */ 4662 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 4663 4664 } else { 4665 /* 4666 * Unable to encode a devid based on data available. 4667 * This is not a Sun qualified disk. Older Sun disk 4668 * drives that have the SD_FAB_DEVID property 4669 * set in the disk_table and non-Sun qualified 4670 * disks are treated in the same manner. These 4671 * drives manage the devids by storing them in 4672 * the last 2 available sectors on the drive and 4673 * have them fabricated by the ddi layer by 4674 * calling ddi_devid_init and passing the 4675 * DEVID_FAB flag. 4676 * Only create a fabricated devid if one does 4677 * not already exist. 4678 */ 4679 if (sd_get_devid(un) == EINVAL) { 4680 (void) sd_create_devid(un); 4681 } 4682 un->un_f_opt_fab_devid = TRUE; 4683 4684 /* Register the devid if it exists */ 4685 if (un->un_devid != NULL) { 4686 (void) ddi_devid_register(SD_DEVINFO(un), 4687 un->un_devid); 4688 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4689 "sd_register_devid: devid fabricated using " 4690 "ddi framework\n"); 4691 } 4692 } 4693 4694 /* clean up resources */ 4695 if (inq80 != NULL) { 4696 kmem_free(inq80, inq80_len); 4697 } 4698 if (inq83 != NULL) { 4699 kmem_free(inq83, inq83_len); 4700 } 4701 } 4702 4703 4704 4705 /* 4706 * Function: sd_get_devid 4707 * 4708 * Description: This routine will return 0 if a valid device id has been 4709 * obtained from the target and stored in the soft state. If a 4710 * valid device id has not been previously read and stored, a 4711 * read attempt will be made.
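 *
 * The on-disk dk_devid block read below is validated roughly as
 * follows (a sketch of the checks performed in the body, where ip
 * is the block viewed as an array of uints):
 *
 *     chksum = 0;
 *     for (i = 0; i < (blocksize - sizeof (int)) / sizeof (int); i++)
 *         chksum ^= ip[i];
 *     valid = (DKD_GETCHKSUM(dkdevid) == chksum);
 *
 * i.e. every word except the trailing checksum word is XORed together
 * and compared against the stored checksum.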
4712 * 4713 * Arguments: un - driver soft state (unit) structure 4714 * 4715 * Return Code: 0 if we successfully get the device id 4716 * 4717 * Context: Kernel Thread 4718 */ 4719 4720 static int 4721 sd_get_devid(struct sd_lun *un) 4722 { 4723 struct dk_devid *dkdevid; 4724 ddi_devid_t tmpid; 4725 uint_t *ip; 4726 size_t sz; 4727 diskaddr_t blk; 4728 int status; 4729 int chksum; 4730 int i; 4731 size_t buffer_size; 4732 4733 ASSERT(un != NULL); 4734 ASSERT(mutex_owned(SD_MUTEX(un))); 4735 4736 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4737 un); 4738 4739 if (un->un_devid != NULL) { 4740 return (0); 4741 } 4742 4743 mutex_exit(SD_MUTEX(un)); 4744 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4745 (void *)SD_PATH_DIRECT) != 0) { 4746 mutex_enter(SD_MUTEX(un)); 4747 return (EINVAL); 4748 } 4749 4750 /* 4751 * Read and verify device id, stored in the reserved cylinders at the 4752 * end of the disk. Backup label is on the odd sectors of the last 4753 * track of the last cylinder. Device id will be on track of the next 4754 * to last cylinder. 4755 */ 4756 mutex_enter(SD_MUTEX(un)); 4757 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4758 mutex_exit(SD_MUTEX(un)); 4759 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4760 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4761 SD_PATH_DIRECT); 4762 if (status != 0) { 4763 goto error; 4764 } 4765 4766 /* Validate the revision */ 4767 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4768 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4769 status = EINVAL; 4770 goto error; 4771 } 4772 4773 /* Calculate the checksum */ 4774 chksum = 0; 4775 ip = (uint_t *)dkdevid; 4776 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4777 i++) { 4778 chksum ^= ip[i]; 4779 } 4780 4781 /* Compare the checksums */ 4782 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4783 status = EINVAL; 4784 goto error; 4785 } 4786 4787 /* Validate the device id */ 4788 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4789 status = EINVAL; 4790 goto error; 4791 } 4792 4793 /* 4794 * Store the device id in the driver soft state 4795 */ 4796 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4797 tmpid = kmem_alloc(sz, KM_SLEEP); 4798 4799 mutex_enter(SD_MUTEX(un)); 4800 4801 un->un_devid = tmpid; 4802 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4803 4804 kmem_free(dkdevid, buffer_size); 4805 4806 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4807 4808 return (status); 4809 error: 4810 mutex_enter(SD_MUTEX(un)); 4811 kmem_free(dkdevid, buffer_size); 4812 return (status); 4813 } 4814 4815 4816 /* 4817 * Function: sd_create_devid 4818 * 4819 * Description: This routine will fabricate the device id and write it 4820 * to the disk. 
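 * (For reference: ddi_devid_init() with DEVID_FAB asks the DDI
 * framework to fabricate an id on the driver's behalf;
 * sd_write_deviceid() then persists it in the reserved sector so the
 * same id is seen across reboots. If the write fails, the fabricated
 * id is discarded and un_devid is reset to NULL, as the code below
 * shows.)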
4821 * 4822 * Arguments: un - driver soft state (unit) structure 4823 * 4824 * Return Code: value of the fabricated device id 4825 * 4826 * Context: Kernel Thread 4827 */ 4828 4829 static ddi_devid_t 4830 sd_create_devid(struct sd_lun *un) 4831 { 4832 ASSERT(un != NULL); 4833 4834 /* Fabricate the devid */ 4835 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 4836 == DDI_FAILURE) { 4837 return (NULL); 4838 } 4839 4840 /* Write the devid to disk */ 4841 if (sd_write_deviceid(un) != 0) { 4842 ddi_devid_free(un->un_devid); 4843 un->un_devid = NULL; 4844 } 4845 4846 return (un->un_devid); 4847 } 4848 4849 4850 /* 4851 * Function: sd_write_deviceid 4852 * 4853 * Description: This routine will write the device id to the disk 4854 * reserved sector. 4855 * 4856 * Arguments: un - driver soft state (unit) structure 4857 * 4858 * Return Code: EINVAL 4859 * value returned by sd_send_scsi_cmd 4860 * 4861 * Context: Kernel Thread 4862 */ 4863 4864 static int 4865 sd_write_deviceid(struct sd_lun *un) 4866 { 4867 struct dk_devid *dkdevid; 4868 diskaddr_t blk; 4869 uint_t *ip, chksum; 4870 int status; 4871 int i; 4872 4873 ASSERT(mutex_owned(SD_MUTEX(un))); 4874 4875 mutex_exit(SD_MUTEX(un)); 4876 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4877 (void *)SD_PATH_DIRECT) != 0) { 4878 mutex_enter(SD_MUTEX(un)); 4879 return (-1); 4880 } 4881 4882 4883 /* Allocate the buffer */ 4884 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 4885 4886 /* Fill in the revision */ 4887 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 4888 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 4889 4890 /* Copy in the device id */ 4891 mutex_enter(SD_MUTEX(un)); 4892 bcopy(un->un_devid, &dkdevid->dkd_devid, 4893 ddi_devid_sizeof(un->un_devid)); 4894 mutex_exit(SD_MUTEX(un)); 4895 4896 /* Calculate the checksum */ 4897 chksum = 0; 4898 ip = (uint_t *)dkdevid; 4899 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4900 i++) { 4901 chksum ^= ip[i]; 4902 } 4903 4904 /* Fill-in checksum */ 4905 DKD_FORMCHKSUM(chksum, dkdevid); 4906 4907 /* Write the reserved sector */ 4908 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 4909 SD_PATH_DIRECT); 4910 4911 kmem_free(dkdevid, un->un_sys_blocksize); 4912 4913 mutex_enter(SD_MUTEX(un)); 4914 return (status); 4915 } 4916 4917 4918 /* 4919 * Function: sd_check_vpd_page_support 4920 * 4921 * Description: This routine sends an inquiry command with the EVPD bit set and 4922 * a page code of 0x00 to the device. It is used to determine which 4923 * vital product data pages are available to find the devid. We are 4924 * looking for pages 0x83 or 0x80. If -1 is returned, the 4925 * device does not support the command. 4926 * 4927 * Arguments: un - driver soft state (unit) structure 4928 * 4929 * Return Code: 0 - success 4930 * 1 - check condition 4931 * 4932 * Context: This routine can sleep. 4933 */ 4934 4935 static int 4936 sd_check_vpd_page_support(struct sd_lun *un) 4937 { 4938 uchar_t *page_list = NULL; 4939 uchar_t page_length = 0xff; /* Use max possible length */ 4940 uchar_t evpd = 0x01; /* Set the EVPD bit */ 4941 uchar_t page_code = 0x00; /* Supported VPD Pages */ 4942 int rval = 0; 4943 int counter; 4944 4945 ASSERT(un != NULL); 4946 ASSERT(mutex_owned(SD_MUTEX(un))); 4947 4948 mutex_exit(SD_MUTEX(un)); 4949 4950 /* 4951 * We'll set the page length to the maximum to save figuring it out 4952 * with an additional call.
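 *
 * A typical (illustrative) supported-pages response looks like:
 * byte 1 = 0x00 (this page's own code), byte 3 = the number of page
 * codes that follow, bytes 4.. = the supported page codes in
 * ascending order, e.g. 0x00 0x80 0x83.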
4953 */ 4954 page_list = kmem_zalloc(page_length, KM_SLEEP); 4955 4956 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 4957 page_code, NULL); 4958 4959 mutex_enter(SD_MUTEX(un)); 4960 4961 /* 4962 * Now we must validate that the device accepted the command, as some 4963 * drives do not support it. If the drive does support it, we will 4964 * return 0, and the supported pages will be in un_vpd_page_mask. If 4965 * not, we return -1. 4966 */ 4967 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 4968 /* Loop to find one of the 2 pages we need */ 4969 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 4970 4971 /* 4972 * Pages are returned in ascending order, and 0x83 is what we 4973 * are hoping for. 4974 */ 4975 while ((page_list[counter] <= 0x83) && 4976 (counter <= (page_list[VPD_PAGE_LENGTH] + 4977 VPD_HEAD_OFFSET))) { 4978 /* 4979 * The supported page codes follow the header, so the 4980 * last valid index is page_list[VPD_PAGE_LENGTH] + 4981 * VPD_HEAD_OFFSET. 4982 */ 4983 switch (page_list[counter]) { 4984 case 0x00: 4985 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 4986 break; 4987 case 0x80: 4988 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 4989 break; 4990 case 0x81: 4991 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 4992 break; 4993 case 0x82: 4994 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 4995 break; 4996 case 0x83: 4997 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 4998 break; 4999 } 5000 counter++; 5001 } 5002 5003 } else { 5004 rval = -1; 5005 5006 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5007 "sd_check_vpd_page_support: This drive does not implement " 5008 "VPD pages.\n"); 5009 } 5010 5011 kmem_free(page_list, page_length); 5012 5013 return (rval); 5014 } 5015 5016 5017 /* 5018 * Function: sd_setup_pm 5019 * 5020 * Description: Initialize Power Management on the device 5021 * 5022 * Context: Kernel Thread 5023 */ 5024 5025 static void 5026 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5027 { 5028 uint_t log_page_size; 5029 uchar_t *log_page_data; 5030 int rval; 5031 5032 /* 5033 * Since we are called from attach, holding a mutex for 5034 * un is unnecessary. Because some of the routines called 5035 * from here require SD_MUTEX to not be held, assert this 5036 * right up front. 5037 */ 5038 ASSERT(!mutex_owned(SD_MUTEX(un))); 5039 /* 5040 * Since the sd device does not have the 'reg' property, 5041 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5042 * The following code is to tell cpr that this device 5043 * DOES need to be suspended and resumed. 5044 */ 5045 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5046 "pm-hardware-state", "needs-suspend-resume"); 5047 5048 /* 5049 * This complies with the new power management framework 5050 * for certain desktop machines. Create the pm_components 5051 * property as a string array property. 5052 */ 5053 if (un->un_f_pm_supported) { 5054 /* 5055 * Not all devices have a motor, so try it first; some 5056 * devices may return ILLEGAL REQUEST and some 5057 * will hang. 5058 * The following START_STOP_UNIT is used to check if the 5059 * target device has a motor.
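 * (Illustrative behavior, not guaranteed for every device: a disk
 * that is already spinning typically treats the START as a no-op and
 * returns GOOD status, while a device with no motor fails the
 * command, which is how un_f_start_stop_supported ends up FALSE
 * below.)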
5060 */ 5061 un->un_f_start_stop_supported = TRUE; 5062 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5063 SD_PATH_DIRECT) != 0) { 5064 un->un_f_start_stop_supported = FALSE; 5065 } 5066 5067 /* 5068 * Create the pm properties anyway; otherwise the parent 5069 * can't go to sleep. 5070 */ 5071 (void) sd_create_pm_components(devi, un); 5072 un->un_f_pm_is_enabled = TRUE; 5073 return; 5074 } 5075 5076 if (!un->un_f_log_sense_supported) { 5077 un->un_power_level = SD_SPINDLE_ON; 5078 un->un_f_pm_is_enabled = FALSE; 5079 return; 5080 } 5081 5082 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5083 5084 #ifdef SDDEBUG 5085 if (sd_force_pm_supported) { 5086 /* Force a successful result */ 5087 rval = 1; 5088 } 5089 #endif 5090 5091 /* 5092 * If the start-stop cycle counter log page is not supported 5093 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5094 * then we should not create the pm_components property. 5095 */ 5096 if (rval == -1) { 5097 /* 5098 * Error. 5099 * Reading log sense failed, most likely this is 5100 * an older drive that does not support log sense. 5101 * If this fails auto-pm is not supported. 5102 */ 5103 un->un_power_level = SD_SPINDLE_ON; 5104 un->un_f_pm_is_enabled = FALSE; 5105 5106 } else if (rval == 0) { 5107 /* 5108 * Page not found. 5109 * The start stop cycle counter is implemented as page 5110 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For 5111 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5112 */ 5113 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5114 /* 5115 * Page found, use this one. 5116 */ 5117 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5118 un->un_f_pm_is_enabled = TRUE; 5119 } else { 5120 /* 5121 * Error or page not found. 5122 * auto-pm is not supported for this device. 5123 */ 5124 un->un_power_level = SD_SPINDLE_ON; 5125 un->un_f_pm_is_enabled = FALSE; 5126 } 5127 } else { 5128 /* 5129 * Page found, use it. 5130 */ 5131 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5132 un->un_f_pm_is_enabled = TRUE; 5133 } 5134 5135 5136 if (un->un_f_pm_is_enabled == TRUE) { 5137 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5138 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5139 5140 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5141 log_page_size, un->un_start_stop_cycle_page, 5142 0x01, 0, SD_PATH_DIRECT); 5143 #ifdef SDDEBUG 5144 if (sd_force_pm_supported) { 5145 /* Force a successful result */ 5146 rval = 0; 5147 } 5148 #endif 5149 5150 /* 5151 * If the LOG SENSE for the start/stop cycle counter page 5152 * succeeds, then power management is supported and we can 5153 * enable auto-pm. 5154 */ 5155 if (rval == 0) { 5156 (void) sd_create_pm_components(devi, un); 5157 } else { 5158 un->un_power_level = SD_SPINDLE_ON; 5159 un->un_f_pm_is_enabled = FALSE; 5160 } 5161 5162 kmem_free(log_page_data, log_page_size); 5163 } 5164 } 5165 5166 5167 /* 5168 * Function: sd_create_pm_components 5169 * 5170 * Description: Initialize PM property. 5171 * 5172 * Context: Kernel thread context 5173 */ 5174 5175 static void 5176 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5177 { 5178 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5179 5180 ASSERT(!mutex_owned(SD_MUTEX(un))); 5181 5182 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5183 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5184 /* 5185 * When components are initially created they are idle; 5186 * power up any non-removable devices.
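 * The property created above looks like this (for reference):
 *     pm-components="NAME=spindle-motor", "0=off", "1=on";
 * i.e. a single component with power level 0 (spindle off) and
 * power level 1 (spindle on).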
5187 * Note: the return value of pm_raise_power can't be used 5188 * for determining if PM should be enabled for this device. 5189 * Even if you check the return values and remove this 5190 * property created above, the PM framework will not honor the 5191 * change after the first call to pm_raise_power. Hence, 5192 * removal of that property does not help if pm_raise_power 5193 * fails. In the case of removable media, the start/stop 5194 * will fail if the media is not present. 5195 */ 5196 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5197 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5198 mutex_enter(SD_MUTEX(un)); 5199 un->un_power_level = SD_SPINDLE_ON; 5200 mutex_enter(&un->un_pm_mutex); 5201 /* Set to on and not busy. */ 5202 un->un_pm_count = 0; 5203 } else { 5204 mutex_enter(SD_MUTEX(un)); 5205 un->un_power_level = SD_SPINDLE_OFF; 5206 mutex_enter(&un->un_pm_mutex); 5207 /* Set to off. */ 5208 un->un_pm_count = -1; 5209 } 5210 mutex_exit(&un->un_pm_mutex); 5211 mutex_exit(SD_MUTEX(un)); 5212 } else { 5213 un->un_power_level = SD_SPINDLE_ON; 5214 un->un_f_pm_is_enabled = FALSE; 5215 } 5216 } 5217 5218 5219 /* 5220 * Function: sd_ddi_suspend 5221 * 5222 * Description: Performs system power-down operations. This includes 5223 * setting the drive state to indicate its suspended so 5224 * that no new commands will be accepted. Also, wait for 5225 * all commands that are in transport or queued to a timer 5226 * for retry to complete. All timeout threads are cancelled. 5227 * 5228 * Return Code: DDI_FAILURE or DDI_SUCCESS 5229 * 5230 * Context: Kernel thread context 5231 */ 5232 5233 static int 5234 sd_ddi_suspend(dev_info_t *devi) 5235 { 5236 struct sd_lun *un; 5237 clock_t wait_cmds_complete; 5238 5239 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5240 if (un == NULL) { 5241 return (DDI_FAILURE); 5242 } 5243 5244 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5245 5246 mutex_enter(SD_MUTEX(un)); 5247 5248 /* Return success if the device is already suspended. */ 5249 if (un->un_state == SD_STATE_SUSPENDED) { 5250 mutex_exit(SD_MUTEX(un)); 5251 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5252 "device already suspended, exiting\n"); 5253 return (DDI_SUCCESS); 5254 } 5255 5256 /* Return failure if the device is being used by HA */ 5257 if (un->un_resvd_status & 5258 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5259 mutex_exit(SD_MUTEX(un)); 5260 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5261 "device in use by HA, exiting\n"); 5262 return (DDI_FAILURE); 5263 } 5264 5265 /* 5266 * Return failure if the device is in a resource wait 5267 * or power changing state. 5268 */ 5269 if ((un->un_state == SD_STATE_RWAIT) || 5270 (un->un_state == SD_STATE_PM_CHANGING)) { 5271 mutex_exit(SD_MUTEX(un)); 5272 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5273 "device in resource wait state, exiting\n"); 5274 return (DDI_FAILURE); 5275 } 5276 5277 5278 un->un_save_state = un->un_last_state; 5279 New_state(un, SD_STATE_SUSPENDED); 5280 5281 /* 5282 * Wait for all commands that are in transport or queued to a timer 5283 * for retry to complete. 5284 * 5285 * While waiting, no new commands will be accepted or sent because of 5286 * the new state we set above. 5287 * 5288 * Wait till current operation has completed. If we are in the resource 5289 * wait state (with an intr outstanding) then we need to wait till the 5290 * intr completes and starts the next cmd. We want to wait for 5291 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 
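 * In outline, the timed wait below is the standard absolute-deadline
 * cv_timedwait(9F) pattern (sd_wait_cmds_complete is in seconds):
 *
 *	deadline = ddi_get_lbolt() + secs * drv_usectohz(1000000);
 *	while (un->un_ncmds_in_transport != 0)
 *		if (cv_timedwait(&cv, SD_MUTEX(un), deadline) == -1)
 *			-- timed out: restore state, fail DDI_SUSPEND
 *
 * cv_timedwait() takes an absolute lbolt value and returns -1 once
 * that time has passed.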
5292 */ 5293 wait_cmds_complete = ddi_get_lbolt() + 5294 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5295 5296 while (un->un_ncmds_in_transport != 0) { 5297 /* 5298 * Fail if commands do not finish in the specified time. 5299 */ 5300 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5301 wait_cmds_complete) == -1) { 5302 /* 5303 * Undo the state changes made above. Everything 5304 * must go back to it's original value. 5305 */ 5306 Restore_state(un); 5307 un->un_last_state = un->un_save_state; 5308 /* Wake up any threads that might be waiting. */ 5309 cv_broadcast(&un->un_suspend_cv); 5310 mutex_exit(SD_MUTEX(un)); 5311 SD_ERROR(SD_LOG_IO_PM, un, 5312 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5313 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5314 return (DDI_FAILURE); 5315 } 5316 } 5317 5318 /* 5319 * Cancel SCSI watch thread and timeouts, if any are active 5320 */ 5321 5322 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5323 opaque_t temp_token = un->un_swr_token; 5324 mutex_exit(SD_MUTEX(un)); 5325 scsi_watch_suspend(temp_token); 5326 mutex_enter(SD_MUTEX(un)); 5327 } 5328 5329 if (un->un_reset_throttle_timeid != NULL) { 5330 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5331 un->un_reset_throttle_timeid = NULL; 5332 mutex_exit(SD_MUTEX(un)); 5333 (void) untimeout(temp_id); 5334 mutex_enter(SD_MUTEX(un)); 5335 } 5336 5337 if (un->un_dcvb_timeid != NULL) { 5338 timeout_id_t temp_id = un->un_dcvb_timeid; 5339 un->un_dcvb_timeid = NULL; 5340 mutex_exit(SD_MUTEX(un)); 5341 (void) untimeout(temp_id); 5342 mutex_enter(SD_MUTEX(un)); 5343 } 5344 5345 mutex_enter(&un->un_pm_mutex); 5346 if (un->un_pm_timeid != NULL) { 5347 timeout_id_t temp_id = un->un_pm_timeid; 5348 un->un_pm_timeid = NULL; 5349 mutex_exit(&un->un_pm_mutex); 5350 mutex_exit(SD_MUTEX(un)); 5351 (void) untimeout(temp_id); 5352 mutex_enter(SD_MUTEX(un)); 5353 } else { 5354 mutex_exit(&un->un_pm_mutex); 5355 } 5356 5357 if (un->un_retry_timeid != NULL) { 5358 timeout_id_t temp_id = un->un_retry_timeid; 5359 un->un_retry_timeid = NULL; 5360 mutex_exit(SD_MUTEX(un)); 5361 (void) untimeout(temp_id); 5362 mutex_enter(SD_MUTEX(un)); 5363 } 5364 5365 if (un->un_direct_priority_timeid != NULL) { 5366 timeout_id_t temp_id = un->un_direct_priority_timeid; 5367 un->un_direct_priority_timeid = NULL; 5368 mutex_exit(SD_MUTEX(un)); 5369 (void) untimeout(temp_id); 5370 mutex_enter(SD_MUTEX(un)); 5371 } 5372 5373 if (un->un_f_is_fibre == TRUE) { 5374 /* 5375 * Remove callbacks for insert and remove events 5376 */ 5377 if (un->un_insert_event != NULL) { 5378 mutex_exit(SD_MUTEX(un)); 5379 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5380 mutex_enter(SD_MUTEX(un)); 5381 un->un_insert_event = NULL; 5382 } 5383 5384 if (un->un_remove_event != NULL) { 5385 mutex_exit(SD_MUTEX(un)); 5386 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5387 mutex_enter(SD_MUTEX(un)); 5388 un->un_remove_event = NULL; 5389 } 5390 } 5391 5392 mutex_exit(SD_MUTEX(un)); 5393 5394 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5395 5396 return (DDI_SUCCESS); 5397 } 5398 5399 5400 /* 5401 * Function: sd_ddi_pm_suspend 5402 * 5403 * Description: Set the drive state to low power. 5404 * Someone else is required to actually change the drive 5405 * power level. 
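 * (A note on the cancellations in sd_ddi_suspend() above: each timeout
 * is cancelled with SD_MUTEX dropped, using the idiom
 *
 *	timeout_id_t tid = un->un_some_timeid;	-- generic example field
 *	un->un_some_timeid = NULL;
 *	mutex_exit(SD_MUTEX(un));
 *	(void) untimeout(tid);
 *	mutex_enter(SD_MUTEX(un));
 *
 * because untimeout(9F) waits for a handler that has already fired to
 * complete, and such a handler may itself block on SD_MUTEX.)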
5406	 *
5407	 * Arguments: un - driver soft state (unit) structure
5408	 *
5409	 * Return Code: DDI_FAILURE or DDI_SUCCESS
5410	 *
5411	 * Context: Kernel thread context
5412	 */
5413
5414	static int
5415	sd_ddi_pm_suspend(struct sd_lun *un)
5416	{
5417		ASSERT(un != NULL);
5418		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");
5419
5420		ASSERT(!mutex_owned(SD_MUTEX(un)));
5421		mutex_enter(SD_MUTEX(un));
5422
5423		/*
5424		 * Exit if power management is not enabled for this device, or if
5425		 * the device is being used by HA.
5426		 */
5427		if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
5428		    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
5429			mutex_exit(SD_MUTEX(un));
5430			SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
5431			return (DDI_SUCCESS);
5432		}
5433
5434		SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
5435		    un->un_ncmds_in_driver);
5436
5437		/*
5438		 * See if the device is idle, i.e.:
5439		 *   - there are no commands in the driver for this device, and
5440		 *   - it is not waiting for resources.
5441		 */
5442		if ((un->un_ncmds_in_driver == 0) &&
5443		    (un->un_state != SD_STATE_RWAIT)) {
5444			/*
5445			 * The device is not busy, so it is OK to go to low power
5446			 * state. Indicate low power, but rely on someone else to
5447			 * actually change it.
5448			 */
5449			mutex_enter(&un->un_pm_mutex);
5450			un->un_pm_count = -1;
5451			mutex_exit(&un->un_pm_mutex);
5452			un->un_power_level = SD_SPINDLE_OFF;
5453		}
5454
5455		mutex_exit(SD_MUTEX(un));
5456
5457		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");
5458
5459		return (DDI_SUCCESS);
5460	}
5461
5462
5463	/*
5464	 * Function: sd_ddi_resume
5465	 *
5466	 * Description: Performs system power-up operations.
5467	 *
5468	 * Return Code: DDI_SUCCESS
5469	 *              DDI_FAILURE
5470	 *
5471	 * Context: Kernel thread context
5472	 */
5473
5474	static int
5475	sd_ddi_resume(dev_info_t *devi)
5476	{
5477		struct sd_lun *un;
5478
5479		un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
5480		if (un == NULL) {
5481			return (DDI_FAILURE);
5482		}
5483
5484		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
5485
5486		mutex_enter(SD_MUTEX(un));
5487		Restore_state(un);
5488
5489		/*
5490		 * Restore the state which was saved, to give
5491		 * the right state in un_last_state.
5492		 */
5493		un->un_last_state = un->un_save_state;
5494		/*
5495		 * Note: throttle comes back at full.
5496		 * Also note: this MUST be done before calling pm_raise_power,
5497		 * otherwise the system can get hung in biowait. The scenario
5498		 * where this will happen is under cpr suspend. Writing of the
5499		 * system state goes through sddump, which writes 0 to un_throttle.
5500		 * If writing the system state then fails (for example, if the
5501		 * partition is too small), cpr attempts a resume. If the throttle
5502		 * isn't restored from the saved value until after calling
5503		 * pm_raise_power, then commands sent in sdpower are not
5504		 * transported and sd_send_scsi_cmd hangs in biowait.
5505		 */
5506		un->un_throttle = un->un_saved_throttle;
5507
5508		/*
5509		 * The chance of failure is very rare, as the only command done in
5510		 * the power entry point is a START on the 0->1 or unknown->1
5511		 * transition. Put the device into the SPINDLE ON state irrespective
5512		 * of the state in which suspend was done, and ignore the return
5513		 * value, as the resume should not fail. In the case of removable
5514		 * media, the media need not be inserted, so there is a chance that
5515		 * raising power will fail with media not present.
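 * Note that SD_MUTEX is dropped around the pm_raise_power(9F) call
 * below: raising power re-enters this driver through sdpower(), which
 * takes SD_MUTEX itself, so holding it across the call would deadlock.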
5516 */ 5517 if (un->un_f_attach_spinup) { 5518 mutex_exit(SD_MUTEX(un)); 5519 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5520 mutex_enter(SD_MUTEX(un)); 5521 } 5522 5523 /* 5524 * Don't broadcast to the suspend cv and therefore possibly 5525 * start I/O until after power has been restored. 5526 */ 5527 cv_broadcast(&un->un_suspend_cv); 5528 cv_broadcast(&un->un_state_cv); 5529 5530 /* restart thread */ 5531 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5532 scsi_watch_resume(un->un_swr_token); 5533 } 5534 5535 #if (defined(__fibre)) 5536 if (un->un_f_is_fibre == TRUE) { 5537 /* 5538 * Add callbacks for insert and remove events 5539 */ 5540 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5541 sd_init_event_callbacks(un); 5542 } 5543 } 5544 #endif 5545 5546 /* 5547 * Transport any pending commands to the target. 5548 * 5549 * If this is a low-activity device commands in queue will have to wait 5550 * until new commands come in, which may take awhile. Also, we 5551 * specifically don't check un_ncmds_in_transport because we know that 5552 * there really are no commands in progress after the unit was 5553 * suspended and we could have reached the throttle level, been 5554 * suspended, and have no new commands coming in for awhile. Highly 5555 * unlikely, but so is the low-activity disk scenario. 5556 */ 5557 ddi_xbuf_dispatch(un->un_xbuf_attr); 5558 5559 sd_start_cmds(un, NULL); 5560 mutex_exit(SD_MUTEX(un)); 5561 5562 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5563 5564 return (DDI_SUCCESS); 5565 } 5566 5567 5568 /* 5569 * Function: sd_ddi_pm_resume 5570 * 5571 * Description: Set the drive state to powered on. 5572 * Someone else is required to actually change the drive 5573 * power level. 5574 * 5575 * Arguments: un - driver soft state (unit) structure 5576 * 5577 * Return Code: DDI_SUCCESS 5578 * 5579 * Context: Kernel thread context 5580 */ 5581 5582 static int 5583 sd_ddi_pm_resume(struct sd_lun *un) 5584 { 5585 ASSERT(un != NULL); 5586 5587 ASSERT(!mutex_owned(SD_MUTEX(un))); 5588 mutex_enter(SD_MUTEX(un)); 5589 un->un_power_level = SD_SPINDLE_ON; 5590 5591 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5592 mutex_enter(&un->un_pm_mutex); 5593 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5594 un->un_pm_count++; 5595 ASSERT(un->un_pm_count == 0); 5596 /* 5597 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5598 * un_suspend_cv is for a system resume, not a power management 5599 * device resume. (4297749) 5600 * cv_broadcast(&un->un_suspend_cv); 5601 */ 5602 } 5603 mutex_exit(&un->un_pm_mutex); 5604 mutex_exit(SD_MUTEX(un)); 5605 5606 return (DDI_SUCCESS); 5607 } 5608 5609 5610 /* 5611 * Function: sd_pm_idletimeout_handler 5612 * 5613 * Description: A timer routine that's active only while a device is busy. 5614 * The purpose is to extend slightly the pm framework's busy 5615 * view of the device to prevent busy/idle thrashing for 5616 * back-to-back commands. Do this by comparing the current time 5617 * to the time at which the last command completed and when the 5618 * difference is greater than sd_pm_idletime, call 5619 * pm_idle_component. In addition to indicating idle to the pm 5620 * framework, update the chain type to again use the internal pm 5621 * layers of the driver. 
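 * In outline (times in seconds, from ddi_get_time(9F)):
 *
 *	if (now - un->un_pm_idle_time > sd_pm_idletime &&
 *	    no commands in the driver && un_pm_count == 0)
 *		restore the pm-aware chains; pm_idle_component(dip, 0);
 *	else
 *		rearm this handler via timeout(9F) in about 300 ms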
5622 * 5623 * Arguments: arg - driver soft state (unit) structure 5624 * 5625 * Context: Executes in a timeout(9F) thread context 5626 */ 5627 5628 static void 5629 sd_pm_idletimeout_handler(void *arg) 5630 { 5631 struct sd_lun *un = arg; 5632 5633 time_t now; 5634 5635 mutex_enter(&sd_detach_mutex); 5636 if (un->un_detach_count != 0) { 5637 /* Abort if the instance is detaching */ 5638 mutex_exit(&sd_detach_mutex); 5639 return; 5640 } 5641 mutex_exit(&sd_detach_mutex); 5642 5643 now = ddi_get_time(); 5644 /* 5645 * Grab both mutexes, in the proper order, since we're accessing 5646 * both PM and softstate variables. 5647 */ 5648 mutex_enter(SD_MUTEX(un)); 5649 mutex_enter(&un->un_pm_mutex); 5650 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5651 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5652 /* 5653 * Update the chain types. 5654 * This takes affect on the next new command received. 5655 */ 5656 if (un->un_f_non_devbsize_supported) { 5657 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5658 } else { 5659 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5660 } 5661 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 5662 5663 SD_TRACE(SD_LOG_IO_PM, un, 5664 "sd_pm_idletimeout_handler: idling device\n"); 5665 (void) pm_idle_component(SD_DEVINFO(un), 0); 5666 un->un_pm_idle_timeid = NULL; 5667 } else { 5668 un->un_pm_idle_timeid = 5669 timeout(sd_pm_idletimeout_handler, un, 5670 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 5671 } 5672 mutex_exit(&un->un_pm_mutex); 5673 mutex_exit(SD_MUTEX(un)); 5674 } 5675 5676 5677 /* 5678 * Function: sd_pm_timeout_handler 5679 * 5680 * Description: Callback to tell framework we are idle. 5681 * 5682 * Context: timeout(9f) thread context. 5683 */ 5684 5685 static void 5686 sd_pm_timeout_handler(void *arg) 5687 { 5688 struct sd_lun *un = arg; 5689 5690 (void) pm_idle_component(SD_DEVINFO(un), 0); 5691 mutex_enter(&un->un_pm_mutex); 5692 un->un_pm_timeid = NULL; 5693 mutex_exit(&un->un_pm_mutex); 5694 } 5695 5696 5697 /* 5698 * Function: sdpower 5699 * 5700 * Description: PM entry point. 5701 * 5702 * Return Code: DDI_SUCCESS 5703 * DDI_FAILURE 5704 * 5705 * Context: Kernel thread context 5706 */ 5707 5708 static int 5709 sdpower(dev_info_t *devi, int component, int level) 5710 { 5711 struct sd_lun *un; 5712 int instance; 5713 int rval = DDI_SUCCESS; 5714 uint_t i, log_page_size, maxcycles, ncycles; 5715 uchar_t *log_page_data; 5716 int log_sense_page; 5717 int medium_present; 5718 time_t intvlp; 5719 dev_t dev; 5720 struct pm_trans_data sd_pm_tran_data; 5721 uchar_t save_state; 5722 int sval; 5723 uchar_t state_before_pm; 5724 int got_semaphore_here; 5725 5726 instance = ddi_get_instance(devi); 5727 5728 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 5729 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 5730 component != 0) { 5731 return (DDI_FAILURE); 5732 } 5733 5734 dev = sd_make_device(SD_DEVINFO(un)); 5735 5736 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 5737 5738 /* 5739 * Must synchronize power down with close. 5740 * Attempt to decrement/acquire the open/close semaphore, 5741 * but do NOT wait on it. If it's not greater than zero, 5742 * ie. it can't be decremented without waiting, then 5743 * someone else, either open or close, already has it 5744 * and the try returns 0. Use that knowledge here to determine 5745 * if it's OK to change the device power level. 5746 * Also, only increment it on exit if it was decremented, ie. gotten, 5747 * here. 
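 * (sema_tryp(9F) returns nonzero when the semaphore was acquired and
 * 0 when it would have had to block, so every exit path below pairs
 * the acquisition with
 *
 *	if (got_semaphore_here != 0)
 *		sema_v(&un->un_semoclose);
 *
 * releasing the semaphore only if it was obtained here.)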
5748	 */
5749		got_semaphore_here = sema_tryp(&un->un_semoclose);
5750
5751		mutex_enter(SD_MUTEX(un));
5752
5753		SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
5754		    un->un_ncmds_in_driver);
5755
5756		/*
5757		 * If un_ncmds_in_driver is non-zero it indicates commands are
5758		 * already being processed in the driver; if the semaphore was
5759		 * not obtained here, an open or close is being processed. In
5760		 * either case somebody is requesting to go to low power, which
5761		 * can't happen now, so return failure.
5762		 */
5763		if ((level == SD_SPINDLE_OFF) &&
5764		    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
5765			mutex_exit(SD_MUTEX(un));
5766
5767			if (got_semaphore_here != 0) {
5768				sema_v(&un->un_semoclose);
5769			}
5770			SD_TRACE(SD_LOG_IO_PM, un,
5771			    "sdpower: exit, device has queued cmds.\n");
5772			return (DDI_FAILURE);
5773		}
5774
5775		/*
5776		 * If the device is OFFLINE, the disk is effectively dead; we
5777		 * would have to change its power state by sending it commands,
5778		 * which would fail anyway, so return failure here.
5779		 *
5780		 * Power changes to a device that's OFFLINE or SUSPENDED
5781		 * are not allowed.
5782		 */
5783		if ((un->un_state == SD_STATE_OFFLINE) ||
5784		    (un->un_state == SD_STATE_SUSPENDED)) {
5785			mutex_exit(SD_MUTEX(un));
5786
5787			if (got_semaphore_here != 0) {
5788				sema_v(&un->un_semoclose);
5789			}
5790			SD_TRACE(SD_LOG_IO_PM, un,
5791			    "sdpower: exit, device is off-line.\n");
5792			return (DDI_FAILURE);
5793		}
5794
5795		/*
5796		 * Change the device's state to indicate its power level is
5797		 * being changed. Do this to prevent a power off in the middle
5798		 * of commands, which is especially bad on devices that are
5799		 * really powered off instead of just spun down.
5800		 */
5801		state_before_pm = un->un_state;
5802		un->un_state = SD_STATE_PM_CHANGING;
5803
5804		mutex_exit(SD_MUTEX(un));
5805
5806		/*
5807		 * If the "pm-capable" property was set to TRUE by the HBA
5808		 * driver, bypass the following check; otherwise, check the
5809		 * log sense information for this device.
5810		 */
5811		if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
5812			/*
5813			 * Get the log sense information to understand whether
5814			 * the power-cycle counts have gone beyond the threshold.
5815			 */
5816			log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
5817			log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
5818
5819			mutex_enter(SD_MUTEX(un));
5820			log_sense_page = un->un_start_stop_cycle_page;
5821			mutex_exit(SD_MUTEX(un));
5822
5823			rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
5824			    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
5825	#ifdef SDDEBUG
5826			if (sd_force_pm_supported) {
5827				/* Force a successful result */
5828				rval = 0;
5829			}
5830	#endif
5831			if (rval != 0) {
5832				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
5833				    "Log Sense Failed\n");
5834				kmem_free(log_page_data, log_page_size);
5835				/* Cannot support power management on those drives */
5836
5837				if (got_semaphore_here != 0) {
5838					sema_v(&un->un_semoclose);
5839				}
5840				/*
5841				 * On exit put the state back to its original value
5842				 * and broadcast to anyone waiting for the power
5843				 * change completion.
5844				 */
5845				mutex_enter(SD_MUTEX(un));
5846				un->un_state = state_before_pm;
5847				cv_broadcast(&un->un_suspend_cv);
5848				mutex_exit(SD_MUTEX(un));
5849				SD_TRACE(SD_LOG_IO_PM, un,
5850				    "sdpower: exit, Log Sense Failed.\n");
5851				return (DDI_FAILURE);
5852			}
5853
5854			/*
5855			 * From the page data, convert the essential information
5856			 * to pm_trans_data.
5857			 */
5858			maxcycles =
5859			    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
5860			    (log_page_data[0x1E] << 8) | log_page_data[0x1F];
5861
5862			sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
5863
5864			ncycles =
5865			    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
5866			    (log_page_data[0x26] << 8) | log_page_data[0x27];
5867
5868			sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
5869
5870			for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
5871				sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
5872				    log_page_data[8+i];
5873			}
5874
5875			kmem_free(log_page_data, log_page_size);
5876
5877			/*
5878			 * Call the pm_trans_check routine to get the OK from
5879			 * the global policy.
5880			 */
5881
5882			sd_pm_tran_data.format = DC_SCSI_FORMAT;
5883			sd_pm_tran_data.un.scsi_cycles.flag = 0;
5884
5885			rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
5886	#ifdef SDDEBUG
5887			if (sd_force_pm_supported) {
5888				/* Force a successful result */
5889				rval = 1;
5890			}
5891	#endif
5892			switch (rval) {
5893			case 0:
5894				/*
5895				 * It is not OK to power cycle, or there was an error
5896				 * in the parameters passed. pm_trans_check has returned
5897				 * the advised time (intvlp) to wait before a power
5898				 * cycle should next be considered. Based on that, we
5899				 * are supposed to pretend we are busy so that the pm
5900				 * framework will never call our power entry point.
5901				 * Because of that, install a timeout handler and wait
5902				 * for the recommended time to elapse, so that power
5903				 * management can be effective again.
5904				 *
5905				 * To effect this behavior, call pm_busy_component to
5906				 * indicate to the framework this device is busy.
5907				 * By not adjusting un_pm_count the rest of PM in
5908				 * the driver will function normally, independently of
5909				 * this, but because the framework is told the device
5910				 * is busy it won't attempt powering down until it gets
5911				 * a matching idle. The timeout handler sends this.
5912				 * Note: sd_pm_entry can't be called here to do this
5913				 * because sdpower may have been called as a result
5914				 * of a call to pm_raise_power from within sd_pm_entry.
5915				 *
5916				 * If a timeout handler is already active, don't
5917				 * install another.
5917				 */
5918				mutex_enter(&un->un_pm_mutex);
5919				if (un->un_pm_timeid == NULL) {
5920					un->un_pm_timeid =
5921					    timeout(sd_pm_timeout_handler,
5922					    un, intvlp * drv_usectohz(1000000));
5923					mutex_exit(&un->un_pm_mutex);
5924					(void) pm_busy_component(SD_DEVINFO(un), 0);
5925				} else {
5926					mutex_exit(&un->un_pm_mutex);
5927				}
5928				if (got_semaphore_here != 0) {
5929					sema_v(&un->un_semoclose);
5930				}
5931				/*
5932				 * On exit put the state back to its original value
5933				 * and broadcast to anyone waiting for the power
5934				 * change completion.
5935				 */
5936				mutex_enter(SD_MUTEX(un));
5937				un->un_state = state_before_pm;
5938				cv_broadcast(&un->un_suspend_cv);
5939				mutex_exit(SD_MUTEX(un));
5940
5941				SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
5942				    "trans check Failed, not ok to power cycle.\n");
5943				return (DDI_FAILURE);
5944
5945			case -1:
5946				if (got_semaphore_here != 0) {
5947					sema_v(&un->un_semoclose);
5948				}
5949				/*
5950				 * On exit put the state back to its original value
5951				 * and broadcast to anyone waiting for the power
5952				 * change completion.
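 * The same exit sequence recurs throughout sdpower(); in full it is
 *
 *	mutex_enter(SD_MUTEX(un));
 *	un->un_state = state_before_pm;
 *	cv_broadcast(&un->un_suspend_cv);
 *	mutex_exit(SD_MUTEX(un));
 *
 * which both restores the pre-PM state and wakes any thread waiting
 * for the power change to finish.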
5953				 */
5954				mutex_enter(SD_MUTEX(un));
5955				un->un_state = state_before_pm;
5956				cv_broadcast(&un->un_suspend_cv);
5957				mutex_exit(SD_MUTEX(un));
5958				SD_TRACE(SD_LOG_IO_PM, un,
5959				    "sdpower: exit, trans check command Failed.\n");
5960				return (DDI_FAILURE);
5961			}
5962		}
5963
5964		if (level == SD_SPINDLE_OFF) {
5965			/*
5966			 * Save the last state... if the STOP fails we need it
5967			 * for restoring.
5968			 */
5969			mutex_enter(SD_MUTEX(un));
5970			save_state = un->un_last_state;
5971			/*
5972			 * There must not be any commands being processed in
5973			 * the driver when we get here. Power to the device is
5974			 * potentially going off.
5975			 */
5976			ASSERT(un->un_ncmds_in_driver == 0);
5977			mutex_exit(SD_MUTEX(un));
5978
5979			/*
5980			 * For now, suspend the device completely before the
5981			 * spindle is turned off.
5982			 */
5983			if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
5984				if (got_semaphore_here != 0) {
5985					sema_v(&un->un_semoclose);
5986				}
5987				/*
5988				 * On exit put the state back to its original value
5989				 * and broadcast to anyone waiting for the power
5990				 * change completion.
5991				 */
5992				mutex_enter(SD_MUTEX(un));
5993				un->un_state = state_before_pm;
5994				cv_broadcast(&un->un_suspend_cv);
5995				mutex_exit(SD_MUTEX(un));
5996				SD_TRACE(SD_LOG_IO_PM, un,
5997				    "sdpower: exit, PM suspend Failed.\n");
5998				return (DDI_FAILURE);
5999			}
6000		}
6001
6002		/*
6003		 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6004		 * close, or strategy. Dump no longer uses this routine; it uses its
6005		 * own code so the transition can be done in polled mode.
6006		 */
6007
6008		medium_present = TRUE;
6009
6010		/*
6011		 * When powering up, issue a TUR in case the device is at unit
6012		 * attention. Don't do retries. Bypass the PM layer, otherwise
6013		 * a deadlock on un_pm_busy_cv will occur.
6014		 */
6015		if (level == SD_SPINDLE_ON) {
6016			(void) sd_send_scsi_TEST_UNIT_READY(un,
6017			    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6018		}
6019
6020		SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6021		    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6022
6023		sval = sd_send_scsi_START_STOP_UNIT(un,
6024		    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
6025		    SD_PATH_DIRECT);
6026		/* Command failed, check for media present. */
6027		if ((sval == ENXIO) && un->un_f_has_removable_media) {
6028			medium_present = FALSE;
6029		}
6030
6031		/*
6032		 * The conditions of interest here are:
6033		 *   if a spindle off with media present fails,
6034		 *	then restore the state and return an error;
6035		 *   else if a spindle on fails,
6036		 *	then return an error (there's no state to restore).
6037		 * In all other cases we set up for the new state
6038		 * and return success.
6039		 */
6040		switch (level) {
6041		case SD_SPINDLE_OFF:
6042			if ((medium_present == TRUE) && (sval != 0)) {
6043				/* The stop command from above failed */
6044				rval = DDI_FAILURE;
6045				/*
6046				 * The stop command failed, and we have media
6047				 * present. Put the level back by calling
6048				 * sd_ddi_pm_resume() and set the state back to
6049				 * its previous value.
6050				 */
6051				(void) sd_ddi_pm_resume(un);
6052				mutex_enter(SD_MUTEX(un));
6053				un->un_last_state = save_state;
6054				mutex_exit(SD_MUTEX(un));
6055				break;
6056			}
6057			/*
6058			 * The stop command from above succeeded.
6059			 */
6060			if (un->un_f_monitor_media_state) {
6061				/*
6062				 * Terminate the watch thread for removable media
6063				 * devices going into a low power state.
This is as per 6064 * the requirements of pm framework, otherwise commands 6065 * will be generated for the device (through watch 6066 * thread), even when the device is in low power state. 6067 */ 6068 mutex_enter(SD_MUTEX(un)); 6069 un->un_f_watcht_stopped = FALSE; 6070 if (un->un_swr_token != NULL) { 6071 opaque_t temp_token = un->un_swr_token; 6072 un->un_f_watcht_stopped = TRUE; 6073 un->un_swr_token = NULL; 6074 mutex_exit(SD_MUTEX(un)); 6075 (void) scsi_watch_request_terminate(temp_token, 6076 SCSI_WATCH_TERMINATE_WAIT); 6077 } else { 6078 mutex_exit(SD_MUTEX(un)); 6079 } 6080 } 6081 break; 6082 6083 default: /* The level requested is spindle on... */ 6084 /* 6085 * Legacy behavior: return success on a failed spinup 6086 * if there is no media in the drive. 6087 * Do this by looking at medium_present here. 6088 */ 6089 if ((sval != 0) && medium_present) { 6090 /* The start command from above failed */ 6091 rval = DDI_FAILURE; 6092 break; 6093 } 6094 /* 6095 * The start command from above succeeded 6096 * Resume the devices now that we have 6097 * started the disks 6098 */ 6099 (void) sd_ddi_pm_resume(un); 6100 6101 /* 6102 * Resume the watch thread since it was suspended 6103 * when the device went into low power mode. 6104 */ 6105 if (un->un_f_monitor_media_state) { 6106 mutex_enter(SD_MUTEX(un)); 6107 if (un->un_f_watcht_stopped == TRUE) { 6108 opaque_t temp_token; 6109 6110 un->un_f_watcht_stopped = FALSE; 6111 mutex_exit(SD_MUTEX(un)); 6112 temp_token = scsi_watch_request_submit( 6113 SD_SCSI_DEVP(un), 6114 sd_check_media_time, 6115 SENSE_LENGTH, sd_media_watch_cb, 6116 (caddr_t)dev); 6117 mutex_enter(SD_MUTEX(un)); 6118 un->un_swr_token = temp_token; 6119 } 6120 mutex_exit(SD_MUTEX(un)); 6121 } 6122 } 6123 if (got_semaphore_here != 0) { 6124 sema_v(&un->un_semoclose); 6125 } 6126 /* 6127 * On exit put the state back to it's original value 6128 * and broadcast to anyone waiting for the power 6129 * change completion. 6130 */ 6131 mutex_enter(SD_MUTEX(un)); 6132 un->un_state = state_before_pm; 6133 cv_broadcast(&un->un_suspend_cv); 6134 mutex_exit(SD_MUTEX(un)); 6135 6136 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6137 6138 return (rval); 6139 } 6140 6141 6142 6143 /* 6144 * Function: sdattach 6145 * 6146 * Description: Driver's attach(9e) entry point function. 6147 * 6148 * Arguments: devi - opaque device info handle 6149 * cmd - attach type 6150 * 6151 * Return Code: DDI_SUCCESS 6152 * DDI_FAILURE 6153 * 6154 * Context: Kernel thread context 6155 */ 6156 6157 static int 6158 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6159 { 6160 switch (cmd) { 6161 case DDI_ATTACH: 6162 return (sd_unit_attach(devi)); 6163 case DDI_RESUME: 6164 return (sd_ddi_resume(devi)); 6165 default: 6166 break; 6167 } 6168 return (DDI_FAILURE); 6169 } 6170 6171 6172 /* 6173 * Function: sddetach 6174 * 6175 * Description: Driver's detach(9E) entry point function. 
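 * Only two commands are recognized here: DDI_DETACH unconfigures the
 * instance via sd_unit_detach() and DDI_SUSPEND quiesces it via
 * sd_ddi_suspend(); anything else falls through to DDI_FAILURE.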
6176 * 6177 * Arguments: devi - opaque device info handle 6178 * cmd - detach type 6179 * 6180 * Return Code: DDI_SUCCESS 6181 * DDI_FAILURE 6182 * 6183 * Context: Kernel thread context 6184 */ 6185 6186 static int 6187 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6188 { 6189 switch (cmd) { 6190 case DDI_DETACH: 6191 return (sd_unit_detach(devi)); 6192 case DDI_SUSPEND: 6193 return (sd_ddi_suspend(devi)); 6194 default: 6195 break; 6196 } 6197 return (DDI_FAILURE); 6198 } 6199 6200 6201 /* 6202 * Function: sd_sync_with_callback 6203 * 6204 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6205 * state while the callback routine is active. 6206 * 6207 * Arguments: un: softstate structure for the instance 6208 * 6209 * Context: Kernel thread context 6210 */ 6211 6212 static void 6213 sd_sync_with_callback(struct sd_lun *un) 6214 { 6215 ASSERT(un != NULL); 6216 6217 mutex_enter(SD_MUTEX(un)); 6218 6219 ASSERT(un->un_in_callback >= 0); 6220 6221 while (un->un_in_callback > 0) { 6222 mutex_exit(SD_MUTEX(un)); 6223 delay(2); 6224 mutex_enter(SD_MUTEX(un)); 6225 } 6226 6227 mutex_exit(SD_MUTEX(un)); 6228 } 6229 6230 /* 6231 * Function: sd_unit_attach 6232 * 6233 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6234 * the soft state structure for the device and performs 6235 * all necessary structure and device initializations. 6236 * 6237 * Arguments: devi: the system's dev_info_t for the device. 6238 * 6239 * Return Code: DDI_SUCCESS if attach is successful. 6240 * DDI_FAILURE if any part of the attach fails. 6241 * 6242 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6243 * Kernel thread context only. Can sleep. 6244 */ 6245 6246 static int 6247 sd_unit_attach(dev_info_t *devi) 6248 { 6249 struct scsi_device *devp; 6250 struct sd_lun *un; 6251 char *variantp; 6252 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6253 int instance; 6254 int rval; 6255 int wc_enabled; 6256 int tgt; 6257 uint64_t capacity; 6258 uint_t lbasize = 0; 6259 dev_info_t *pdip = ddi_get_parent(devi); 6260 int offbyone = 0; 6261 int geom_label_valid = 0; 6262 6263 /* 6264 * Retrieve the target driver's private data area. This was set 6265 * up by the HBA. 6266 */ 6267 devp = ddi_get_driver_private(devi); 6268 6269 /* 6270 * Retrieve the target ID of the device. 6271 */ 6272 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6273 SCSI_ADDR_PROP_TARGET, -1); 6274 6275 /* 6276 * Since we have no idea what state things were left in by the last 6277 * user of the device, set up some 'default' settings, ie. turn 'em 6278 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6279 * Do this before the scsi_probe, which sends an inquiry. 6280 * This is a fix for bug (4430280). 6281 * Of special importance is wide-xfer. The drive could have been left 6282 * in wide transfer mode by the last driver to communicate with it, 6283 * this includes us. If that's the case, and if the following is not 6284 * setup properly or we don't re-negotiate with the drive prior to 6285 * transferring data to/from the drive, it causes bus parity errors, 6286 * data overruns, and unexpected interrupts. This first occurred when 6287 * the fix for bug (4378686) was made. 
6288 */ 6289 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6290 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6291 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6292 6293 /* 6294 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6295 * on a target. Setting it per lun instance actually sets the 6296 * capability of this target, which affects those luns already 6297 * attached on the same target. So during attach, we can only disable 6298 * this capability only when no other lun has been attached on this 6299 * target. By doing this, we assume a target has the same tagged-qing 6300 * capability for every lun. The condition can be removed when HBA 6301 * is changed to support per lun based tagged-qing capability. 6302 */ 6303 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6304 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6305 } 6306 6307 /* 6308 * Use scsi_probe() to issue an INQUIRY command to the device. 6309 * This call will allocate and fill in the scsi_inquiry structure 6310 * and point the sd_inq member of the scsi_device structure to it. 6311 * If the attach succeeds, then this memory will not be de-allocated 6312 * (via scsi_unprobe()) until the instance is detached. 6313 */ 6314 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6315 goto probe_failed; 6316 } 6317 6318 /* 6319 * Check the device type as specified in the inquiry data and 6320 * claim it if it is of a type that we support. 6321 */ 6322 switch (devp->sd_inq->inq_dtype) { 6323 case DTYPE_DIRECT: 6324 break; 6325 case DTYPE_RODIRECT: 6326 break; 6327 case DTYPE_OPTICAL: 6328 break; 6329 case DTYPE_NOTPRESENT: 6330 default: 6331 /* Unsupported device type; fail the attach. */ 6332 goto probe_failed; 6333 } 6334 6335 /* 6336 * Allocate the soft state structure for this unit. 6337 * 6338 * We rely upon this memory being set to all zeroes by 6339 * ddi_soft_state_zalloc(). We assume that any member of the 6340 * soft state structure that is not explicitly initialized by 6341 * this routine will have a value of zero. 6342 */ 6343 instance = ddi_get_instance(devp->sd_dev); 6344 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6345 goto probe_failed; 6346 } 6347 6348 /* 6349 * Retrieve a pointer to the newly-allocated soft state. 6350 * 6351 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6352 * was successful, unless something has gone horribly wrong and the 6353 * ddi's soft state internals are corrupt (in which case it is 6354 * probably better to halt here than just fail the attach....) 6355 */ 6356 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6357 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6358 instance); 6359 /*NOTREACHED*/ 6360 } 6361 6362 /* 6363 * Link the back ptr of the driver soft state to the scsi_device 6364 * struct for this lun. 6365 * Save a pointer to the softstate in the driver-private area of 6366 * the scsi_device struct. 6367 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6368 * we first set un->un_sd below. 6369 */ 6370 un->un_sd = devp; 6371 devp->sd_private = (opaque_t)un; 6372 6373 /* 6374 * The following must be after devp is stored in the soft state struct. 6375 */ 6376 #ifdef SDDEBUG 6377 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6378 "%s_unit_attach: un:0x%p instance:%d\n", 6379 ddi_driver_name(devi), un, instance); 6380 #endif 6381 6382 /* 6383 * Set up the device type and node type (for the minor nodes). 
6384	 * By default we assume that the device can at least support the
6385	 * Common Command Set. Call it a CD-ROM if it reports itself
6386	 * as a RODIRECT device.
6387	 */
6388	switch (devp->sd_inq->inq_dtype) {
6389	case DTYPE_RODIRECT:
6390		un->un_node_type = DDI_NT_CD_CHAN;
6391		un->un_ctype = CTYPE_CDROM;
6392		break;
6393	case DTYPE_OPTICAL:
6394		un->un_node_type = DDI_NT_BLOCK_CHAN;
6395		un->un_ctype = CTYPE_ROD;
6396		break;
6397	default:
6398		un->un_node_type = DDI_NT_BLOCK_CHAN;
6399		un->un_ctype = CTYPE_CCS;
6400		break;
6401	}
6402
6403	/*
6404	 * Try to read the interconnect type from the HBA.
6405	 *
6406	 * Note: This driver is currently compiled as two binaries, a parallel
6407	 * scsi version (sd) and a fibre channel version (ssd). All functional
6408	 * differences are determined at compile time. In the future a single
6409	 * binary will be provided and the interconnect type will be used to
6410	 * differentiate between fibre and parallel scsi behaviors. At that time
6411	 * it will be necessary for all fibre channel HBAs to support this
6412	 * property.
6413	 *
6414	 * Set un_f_is_fibre to TRUE (default to fibre).
6415	 */
6416	un->un_f_is_fibre = TRUE;
6417	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
6418	case INTERCONNECT_SSA:
6419		un->un_interconnect_type = SD_INTERCONNECT_SSA;
6420		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6421		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
6422		break;
6423	case INTERCONNECT_PARALLEL:
6424		un->un_f_is_fibre = FALSE;
6425		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6426		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6427		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
6428		break;
6429	case INTERCONNECT_SATA:
6430		un->un_f_is_fibre = FALSE;
6431		un->un_interconnect_type = SD_INTERCONNECT_SATA;
6432		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6433		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
6434		break;
6435	case INTERCONNECT_FIBRE:
6436		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
6437		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6438		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
6439		break;
6440	case INTERCONNECT_FABRIC:
6441		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
6442		un->un_node_type = DDI_NT_BLOCK_FABRIC;
6443		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6444		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
6445		break;
6446	default:
6447	#ifdef SD_DEFAULT_INTERCONNECT_TYPE
6448		/*
6449		 * The HBA does not support the "interconnect-type" property
6450		 * (or did not provide a recognized type).
6451		 *
6452		 * Note: This will be obsoleted when a single fibre channel
6453		 * and parallel scsi driver is delivered. In the meantime the
6454		 * interconnect type will be set to the platform default. If
6455		 * that type is not parallel SCSI, it means that we should be
6456		 * assuming "ssd" semantics. However, here this also means that
6457		 * the FC HBA does not support the "interconnect-type" property
6458		 * as we expect it to, so log this occurrence.
6459		 */
6460		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
6461		if (!SD_IS_PARALLEL_SCSI(un)) {
6462			SD_INFO(SD_LOG_ATTACH_DETACH, un,
6463			    "sd_unit_attach: un:0x%p Assuming "
6464			    "INTERCONNECT_FIBRE\n", un);
6465		} else {
6466			SD_INFO(SD_LOG_ATTACH_DETACH, un,
6467			    "sd_unit_attach: un:0x%p Assuming "
6468			    "INTERCONNECT_PARALLEL\n", un);
6469			un->un_f_is_fibre = FALSE;
6470		}
6471	#else
6472		/*
6473		 * Note: This source will be implemented when a single fibre
6474		 * channel and parallel scsi driver is delivered. The default
6475		 * will be to assume that if a device does not support the
6476		 * "interconnect-type" property it is a parallel SCSI HBA and
6477		 * we will set the interconnect type accordingly.
6478		 */
6479		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6480		un->un_f_is_fibre = FALSE;
6481	#endif
6482		break;
6483	}
6484
6485	if (un->un_f_is_fibre == TRUE) {
6486		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
6487		    SCSI_VERSION_3) {
6488			switch (un->un_interconnect_type) {
6489			case SD_INTERCONNECT_FIBRE:
6490			case SD_INTERCONNECT_SSA:
6491				un->un_node_type = DDI_NT_BLOCK_WWN;
6492				break;
6493			default:
6494				break;
6495			}
6496		}
6497	}
6498
6499	/*
6500	 * Initialize the Request Sense command for the target.
6501	 */
6502	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
6503		goto alloc_rqs_failed;
6504	}
6505
6506	/*
6507	 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC
6508	 * with separate binaries for sd and ssd.
6509	 *
6510	 * x86 has one binary, so un_retry_count is set based on
6511	 * interconnect type. These hardcoded values will go away when
6512	 * SPARC also uses one binary for sd and ssd, and they need to
6513	 * match SD_RETRY_COUNT in sddef.h.
6514	 * The value used is based on interconnect type:
6515	 * fibre = 3, parallel = 5.
6516	 */
6517	#if defined(__i386) || defined(__amd64)
6518	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
6519	#else
6520	un->un_retry_count = SD_RETRY_COUNT;
6521	#endif
6522
6523	/*
6524	 * Set the per disk retry count to the default number of retries
6525	 * for disks and CDROMs. This value can be overridden by the
6526	 * disk property list or an entry in sd.conf.
6527	 */
6528	un->un_notready_retry_count =
6529	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
6530			: DISK_NOT_READY_RETRY_COUNT(un);
6531
6532	/*
6533	 * Set the busy retry count to the default value of un_retry_count.
6534	 * This can be overridden by entries in sd.conf or the device
6535	 * config table.
6536	 */
6537	un->un_busy_retry_count = un->un_retry_count;
6538
6539	/*
6540	 * Init the reset threshold for retries. This number determines
6541	 * how many retries must be performed before a reset can be issued
6542	 * (for certain error conditions). This can be overridden by entries
6543	 * in sd.conf or the device config table.
6544	 */
6545	un->un_reset_retry_count = (un->un_retry_count / 2);
6546
6547	/*
6548	 * Set the victim_retry_count to the default un_retry_count.
6549	 */
6550	un->un_victim_retry_count = (2 * un->un_retry_count);
6551
6552	/*
6553	 * Set the reservation release timeout to the default value of
6554	 * 5 seconds. This can be overridden by entries in ssd.conf or the
6555	 * device config table.
6556	 */
6557	un->un_reserve_release_time = 5;
6558
6559	/*
6560	 * Set up the default maximum transfer size. Note that this may
6561	 * get updated later in the attach, when setting up default wide
6562	 * operations for disks.
6563	 */
6564	#if defined(__i386) || defined(__amd64)
6565	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
6566	#else
6567	un->un_max_xfer_size = (uint_t)maxphys;
6568	#endif
6569
6570	/*
6571	 * Get the "allow bus device reset" property (defaults to "enabled"
6572	 * if the property was not defined). This is to disable bus resets
6573	 * for certain kinds of error recovery. Note: In the future, when a
6574	 * run-time fibre check is available, the soft state flag should
6575	 * default to enabled.
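 * As an illustration, a parallel SCSI configuration could disable this
 * with a driver.conf(4) entry such as (hypothetical value):
 *
 *	allow-bus-device-reset=0;
 *
 * When the property is absent, the ddi_getprop(9F) call below returns
 * its default of 1 and bus device resets remain enabled.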
6576 */ 6577 if (un->un_f_is_fibre == TRUE) { 6578 un->un_f_allow_bus_device_reset = TRUE; 6579 } else { 6580 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6581 "allow-bus-device-reset", 1) != 0) { 6582 un->un_f_allow_bus_device_reset = TRUE; 6583 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6584 "sd_unit_attach: un:0x%p Bus device reset enabled\n", 6585 un); 6586 } else { 6587 un->un_f_allow_bus_device_reset = FALSE; 6588 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6589 "sd_unit_attach: un:0x%p Bus device reset disabled\n", 6590 un); 6591 } 6592 } 6593 6594 /* 6595 * Check if this is an ATAPI device. ATAPI devices use Group 1 6596 * Read/Write commands and Group 2 Mode Sense/Select commands. 6597 * 6598 * Note: The "obsolete" way of doing this is to check for the "atapi" 6599 * property. The new "variant" property with a value of "atapi" has been 6600 * introduced so that future 'variants' of standard SCSI behavior (like 6601 * atapi) could be specified by the underlying HBA drivers by supplying 6602 * a new value for the "variant" property, instead of having to define a 6603 * new property. 6604 */ 6605 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6606 un->un_f_cfg_is_atapi = TRUE; 6607 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6608 "sd_unit_attach: un:0x%p Atapi device\n", un); 6609 } 6610 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6611 &variantp) == DDI_PROP_SUCCESS) { 6612 if (strcmp(variantp, "atapi") == 0) { 6613 un->un_f_cfg_is_atapi = TRUE; 6614 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6615 "sd_unit_attach: un:0x%p Atapi device\n", un); 6616 } 6617 ddi_prop_free(variantp); 6618 } 6619 6620 un->un_cmd_timeout = SD_IO_TIME; 6621 6622 /* Info on current states, statuses, etc. (Updated frequently) */ 6623 un->un_state = SD_STATE_NORMAL; 6624 un->un_last_state = SD_STATE_NORMAL; 6625 6626 /* Control & status info for command throttling */ 6627 un->un_throttle = sd_max_throttle; 6628 un->un_saved_throttle = sd_max_throttle; 6629 un->un_min_throttle = sd_min_throttle; 6630 6631 if (un->un_f_is_fibre == TRUE) { 6632 un->un_f_use_adaptive_throttle = TRUE; 6633 } else { 6634 un->un_f_use_adaptive_throttle = FALSE; 6635 } 6636 6637 /* Removable media support. */ 6638 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6639 un->un_mediastate = DKIO_NONE; 6640 un->un_specified_mediastate = DKIO_NONE; 6641 6642 /* CVs for suspend/resume (PM or DR) */ 6643 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6644 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6645 6646 /* Power management support. */ 6647 un->un_power_level = SD_SPINDLE_UNINIT; 6648 6649 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6650 un->un_f_wcc_inprog = 0; 6651 6652 /* 6653 * The open/close semaphore is used to serialize threads executing 6654 * in the driver's open & close entry point routines for a given 6655 * instance. 6656 */ 6657 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 6658 6659 /* 6660 * The conf file entry and softstate variable is a forceful override, 6661 * meaning a non-zero value must be entered to change the default. 6662 */ 6663 un->un_f_disksort_disabled = FALSE; 6664 6665 /* 6666 * Retrieve the properties from the static driver table or the driver 6667 * configuration file (.conf) for this unit and update the soft state 6668 * for the device as needed for the indicated properties. 
6669	 * Note: the property configuration needs to occur here as some of the
6670	 * following routines may have dependencies on soft state flags set
6671	 * as part of the driver property configuration.
6672	 */
6673	sd_read_unit_properties(un);
6674	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
6675	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);
6676
6677	/*
6678	 * A device is treated as hotpluggable only if it has the
6679	 * "hotpluggable" property; otherwise it is regarded as
6680	 * non-hotpluggable.
6681	 */
6682	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
6683	    -1) != -1) {
6684		un->un_f_is_hotpluggable = TRUE;
6685	}
6686
6687	/*
6688	 * Set the unit's attributes (flags) according to "hotpluggable" and
6689	 * the RMB bit in the INQUIRY data.
6690	 */
6691	sd_set_unit_attributes(un, devi);
6692
6693	/*
6694	 * By default, we mark the capacity, lbasize, and geometry
6695	 * as invalid. Only if we successfully read a valid capacity
6696	 * will we update un_blockcount and un_tgt_blocksize with the
6697	 * valid values (the geometry will be validated later).
6698	 */
6699	un->un_f_blockcount_is_valid = FALSE;
6700	un->un_f_tgt_blocksize_is_valid = FALSE;
6701
6702	/*
6703	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
6704	 * otherwise.
6705	 */
6706	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
6707	un->un_blockcount = 0;
6708
6709	/*
6710	 * Set up the per-instance info needed to determine the correct
6711	 * CDBs and other info for issuing commands to the target.
6712	 */
6713	sd_init_cdb_limits(un);
6714
6715	/*
6716	 * Set up the IO chains to use, based upon the target type.
6717	 */
6718	if (un->un_f_non_devbsize_supported) {
6719		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6720	} else {
6721		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6722	}
6723	un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6724	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
6725	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
6726
6727	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
6728	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
6729	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
6730	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
6731
6732
6733	if (ISCD(un)) {
6734		un->un_additional_codes = sd_additional_codes;
6735	} else {
6736		un->un_additional_codes = NULL;
6737	}
6738
6739	/*
6740	 * Create the kstats here so they can be available for attach-time
6741	 * routines that send commands to the unit (either polled or via
6742	 * sd_send_scsi_cmd).
6743	 *
6744	 * Note: This is a critical sequence that needs to be maintained:
6745	 *   1) Instantiate the kstats here, before any routines using the
6746	 *	iopath (i.e. sd_send_scsi_cmd).
6747	 *   2) Instantiate and initialize the partition stats
6748	 *	(sd_set_pstats).
6749	 *   3) Initialize the error stats (sd_set_errstats), following
6750	 *	sd_validate_geometry(), sd_register_devid(),
6751	 *	and sd_cache_control().
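 * The calls below are standard kstat(9F) usage: kstat_create()
 * allocates the I/O statistics structure, ks_lock is pointed at
 * SD_MUTEX so that the framework snapshots the counters under the
 * same lock the driver updates them with, and kstat_install() makes
 * them visible to readers.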
6752 */ 6753 6754 un->un_stats = kstat_create(sd_label, instance, 6755 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 6756 if (un->un_stats != NULL) { 6757 un->un_stats->ks_lock = SD_MUTEX(un); 6758 kstat_install(un->un_stats); 6759 } 6760 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6761 "sd_unit_attach: un:0x%p un_stats created\n", un); 6762 6763 sd_create_errstats(un, instance); 6764 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6765 "sd_unit_attach: un:0x%p errstats created\n", un); 6766 6767 /* 6768 * The following if/else code was relocated here from below as part 6769 * of the fix for bug (4430280). However with the default setup added 6770 * on entry to this routine, it's no longer absolutely necessary for 6771 * this to be before the call to sd_spin_up_unit. 6772 */ 6773 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 6774 /* 6775 * If SCSI-2 tagged queueing is supported by the target 6776 * and by the host adapter then we will enable it. 6777 */ 6778 un->un_tagflags = 0; 6779 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 6780 (devp->sd_inq->inq_cmdque) && 6781 (un->un_f_arq_enabled == TRUE)) { 6782 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 6783 1, 1) == 1) { 6784 un->un_tagflags = FLAG_STAG; 6785 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6786 "sd_unit_attach: un:0x%p tag queueing " 6787 "enabled\n", un); 6788 } else if (scsi_ifgetcap(SD_ADDRESS(un), 6789 "untagged-qing", 0) == 1) { 6790 un->un_f_opt_queueing = TRUE; 6791 un->un_saved_throttle = un->un_throttle = 6792 min(un->un_throttle, 3); 6793 } else { 6794 un->un_f_opt_queueing = FALSE; 6795 un->un_saved_throttle = un->un_throttle = 1; 6796 } 6797 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 6798 == 1) && (un->un_f_arq_enabled == TRUE)) { 6799 /* The Host Adapter supports internal queueing. */ 6800 un->un_f_opt_queueing = TRUE; 6801 un->un_saved_throttle = un->un_throttle = 6802 min(un->un_throttle, 3); 6803 } else { 6804 un->un_f_opt_queueing = FALSE; 6805 un->un_saved_throttle = un->un_throttle = 1; 6806 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6807 "sd_unit_attach: un:0x%p no tag queueing\n", un); 6808 } 6809 6810 /* 6811 * Enable large transfers for SATA/SAS drives 6812 */ 6813 if (SD_IS_SERIAL(un)) { 6814 un->un_max_xfer_size = 6815 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6816 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6817 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6818 "sd_unit_attach: un:0x%p max transfer " 6819 "size=0x%x\n", un, un->un_max_xfer_size); 6820 6821 } 6822 6823 /* Setup or tear down default wide operations for disks */ 6824 6825 /* 6826 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 6827 * and "ssd_max_xfer_size" to exist simultaneously on the same 6828 * system and be set to different values. In the future this 6829 * code may need to be updated when the ssd module is 6830 * obsoleted and removed from the system. 
(4299588) 6831 */ 6832 if (SD_IS_PARALLEL_SCSI(un) && 6833 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6834 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6835 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6836 1, 1) == 1) { 6837 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6838 "sd_unit_attach: un:0x%p Wide Transfer " 6839 "enabled\n", un); 6840 } 6841 6842 /* 6843 * If tagged queuing has also been enabled, then 6844 * enable large xfers 6845 */ 6846 if (un->un_saved_throttle == sd_max_throttle) { 6847 un->un_max_xfer_size = 6848 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6849 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6850 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6851 "sd_unit_attach: un:0x%p max transfer " 6852 "size=0x%x\n", un, un->un_max_xfer_size); 6853 } 6854 } else { 6855 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6856 0, 1) == 1) { 6857 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6858 "sd_unit_attach: un:0x%p " 6859 "Wide Transfer disabled\n", un); 6860 } 6861 } 6862 } else { 6863 un->un_tagflags = FLAG_STAG; 6864 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 6865 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 6866 } 6867 6868 /* 6869 * If this target supports LUN reset, try to enable it. 6870 */ 6871 if (un->un_f_lun_reset_enabled) { 6872 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 6873 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6874 "un:0x%p lun_reset capability set\n", un); 6875 } else { 6876 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6877 "un:0x%p lun-reset capability not set\n", un); 6878 } 6879 } 6880 6881 /* 6882 * At this point in the attach, we have enough info in the 6883 * soft state to be able to issue commands to the target. 6884 * 6885 * All command paths used below MUST issue their commands as 6886 * SD_PATH_DIRECT. This is important as intermediate layers 6887 * are not all initialized yet (such as PM). 6888 */ 6889 6890 /* 6891 * Send a TEST UNIT READY command to the device. This should clear 6892 * any outstanding UNIT ATTENTION that may be present. 6893 * 6894 * Note: Don't check for success, just track if there is a reservation, 6895 * this is a throw away command to clear any unit attentions. 6896 * 6897 * Note: This MUST be the first command issued to the target during 6898 * attach to ensure power on UNIT ATTENTIONS are cleared. 6899 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 6900 * with attempts at spinning up a device with no media. 6901 */ 6902 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 6903 reservation_flag = SD_TARGET_IS_RESERVED; 6904 } 6905 6906 /* 6907 * If the device is NOT a removable media device, attempt to spin 6908 * it up (using the START_STOP_UNIT command) and read its capacity 6909 * (using the READ CAPACITY command). Note, however, that either 6910 * of these could fail and in some cases we would continue with 6911 * the attach despite the failure (see below). 6912 */ 6913 if (un->un_f_descr_format_supported) { 6914 switch (sd_spin_up_unit(un)) { 6915 case 0: 6916 /* 6917 * Spin-up was successful; now try to read the 6918 * capacity. If successful then save the results 6919 * and mark the capacity & lbasize as valid. 
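 * (Background for the capacity checks that follow: Group 1, 10-byte,
 * CDBs carry only a 32-bit logical block address, so a device whose
 * highest LBA does not fit, i.e. capacity + 1 > SD_GROUP1_MAX_ADDRESS,
 * must be addressed with 16-byte CDBs, and descriptor-format sense
 * data is enabled so that 64-bit information fields can be returned;
 * hence the sd_enable_descr_sense() call on LP64 kernels.)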
6920 */ 6921 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6922 "sd_unit_attach: un:0x%p spin-up successful\n", un); 6923 6924 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 6925 &lbasize, SD_PATH_DIRECT)) { 6926 case 0: { 6927 if (capacity > DK_MAX_BLOCKS) { 6928 #ifdef _LP64 6929 if (capacity + 1 > 6930 SD_GROUP1_MAX_ADDRESS) { 6931 /* 6932 * Enable descriptor format 6933 * sense data so that we can 6934 * get 64 bit sense data 6935 * fields. 6936 */ 6937 sd_enable_descr_sense(un); 6938 } 6939 #else 6940 /* 32-bit kernels can't handle this */ 6941 scsi_log(SD_DEVINFO(un), 6942 sd_label, CE_WARN, 6943 "disk has %llu blocks, which " 6944 "is too large for a 32-bit " 6945 "kernel", capacity); 6946 6947 #if defined(__i386) || defined(__amd64) 6948 /* 6949 * 1TB disk was treated as (1T - 512)B 6950 * in the past, so that it might have 6951 * valid VTOC and solaris partitions, 6952 * we have to allow it to continue to 6953 * work. 6954 */ 6955 if (capacity -1 > DK_MAX_BLOCKS) 6956 #endif 6957 goto spinup_failed; 6958 #endif 6959 } 6960 6961 /* 6962 * Here it's not necessary to check the case: 6963 * the capacity of the device is bigger than 6964 * what the max hba cdb can support. Because 6965 * sd_send_scsi_READ_CAPACITY will retrieve 6966 * the capacity by sending USCSI command, which 6967 * is constrained by the max hba cdb. Actually, 6968 * sd_send_scsi_READ_CAPACITY will return 6969 * EINVAL when using bigger cdb than required 6970 * cdb length. Will handle this case in 6971 * "case EINVAL". 6972 */ 6973 6974 /* 6975 * The following relies on 6976 * sd_send_scsi_READ_CAPACITY never 6977 * returning 0 for capacity and/or lbasize. 6978 */ 6979 sd_update_block_info(un, lbasize, capacity); 6980 6981 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6982 "sd_unit_attach: un:0x%p capacity = %ld " 6983 "blocks; lbasize= %ld.\n", un, 6984 un->un_blockcount, un->un_tgt_blocksize); 6985 6986 break; 6987 } 6988 case EINVAL: 6989 /* 6990 * In the case where the max-cdb-length property 6991 * is smaller than the required CDB length for 6992 * a SCSI device, a target driver can fail to 6993 * attach to that device. 6994 */ 6995 scsi_log(SD_DEVINFO(un), 6996 sd_label, CE_WARN, 6997 "disk capacity is too large " 6998 "for current cdb length"); 6999 goto spinup_failed; 7000 case EACCES: 7001 /* 7002 * Should never get here if the spin-up 7003 * succeeded, but code it in anyway. 7004 * From here, just continue with the attach... 7005 */ 7006 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7007 "sd_unit_attach: un:0x%p " 7008 "sd_send_scsi_READ_CAPACITY " 7009 "returned reservation conflict\n", un); 7010 reservation_flag = SD_TARGET_IS_RESERVED; 7011 break; 7012 default: 7013 /* 7014 * Likewise, should never get here if the 7015 * spin-up succeeded. Just continue with 7016 * the attach... 7017 */ 7018 break; 7019 } 7020 break; 7021 case EACCES: 7022 /* 7023 * Device is reserved by another host. In this case 7024 * we could not spin it up or read the capacity, but 7025 * we continue with the attach anyway. 7026 */ 7027 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7028 "sd_unit_attach: un:0x%p spin-up reservation " 7029 "conflict.\n", un); 7030 reservation_flag = SD_TARGET_IS_RESERVED; 7031 break; 7032 default: 7033 /* Fail the attach if the spin-up failed. 
             */
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p spin-up failed.", un);
            goto spinup_failed;
        }
    }

    /*
     * Check to see if this is an MMC drive.
     */
    if (ISCD(un)) {
        sd_set_mmc_caps(un);
    }

    /*
     * Add a zero-length attribute to tell the world we support
     * kernel ioctls (for layered drivers).
     */
    (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
        DDI_KERNEL_IOCTL, NULL, 0);

    /*
     * Add a boolean property to tell the world we support
     * the B_FAILFAST flag (for layered drivers).
     */
    (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
        "ddi-failfast-supported", NULL, 0);

    /*
     * Initialize power management.
     */
    mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
    cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
    sd_setup_pm(un, devi);
    if (un->un_f_pm_is_enabled == FALSE) {
        /*
         * For performance, point to a jump table that does
         * not include pm.
         * The direct and priority chains don't change with PM.
         *
         * Note: this is currently done based on individual device
         * capabilities. When an interface for determining system
         * power enabled state becomes available, or when additional
         * layers are added to the command chain, these values will
         * have to be re-evaluated for correctness.
         */
        if (un->un_f_non_devbsize_supported) {
            un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
        } else {
            un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
        }
        un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
    }

    /*
     * This property is set to 0 by HA software to avoid retries
     * on a reserved disk. (The preferred property name is
     * "retry-on-reservation-conflict".) (1189689)
     *
     * Note: The use of a global here can have unintended consequences. A
     * per-instance variable is preferable to match the capabilities of
     * different underlying HBAs (4402600).
     */
    sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
        DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
        sd_retry_on_reservation_conflict);
    if (sd_retry_on_reservation_conflict != 0) {
        sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
            devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
            sd_retry_on_reservation_conflict);
    }

    /* Set up options for QFULL handling. */
    if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
        "qfull-retries", -1)) != -1) {
        (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
            rval, 1);
    }
    if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
        "qfull-retry-interval", -1)) != -1) {
        (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
            rval, 1);
    }

    /*
     * This just prints a message that announces the existence of the
     * device. The message is always printed in the system logfile, but
     * only appears on the console if the system is booted with the
     * -v (verbose) argument.
7123 */ 7124 ddi_report_dev(devi); 7125 7126 un->un_mediastate = DKIO_NONE; 7127 7128 cmlb_alloc_handle(&un->un_cmlbhandle); 7129 7130 #if defined(__i386) || defined(__amd64) 7131 /* 7132 * On x86, compensate for off-by-1 legacy error 7133 */ 7134 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7135 (lbasize == un->un_sys_blocksize)) 7136 offbyone = CMLB_OFF_BY_ONE; 7137 #endif 7138 7139 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7140 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7141 un->un_node_type, offbyone, un->un_cmlbhandle, 7142 (void *)SD_PATH_DIRECT) != 0) { 7143 goto cmlb_attach_failed; 7144 } 7145 7146 7147 /* 7148 * Read and validate the device's geometry (ie, disk label) 7149 * A new unformatted drive will not have a valid geometry, but 7150 * the driver needs to successfully attach to this device so 7151 * the drive can be formatted via ioctls. 7152 */ 7153 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7154 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7155 7156 mutex_enter(SD_MUTEX(un)); 7157 7158 /* 7159 * Read and initialize the devid for the unit. 7160 */ 7161 ASSERT(un->un_errstats != NULL); 7162 if (un->un_f_devid_supported) { 7163 sd_register_devid(un, devi, reservation_flag); 7164 } 7165 mutex_exit(SD_MUTEX(un)); 7166 7167 #if (defined(__fibre)) 7168 /* 7169 * Register callbacks for fibre only. You can't do this soley 7170 * on the basis of the devid_type because this is hba specific. 7171 * We need to query our hba capabilities to find out whether to 7172 * register or not. 7173 */ 7174 if (un->un_f_is_fibre) { 7175 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7176 sd_init_event_callbacks(un); 7177 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7178 "sd_unit_attach: un:0x%p event callbacks inserted", un); 7179 } 7180 } 7181 #endif 7182 7183 if (un->un_f_opt_disable_cache == TRUE) { 7184 /* 7185 * Disable both read cache and write cache. This is 7186 * the historic behavior of the keywords in the config file. 7187 */ 7188 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7189 0) { 7190 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7191 "sd_unit_attach: un:0x%p Could not disable " 7192 "caching", un); 7193 goto devid_failed; 7194 } 7195 } 7196 7197 /* 7198 * Check the value of the WCE bit now and 7199 * set un_f_write_cache_enabled accordingly. 7200 */ 7201 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7202 mutex_enter(SD_MUTEX(un)); 7203 un->un_f_write_cache_enabled = (wc_enabled != 0); 7204 mutex_exit(SD_MUTEX(un)); 7205 7206 /* 7207 * Set the pstat and error stat values here, so data obtained during the 7208 * previous attach-time routines is available. 7209 * 7210 * Note: This is a critical sequence that needs to be maintained: 7211 * 1) Instantiate the kstats before any routines using the iopath 7212 * (i.e. sd_send_scsi_cmd). 7213 * 2) Initialize the error stats (sd_set_errstats) and partition 7214 * stats (sd_set_pstats)here, following 7215 * cmlb_validate_geometry(), sd_register_devid(), and 7216 * sd_cache_control(). 7217 */ 7218 7219 if (un->un_f_pkstats_enabled && geom_label_valid) { 7220 sd_set_pstats(un); 7221 SD_TRACE(SD_LOG_IO_PARTITION, un, 7222 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7223 } 7224 7225 sd_set_errstats(un); 7226 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7227 "sd_unit_attach: un:0x%p errstats set\n", un); 7228 7229 /* 7230 * Find out what type of reservation this disk supports. 
7231 */ 7232 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7233 case 0: 7234 /* 7235 * SCSI-3 reservations are supported. 7236 */ 7237 un->un_reservation_type = SD_SCSI3_RESERVATION; 7238 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7239 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7240 break; 7241 case ENOTSUP: 7242 /* 7243 * The PERSISTENT RESERVE IN command would not be recognized by 7244 * a SCSI-2 device, so assume the reservation type is SCSI-2. 7245 */ 7246 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7247 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7248 un->un_reservation_type = SD_SCSI2_RESERVATION; 7249 break; 7250 default: 7251 /* 7252 * default to SCSI-3 reservations 7253 */ 7254 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7255 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7256 un->un_reservation_type = SD_SCSI3_RESERVATION; 7257 break; 7258 } 7259 7260 /* 7261 * After successfully attaching an instance, we record the information 7262 * of how many luns have been attached on the relative target and 7263 * controller for parallel SCSI. This information is used when sd tries 7264 * to set the tagged queuing capability in HBA. 7265 */ 7266 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7267 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7268 } 7269 7270 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7271 "sd_unit_attach: un:0x%p exit success\n", un); 7272 7273 return (DDI_SUCCESS); 7274 7275 /* 7276 * An error occurred during the attach; clean up & return failure. 7277 */ 7278 7279 devid_failed: 7280 7281 setup_pm_failed: 7282 ddi_remove_minor_node(devi, NULL); 7283 7284 cmlb_attach_failed: 7285 /* 7286 * Cleanup from the scsi_ifsetcap() calls (437868) 7287 */ 7288 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7289 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7290 7291 /* 7292 * Refer to the comments of setting tagged-qing in the beginning of 7293 * sd_unit_attach. We can only disable tagged queuing when there is 7294 * no lun attached on the target. 7295 */ 7296 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7297 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7298 } 7299 7300 if (un->un_f_is_fibre == FALSE) { 7301 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7302 } 7303 7304 spinup_failed: 7305 7306 mutex_enter(SD_MUTEX(un)); 7307 7308 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7309 if (un->un_direct_priority_timeid != NULL) { 7310 timeout_id_t temp_id = un->un_direct_priority_timeid; 7311 un->un_direct_priority_timeid = NULL; 7312 mutex_exit(SD_MUTEX(un)); 7313 (void) untimeout(temp_id); 7314 mutex_enter(SD_MUTEX(un)); 7315 } 7316 7317 /* Cancel any pending start/stop timeouts */ 7318 if (un->un_startstop_timeid != NULL) { 7319 timeout_id_t temp_id = un->un_startstop_timeid; 7320 un->un_startstop_timeid = NULL; 7321 mutex_exit(SD_MUTEX(un)); 7322 (void) untimeout(temp_id); 7323 mutex_enter(SD_MUTEX(un)); 7324 } 7325 7326 /* Cancel any pending reset-throttle timeouts */ 7327 if (un->un_reset_throttle_timeid != NULL) { 7328 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7329 un->un_reset_throttle_timeid = NULL; 7330 mutex_exit(SD_MUTEX(un)); 7331 (void) untimeout(temp_id); 7332 mutex_enter(SD_MUTEX(un)); 7333 } 7334 7335 /* Cancel any pending retry timeouts */ 7336 if (un->un_retry_timeid != NULL) { 7337 timeout_id_t temp_id = un->un_retry_timeid; 7338 un->un_retry_timeid = NULL; 7339 mutex_exit(SD_MUTEX(un)); 7340 (void) untimeout(temp_id); 7341 mutex_enter(SD_MUTEX(un)); 7342 } 7343 7344 /* Cancel any pending delayed cv broadcast timeouts */ 7345 if (un->un_dcvb_timeid != NULL) { 7346 timeout_id_t temp_id = un->un_dcvb_timeid; 7347 un->un_dcvb_timeid = NULL; 7348 mutex_exit(SD_MUTEX(un)); 7349 (void) untimeout(temp_id); 7350 mutex_enter(SD_MUTEX(un)); 7351 } 7352 7353 mutex_exit(SD_MUTEX(un)); 7354 7355 /* There should not be any in-progress I/O so ASSERT this check */ 7356 ASSERT(un->un_ncmds_in_transport == 0); 7357 ASSERT(un->un_ncmds_in_driver == 0); 7358 7359 /* Do not free the softstate if the callback routine is active */ 7360 sd_sync_with_callback(un); 7361 7362 /* 7363 * Partition stats apparently are not used with removables. These would 7364 * not have been created during attach, so no need to clean them up... 7365 */ 7366 if (un->un_stats != NULL) { 7367 kstat_delete(un->un_stats); 7368 un->un_stats = NULL; 7369 } 7370 if (un->un_errstats != NULL) { 7371 kstat_delete(un->un_errstats); 7372 un->un_errstats = NULL; 7373 } 7374 7375 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7376 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7377 7378 ddi_prop_remove_all(devi); 7379 sema_destroy(&un->un_semoclose); 7380 cv_destroy(&un->un_state_cv); 7381 7382 getrbuf_failed: 7383 7384 sd_free_rqs(un); 7385 7386 alloc_rqs_failed: 7387 7388 devp->sd_private = NULL; 7389 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7390 7391 get_softstate_failed: 7392 /* 7393 * Note: the man pages are unclear as to whether or not doing a 7394 * ddi_soft_state_free(sd_state, instance) is the right way to 7395 * clean up after the ddi_soft_state_zalloc() if the subsequent 7396 * ddi_get_soft_state() fails. The implication seems to be 7397 * that the get_soft_state cannot fail if the zalloc succeeds. 7398 */ 7399 ddi_soft_state_free(sd_state, instance); 7400 7401 probe_failed: 7402 scsi_unprobe(devp); 7403 #ifdef SDDEBUG 7404 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 7405 (sd_level_mask & SD_LOGMASK_TRACE)) { 7406 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 7407 (void *)un); 7408 } 7409 #endif 7410 return (DDI_FAILURE); 7411 } 7412 7413 7414 /* 7415 * Function: sd_unit_detach 7416 * 7417 * Description: Performs DDI_DETACH processing for sddetach(). 
7418 * 7419 * Return Code: DDI_SUCCESS 7420 * DDI_FAILURE 7421 * 7422 * Context: Kernel thread context 7423 */ 7424 7425 static int 7426 sd_unit_detach(dev_info_t *devi) 7427 { 7428 struct scsi_device *devp; 7429 struct sd_lun *un; 7430 int i; 7431 int tgt; 7432 dev_t dev; 7433 dev_info_t *pdip = ddi_get_parent(devi); 7434 int instance = ddi_get_instance(devi); 7435 7436 mutex_enter(&sd_detach_mutex); 7437 7438 /* 7439 * Fail the detach for any of the following: 7440 * - Unable to get the sd_lun struct for the instance 7441 * - A layered driver has an outstanding open on the instance 7442 * - Another thread is already detaching this instance 7443 * - Another thread is currently performing an open 7444 */ 7445 devp = ddi_get_driver_private(devi); 7446 if ((devp == NULL) || 7447 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7448 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7449 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7450 mutex_exit(&sd_detach_mutex); 7451 return (DDI_FAILURE); 7452 } 7453 7454 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7455 7456 /* 7457 * Mark this instance as currently in a detach, to inhibit any 7458 * opens from a layered driver. 7459 */ 7460 un->un_detach_count++; 7461 mutex_exit(&sd_detach_mutex); 7462 7463 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7464 SCSI_ADDR_PROP_TARGET, -1); 7465 7466 dev = sd_make_device(SD_DEVINFO(un)); 7467 7468 #ifndef lint 7469 _NOTE(COMPETING_THREADS_NOW); 7470 #endif 7471 7472 mutex_enter(SD_MUTEX(un)); 7473 7474 /* 7475 * Fail the detach if there are any outstanding layered 7476 * opens on this device. 7477 */ 7478 for (i = 0; i < NDKMAP; i++) { 7479 if (un->un_ocmap.lyropen[i] != 0) { 7480 goto err_notclosed; 7481 } 7482 } 7483 7484 /* 7485 * Verify there are NO outstanding commands issued to this device. 7486 * ie, un_ncmds_in_transport == 0. 7487 * It's possible to have outstanding commands through the physio 7488 * code path, even though everything's closed. 7489 */ 7490 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7491 (un->un_direct_priority_timeid != NULL) || 7492 (un->un_state == SD_STATE_RWAIT)) { 7493 mutex_exit(SD_MUTEX(un)); 7494 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7495 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7496 goto err_stillbusy; 7497 } 7498 7499 /* 7500 * If we have the device reserved, release the reservation. 7501 */ 7502 if ((un->un_resvd_status & SD_RESERVE) && 7503 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7504 mutex_exit(SD_MUTEX(un)); 7505 /* 7506 * Note: sd_reserve_release sends a command to the device 7507 * via the sd_ioctlcmd() path, and can sleep. 7508 */ 7509 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7510 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7511 "sd_dr_detach: Cannot release reservation \n"); 7512 } 7513 } else { 7514 mutex_exit(SD_MUTEX(un)); 7515 } 7516 7517 /* 7518 * Untimeout any reserve recover, throttle reset, restart unit 7519 * and delayed broadcast timeout threads. Protect the timeout pointer 7520 * from getting nulled by their callback functions. 
7521 */ 7522 mutex_enter(SD_MUTEX(un)); 7523 if (un->un_resvd_timeid != NULL) { 7524 timeout_id_t temp_id = un->un_resvd_timeid; 7525 un->un_resvd_timeid = NULL; 7526 mutex_exit(SD_MUTEX(un)); 7527 (void) untimeout(temp_id); 7528 mutex_enter(SD_MUTEX(un)); 7529 } 7530 7531 if (un->un_reset_throttle_timeid != NULL) { 7532 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7533 un->un_reset_throttle_timeid = NULL; 7534 mutex_exit(SD_MUTEX(un)); 7535 (void) untimeout(temp_id); 7536 mutex_enter(SD_MUTEX(un)); 7537 } 7538 7539 if (un->un_startstop_timeid != NULL) { 7540 timeout_id_t temp_id = un->un_startstop_timeid; 7541 un->un_startstop_timeid = NULL; 7542 mutex_exit(SD_MUTEX(un)); 7543 (void) untimeout(temp_id); 7544 mutex_enter(SD_MUTEX(un)); 7545 } 7546 7547 if (un->un_dcvb_timeid != NULL) { 7548 timeout_id_t temp_id = un->un_dcvb_timeid; 7549 un->un_dcvb_timeid = NULL; 7550 mutex_exit(SD_MUTEX(un)); 7551 (void) untimeout(temp_id); 7552 } else { 7553 mutex_exit(SD_MUTEX(un)); 7554 } 7555 7556 /* Remove any pending reservation reclaim requests for this device */ 7557 sd_rmv_resv_reclaim_req(dev); 7558 7559 mutex_enter(SD_MUTEX(un)); 7560 7561 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7562 if (un->un_direct_priority_timeid != NULL) { 7563 timeout_id_t temp_id = un->un_direct_priority_timeid; 7564 un->un_direct_priority_timeid = NULL; 7565 mutex_exit(SD_MUTEX(un)); 7566 (void) untimeout(temp_id); 7567 mutex_enter(SD_MUTEX(un)); 7568 } 7569 7570 /* Cancel any active multi-host disk watch thread requests */ 7571 if (un->un_mhd_token != NULL) { 7572 mutex_exit(SD_MUTEX(un)); 7573 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7574 if (scsi_watch_request_terminate(un->un_mhd_token, 7575 SCSI_WATCH_TERMINATE_NOWAIT)) { 7576 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7577 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7578 /* 7579 * Note: We are returning here after having removed 7580 * some driver timeouts above. This is consistent with 7581 * the legacy implementation but perhaps the watch 7582 * terminate call should be made with the wait flag set. 7583 */ 7584 goto err_stillbusy; 7585 } 7586 mutex_enter(SD_MUTEX(un)); 7587 un->un_mhd_token = NULL; 7588 } 7589 7590 if (un->un_swr_token != NULL) { 7591 mutex_exit(SD_MUTEX(un)); 7592 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7593 if (scsi_watch_request_terminate(un->un_swr_token, 7594 SCSI_WATCH_TERMINATE_NOWAIT)) { 7595 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7596 "sd_dr_detach: Cannot cancel swr watch request\n"); 7597 /* 7598 * Note: We are returning here after having removed 7599 * some driver timeouts above. This is consistent with 7600 * the legacy implementation but perhaps the watch 7601 * terminate call should be made with the wait flag set. 7602 */ 7603 goto err_stillbusy; 7604 } 7605 mutex_enter(SD_MUTEX(un)); 7606 un->un_swr_token = NULL; 7607 } 7608 7609 mutex_exit(SD_MUTEX(un)); 7610 7611 /* 7612 * Clear any scsi_reset_notifies. We clear the reset notifies 7613 * if we have not registered one. 7614 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7615 */ 7616 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7617 sd_mhd_reset_notify_cb, (caddr_t)un); 7618 7619 /* 7620 * protect the timeout pointers from getting nulled by 7621 * their callback functions during the cancellation process. 7622 * In such a scenario untimeout can be invoked with a null value. 
7623 */ 7624 _NOTE(NO_COMPETING_THREADS_NOW); 7625 7626 mutex_enter(&un->un_pm_mutex); 7627 if (un->un_pm_idle_timeid != NULL) { 7628 timeout_id_t temp_id = un->un_pm_idle_timeid; 7629 un->un_pm_idle_timeid = NULL; 7630 mutex_exit(&un->un_pm_mutex); 7631 7632 /* 7633 * Timeout is active; cancel it. 7634 * Note that it'll never be active on a device 7635 * that does not support PM therefore we don't 7636 * have to check before calling pm_idle_component. 7637 */ 7638 (void) untimeout(temp_id); 7639 (void) pm_idle_component(SD_DEVINFO(un), 0); 7640 mutex_enter(&un->un_pm_mutex); 7641 } 7642 7643 /* 7644 * Check whether there is already a timeout scheduled for power 7645 * management. If yes then don't lower the power here, that's. 7646 * the timeout handler's job. 7647 */ 7648 if (un->un_pm_timeid != NULL) { 7649 timeout_id_t temp_id = un->un_pm_timeid; 7650 un->un_pm_timeid = NULL; 7651 mutex_exit(&un->un_pm_mutex); 7652 /* 7653 * Timeout is active; cancel it. 7654 * Note that it'll never be active on a device 7655 * that does not support PM therefore we don't 7656 * have to check before calling pm_idle_component. 7657 */ 7658 (void) untimeout(temp_id); 7659 (void) pm_idle_component(SD_DEVINFO(un), 0); 7660 7661 } else { 7662 mutex_exit(&un->un_pm_mutex); 7663 if ((un->un_f_pm_is_enabled == TRUE) && 7664 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 7665 DDI_SUCCESS)) { 7666 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7667 "sd_dr_detach: Lower power request failed, ignoring.\n"); 7668 /* 7669 * Fix for bug: 4297749, item # 13 7670 * The above test now includes a check to see if PM is 7671 * supported by this device before call 7672 * pm_lower_power(). 7673 * Note, the following is not dead code. The call to 7674 * pm_lower_power above will generate a call back into 7675 * our sdpower routine which might result in a timeout 7676 * handler getting activated. Therefore the following 7677 * code is valid and necessary. 7678 */ 7679 mutex_enter(&un->un_pm_mutex); 7680 if (un->un_pm_timeid != NULL) { 7681 timeout_id_t temp_id = un->un_pm_timeid; 7682 un->un_pm_timeid = NULL; 7683 mutex_exit(&un->un_pm_mutex); 7684 (void) untimeout(temp_id); 7685 (void) pm_idle_component(SD_DEVINFO(un), 0); 7686 } else { 7687 mutex_exit(&un->un_pm_mutex); 7688 } 7689 } 7690 } 7691 7692 /* 7693 * Cleanup from the scsi_ifsetcap() calls (437868) 7694 * Relocated here from above to be after the call to 7695 * pm_lower_power, which was getting errors. 7696 */ 7697 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7698 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7699 7700 /* 7701 * Currently, tagged queuing is supported per target based by HBA. 7702 * Setting this per lun instance actually sets the capability of this 7703 * target in HBA, which affects those luns already attached on the 7704 * same target. So during detach, we can only disable this capability 7705 * only when this is the only lun left on this target. By doing 7706 * this, we assume a target has the same tagged queuing capability 7707 * for every lun. The condition can be removed when HBA is changed to 7708 * support per lun based tagged queuing capability. 
7709 */ 7710 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7711 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7712 } 7713 7714 if (un->un_f_is_fibre == FALSE) { 7715 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7716 } 7717 7718 /* 7719 * Remove any event callbacks, fibre only 7720 */ 7721 if (un->un_f_is_fibre == TRUE) { 7722 if ((un->un_insert_event != NULL) && 7723 (ddi_remove_event_handler(un->un_insert_cb_id) != 7724 DDI_SUCCESS)) { 7725 /* 7726 * Note: We are returning here after having done 7727 * substantial cleanup above. This is consistent 7728 * with the legacy implementation but this may not 7729 * be the right thing to do. 7730 */ 7731 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7732 "sd_dr_detach: Cannot cancel insert event\n"); 7733 goto err_remove_event; 7734 } 7735 un->un_insert_event = NULL; 7736 7737 if ((un->un_remove_event != NULL) && 7738 (ddi_remove_event_handler(un->un_remove_cb_id) != 7739 DDI_SUCCESS)) { 7740 /* 7741 * Note: We are returning here after having done 7742 * substantial cleanup above. This is consistent 7743 * with the legacy implementation but this may not 7744 * be the right thing to do. 7745 */ 7746 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7747 "sd_dr_detach: Cannot cancel remove event\n"); 7748 goto err_remove_event; 7749 } 7750 un->un_remove_event = NULL; 7751 } 7752 7753 /* Do not free the softstate if the callback routine is active */ 7754 sd_sync_with_callback(un); 7755 7756 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7757 cmlb_free_handle(&un->un_cmlbhandle); 7758 7759 /* 7760 * Hold the detach mutex here, to make sure that no other threads ever 7761 * can access a (partially) freed soft state structure. 7762 */ 7763 mutex_enter(&sd_detach_mutex); 7764 7765 /* 7766 * Clean up the soft state struct. 7767 * Cleanup is done in reverse order of allocs/inits. 7768 * At this point there should be no competing threads anymore. 7769 */ 7770 7771 /* Unregister and free device id. */ 7772 ddi_devid_unregister(devi); 7773 if (un->un_devid) { 7774 ddi_devid_free(un->un_devid); 7775 un->un_devid = NULL; 7776 } 7777 7778 /* 7779 * Destroy wmap cache if it exists. 7780 */ 7781 if (un->un_wm_cache != NULL) { 7782 kmem_cache_destroy(un->un_wm_cache); 7783 un->un_wm_cache = NULL; 7784 } 7785 7786 /* 7787 * kstat cleanup is done in detach for all device types (4363169). 7788 * We do not want to fail detach if the device kstats are not deleted 7789 * since there is a confusion about the devo_refcnt for the device. 7790 * We just delete the kstats and let detach complete successfully. 7791 */ 7792 if (un->un_stats != NULL) { 7793 kstat_delete(un->un_stats); 7794 un->un_stats = NULL; 7795 } 7796 if (un->un_errstats != NULL) { 7797 kstat_delete(un->un_errstats); 7798 un->un_errstats = NULL; 7799 } 7800 7801 /* Remove partition stats */ 7802 if (un->un_f_pkstats_enabled) { 7803 for (i = 0; i < NSDMAP; i++) { 7804 if (un->un_pstats[i] != NULL) { 7805 kstat_delete(un->un_pstats[i]); 7806 un->un_pstats[i] = NULL; 7807 } 7808 } 7809 } 7810 7811 /* Remove xbuf registration */ 7812 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7813 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7814 7815 /* Remove driver properties */ 7816 ddi_prop_remove_all(devi); 7817 7818 mutex_destroy(&un->un_pm_mutex); 7819 cv_destroy(&un->un_pm_busy_cv); 7820 7821 cv_destroy(&un->un_wcc_cv); 7822 7823 /* Open/close semaphore */ 7824 sema_destroy(&un->un_semoclose); 7825 7826 /* Removable media condvar. 
*/ 7827 cv_destroy(&un->un_state_cv); 7828 7829 /* Suspend/resume condvar. */ 7830 cv_destroy(&un->un_suspend_cv); 7831 cv_destroy(&un->un_disk_busy_cv); 7832 7833 sd_free_rqs(un); 7834 7835 /* Free up soft state */ 7836 devp->sd_private = NULL; 7837 7838 bzero(un, sizeof (struct sd_lun)); 7839 ddi_soft_state_free(sd_state, instance); 7840 7841 mutex_exit(&sd_detach_mutex); 7842 7843 /* This frees up the INQUIRY data associated with the device. */ 7844 scsi_unprobe(devp); 7845 7846 /* 7847 * After successfully detaching an instance, we update the information 7848 * of how many luns have been attached in the relative target and 7849 * controller for parallel SCSI. This information is used when sd tries 7850 * to set the tagged queuing capability in HBA. 7851 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 7852 * check if the device is parallel SCSI. However, we don't need to 7853 * check here because we've already checked during attach. No device 7854 * that is not parallel SCSI is in the chain. 7855 */ 7856 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7857 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 7858 } 7859 7860 return (DDI_SUCCESS); 7861 7862 err_notclosed: 7863 mutex_exit(SD_MUTEX(un)); 7864 7865 err_stillbusy: 7866 _NOTE(NO_COMPETING_THREADS_NOW); 7867 7868 err_remove_event: 7869 mutex_enter(&sd_detach_mutex); 7870 un->un_detach_count--; 7871 mutex_exit(&sd_detach_mutex); 7872 7873 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 7874 return (DDI_FAILURE); 7875 } 7876 7877 7878 /* 7879 * Function: sd_create_errstats 7880 * 7881 * Description: This routine instantiates the device error stats. 7882 * 7883 * Note: During attach the stats are instantiated first so they are 7884 * available for attach-time routines that utilize the driver 7885 * iopath to send commands to the device. The stats are initialized 7886 * separately so data obtained during some attach-time routines is 7887 * available. 
(4362483) 7888 * 7889 * Arguments: un - driver soft state (unit) structure 7890 * instance - driver instance 7891 * 7892 * Context: Kernel thread context 7893 */ 7894 7895 static void 7896 sd_create_errstats(struct sd_lun *un, int instance) 7897 { 7898 struct sd_errstats *stp; 7899 char kstatmodule_err[KSTAT_STRLEN]; 7900 char kstatname[KSTAT_STRLEN]; 7901 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 7902 7903 ASSERT(un != NULL); 7904 7905 if (un->un_errstats != NULL) { 7906 return; 7907 } 7908 7909 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 7910 "%serr", sd_label); 7911 (void) snprintf(kstatname, sizeof (kstatname), 7912 "%s%d,err", sd_label, instance); 7913 7914 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 7915 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 7916 7917 if (un->un_errstats == NULL) { 7918 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7919 "sd_create_errstats: Failed kstat_create\n"); 7920 return; 7921 } 7922 7923 stp = (struct sd_errstats *)un->un_errstats->ks_data; 7924 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 7925 KSTAT_DATA_UINT32); 7926 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 7927 KSTAT_DATA_UINT32); 7928 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 7929 KSTAT_DATA_UINT32); 7930 kstat_named_init(&stp->sd_vid, "Vendor", 7931 KSTAT_DATA_CHAR); 7932 kstat_named_init(&stp->sd_pid, "Product", 7933 KSTAT_DATA_CHAR); 7934 kstat_named_init(&stp->sd_revision, "Revision", 7935 KSTAT_DATA_CHAR); 7936 kstat_named_init(&stp->sd_serial, "Serial No", 7937 KSTAT_DATA_CHAR); 7938 kstat_named_init(&stp->sd_capacity, "Size", 7939 KSTAT_DATA_ULONGLONG); 7940 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 7941 KSTAT_DATA_UINT32); 7942 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 7943 KSTAT_DATA_UINT32); 7944 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 7945 KSTAT_DATA_UINT32); 7946 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 7947 KSTAT_DATA_UINT32); 7948 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 7949 KSTAT_DATA_UINT32); 7950 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 7951 KSTAT_DATA_UINT32); 7952 7953 un->un_errstats->ks_private = un; 7954 un->un_errstats->ks_update = nulldev; 7955 7956 kstat_install(un->un_errstats); 7957 } 7958 7959 7960 /* 7961 * Function: sd_set_errstats 7962 * 7963 * Description: This routine sets the value of the vendor id, product id, 7964 * revision, serial number, and capacity device error stats. 7965 * 7966 * Note: During attach the stats are instantiated first so they are 7967 * available for attach-time routines that utilize the driver 7968 * iopath to send commands to the device. The stats are initialized 7969 * separately so data obtained during some attach-time routines is 7970 * available. 
(4362483)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_set_errstats(struct sd_lun *un)
{
    struct sd_errstats *stp;

    ASSERT(un != NULL);
    ASSERT(un->un_errstats != NULL);
    stp = (struct sd_errstats *)un->un_errstats->ks_data;
    ASSERT(stp != NULL);
    (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
    (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
    (void) strncpy(stp->sd_revision.value.c,
        un->un_sd->sd_inq->inq_revision, 4);

    /*
     * All the errstats are persistent across detach/attach, so reset
     * them all here in case a disk drive was hot-replaced, except for
     * Sun-qualified drives whose serial number is unchanged.
     */
    if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
        (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
        sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
        stp->sd_softerrs.value.ui32 = 0;
        stp->sd_harderrs.value.ui32 = 0;
        stp->sd_transerrs.value.ui32 = 0;
        stp->sd_rq_media_err.value.ui32 = 0;
        stp->sd_rq_ntrdy_err.value.ui32 = 0;
        stp->sd_rq_nodev_err.value.ui32 = 0;
        stp->sd_rq_recov_err.value.ui32 = 0;
        stp->sd_rq_illrq_err.value.ui32 = 0;
        stp->sd_rq_pfa_err.value.ui32 = 0;
    }

    /*
     * Set the "Serial No" kstat for Sun qualified drives (indicated by
     * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid))
     * (4376302).
     */
    if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
        bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
            sizeof (SD_INQUIRY(un)->inq_serial));
    }

    if (un->un_f_blockcount_is_valid != TRUE) {
        /*
         * Set capacity error stat to 0 for no media. This ensures
         * a valid capacity is displayed in response to 'iostat -E'
         * when no media is present in the device.
         */
        stp->sd_capacity.value.ui64 = 0;
    } else {
        /*
         * Multiply un_blockcount by un->un_sys_blocksize to get
         * capacity.
         *
         * Note: for non-512 blocksize devices "un_blockcount" has been
         * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
         * (un_tgt_blocksize / un->un_sys_blocksize).
         */
        stp->sd_capacity.value.ui64 = (uint64_t)
            ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
    }
}


/*
 * Function: sd_set_pstats
 *
 * Description: This routine instantiates and initializes the partition
 *              stats for each partition with more than zero blocks.
 *              (4363169)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_set_pstats(struct sd_lun *un)
{
    char kstatname[KSTAT_STRLEN];
    int instance;
    int i;
    diskaddr_t nblks = 0;
    char *partname = NULL;

    ASSERT(un != NULL);

    instance = ddi_get_instance(SD_DEVINFO(un));

    /* Note: x86: is this a VTOC8/VTOC16 difference?
     */
    for (i = 0; i < NSDMAP; i++) {

        if (cmlb_partinfo(un->un_cmlbhandle, i,
            &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
            continue;
        mutex_enter(SD_MUTEX(un));

        if ((un->un_pstats[i] == NULL) &&
            (nblks != 0)) {

            (void) snprintf(kstatname, sizeof (kstatname),
                "%s%d,%s", sd_label, instance,
                partname);

            un->un_pstats[i] = kstat_create(sd_label,
                instance, kstatname, "partition", KSTAT_TYPE_IO,
                1, KSTAT_FLAG_PERSISTENT);
            if (un->un_pstats[i] != NULL) {
                un->un_pstats[i]->ks_lock = SD_MUTEX(un);
                kstat_install(un->un_pstats[i]);
            }
        }
        mutex_exit(SD_MUTEX(un));
    }
}


#if (defined(__fibre))
/*
 * Function: sd_init_event_callbacks
 *
 * Description: This routine initializes the insertion and removal event
 *              callbacks. (fibre only)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_init_event_callbacks(struct sd_lun *un)
{
    ASSERT(un != NULL);

    if ((un->un_insert_event == NULL) &&
        (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
        &un->un_insert_event) == DDI_SUCCESS)) {
        /*
         * Add the callback for an insertion event.
         */
        (void) ddi_add_event_handler(SD_DEVINFO(un),
            un->un_insert_event, sd_event_callback, (void *)un,
            &(un->un_insert_cb_id));
    }

    if ((un->un_remove_event == NULL) &&
        (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
        &un->un_remove_event) == DDI_SUCCESS)) {
        /*
         * Add the callback for a removal event.
         */
        (void) ddi_add_event_handler(SD_DEVINFO(un),
            un->un_remove_event, sd_event_callback, (void *)un,
            &(un->un_remove_cb_id));
    }
}


/*
 * Function: sd_event_callback
 *
 * Description: This routine handles insert/remove events (photon). The
 *              state is changed to OFFLINE, which can be used to suppress
 *              error messages. (fibre only)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Callout thread context
 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
    struct sd_lun *un = (struct sd_lun *)arg;

    _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
    if (event == un->un_insert_event) {
        SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
        mutex_enter(SD_MUTEX(un));
        if (un->un_state == SD_STATE_OFFLINE) {
            if (un->un_last_state != SD_STATE_SUSPENDED) {
                un->un_state = un->un_last_state;
            } else {
                /*
                 * We have gone through SUSPEND/RESUME while
                 * we were offline. Restore the last state.
                 */
                un->un_state = un->un_save_state;
            }
        }
        mutex_exit(SD_MUTEX(un));

        _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
    } else if (event == un->un_remove_event) {
        SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
        mutex_enter(SD_MUTEX(un));
        /*
         * We need to handle an event callback that occurs during
         * the suspend operation, since we don't prevent it.
8179 */ 8180 if (un->un_state != SD_STATE_OFFLINE) { 8181 if (un->un_state != SD_STATE_SUSPENDED) { 8182 New_state(un, SD_STATE_OFFLINE); 8183 } else { 8184 un->un_last_state = SD_STATE_OFFLINE; 8185 } 8186 } 8187 mutex_exit(SD_MUTEX(un)); 8188 } else { 8189 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8190 "!Unknown event\n"); 8191 } 8192 8193 } 8194 #endif 8195 8196 /* 8197 * Function: sd_cache_control() 8198 * 8199 * Description: This routine is the driver entry point for setting 8200 * read and write caching by modifying the WCE (write cache 8201 * enable) and RCD (read cache disable) bits of mode 8202 * page 8 (MODEPAGE_CACHING). 8203 * 8204 * Arguments: un - driver soft state (unit) structure 8205 * rcd_flag - flag for controlling the read cache 8206 * wce_flag - flag for controlling the write cache 8207 * 8208 * Return Code: EIO 8209 * code returned by sd_send_scsi_MODE_SENSE and 8210 * sd_send_scsi_MODE_SELECT 8211 * 8212 * Context: Kernel Thread 8213 */ 8214 8215 static int 8216 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8217 { 8218 struct mode_caching *mode_caching_page; 8219 uchar_t *header; 8220 size_t buflen; 8221 int hdrlen; 8222 int bd_len; 8223 int rval = 0; 8224 struct mode_header_grp2 *mhp; 8225 8226 ASSERT(un != NULL); 8227 8228 /* 8229 * Do a test unit ready, otherwise a mode sense may not work if this 8230 * is the first command sent to the device after boot. 8231 */ 8232 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8233 8234 if (un->un_f_cfg_is_atapi == TRUE) { 8235 hdrlen = MODE_HEADER_LENGTH_GRP2; 8236 } else { 8237 hdrlen = MODE_HEADER_LENGTH; 8238 } 8239 8240 /* 8241 * Allocate memory for the retrieved mode page and its headers. Set 8242 * a pointer to the page itself. Use mode_cache_scsi3 to insure 8243 * we get all of the mode sense data otherwise, the mode select 8244 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8245 */ 8246 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8247 sizeof (struct mode_cache_scsi3); 8248 8249 header = kmem_zalloc(buflen, KM_SLEEP); 8250 8251 /* Get the information from the device. */ 8252 if (un->un_f_cfg_is_atapi == TRUE) { 8253 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8254 MODEPAGE_CACHING, SD_PATH_DIRECT); 8255 } else { 8256 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8257 MODEPAGE_CACHING, SD_PATH_DIRECT); 8258 } 8259 if (rval != 0) { 8260 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8261 "sd_cache_control: Mode Sense Failed\n"); 8262 kmem_free(header, buflen); 8263 return (rval); 8264 } 8265 8266 /* 8267 * Determine size of Block Descriptors in order to locate 8268 * the mode page data. ATAPI devices return 0, SCSI devices 8269 * should return MODE_BLK_DESC_LENGTH. 
8270 */ 8271 if (un->un_f_cfg_is_atapi == TRUE) { 8272 mhp = (struct mode_header_grp2 *)header; 8273 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8274 } else { 8275 bd_len = ((struct mode_header *)header)->bdesc_length; 8276 } 8277 8278 if (bd_len > MODE_BLK_DESC_LENGTH) { 8279 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8280 "sd_cache_control: Mode Sense returned invalid " 8281 "block descriptor length\n"); 8282 kmem_free(header, buflen); 8283 return (EIO); 8284 } 8285 8286 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8287 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8288 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8289 " caching page code mismatch %d\n", 8290 mode_caching_page->mode_page.code); 8291 kmem_free(header, buflen); 8292 return (EIO); 8293 } 8294 8295 /* Check the relevant bits on successful mode sense. */ 8296 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8297 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8298 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8299 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8300 8301 size_t sbuflen; 8302 uchar_t save_pg; 8303 8304 /* 8305 * Construct select buffer length based on the 8306 * length of the sense data returned. 8307 */ 8308 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8309 sizeof (struct mode_page) + 8310 (int)mode_caching_page->mode_page.length; 8311 8312 /* 8313 * Set the caching bits as requested. 8314 */ 8315 if (rcd_flag == SD_CACHE_ENABLE) 8316 mode_caching_page->rcd = 0; 8317 else if (rcd_flag == SD_CACHE_DISABLE) 8318 mode_caching_page->rcd = 1; 8319 8320 if (wce_flag == SD_CACHE_ENABLE) 8321 mode_caching_page->wce = 1; 8322 else if (wce_flag == SD_CACHE_DISABLE) 8323 mode_caching_page->wce = 0; 8324 8325 /* 8326 * Save the page if the mode sense says the 8327 * drive supports it. 8328 */ 8329 save_pg = mode_caching_page->mode_page.ps ? 8330 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8331 8332 /* Clear reserved bits before mode select. */ 8333 mode_caching_page->mode_page.ps = 0; 8334 8335 /* 8336 * Clear out mode header for mode select. 8337 * The rest of the retrieved page will be reused. 8338 */ 8339 bzero(header, hdrlen); 8340 8341 if (un->un_f_cfg_is_atapi == TRUE) { 8342 mhp = (struct mode_header_grp2 *)header; 8343 mhp->bdesc_length_hi = bd_len >> 8; 8344 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8345 } else { 8346 ((struct mode_header *)header)->bdesc_length = bd_len; 8347 } 8348 8349 /* Issue mode select to change the cache settings */ 8350 if (un->un_f_cfg_is_atapi == TRUE) { 8351 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8352 sbuflen, save_pg, SD_PATH_DIRECT); 8353 } else { 8354 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8355 sbuflen, save_pg, SD_PATH_DIRECT); 8356 } 8357 } 8358 8359 kmem_free(header, buflen); 8360 return (rval); 8361 } 8362 8363 8364 /* 8365 * Function: sd_get_write_cache_enabled() 8366 * 8367 * Description: This routine is the driver entry point for determining if 8368 * write caching is enabled. It examines the WCE (write cache 8369 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8370 * 8371 * Arguments: un - driver soft state (unit) structure 8372 * is_enabled - pointer to int where write cache enabled state 8373 * is returned (non-zero -> write cache enabled) 8374 * 8375 * 8376 * Return Code: EIO 8377 * code returned by sd_send_scsi_MODE_SENSE 8378 * 8379 * Context: Kernel Thread 8380 * 8381 * NOTE: If ioctl is added to disable write cache, this sequence should 8382 * be followed so that no locking is required for accesses to 8383 * un->un_f_write_cache_enabled: 8384 * do mode select to clear wce 8385 * do synchronize cache to flush cache 8386 * set un->un_f_write_cache_enabled = FALSE 8387 * 8388 * Conversely, an ioctl to enable the write cache should be done 8389 * in this order: 8390 * set un->un_f_write_cache_enabled = TRUE 8391 * do mode select to set wce 8392 */ 8393 8394 static int 8395 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8396 { 8397 struct mode_caching *mode_caching_page; 8398 uchar_t *header; 8399 size_t buflen; 8400 int hdrlen; 8401 int bd_len; 8402 int rval = 0; 8403 8404 ASSERT(un != NULL); 8405 ASSERT(is_enabled != NULL); 8406 8407 /* in case of error, flag as enabled */ 8408 *is_enabled = TRUE; 8409 8410 /* 8411 * Do a test unit ready, otherwise a mode sense may not work if this 8412 * is the first command sent to the device after boot. 8413 */ 8414 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8415 8416 if (un->un_f_cfg_is_atapi == TRUE) { 8417 hdrlen = MODE_HEADER_LENGTH_GRP2; 8418 } else { 8419 hdrlen = MODE_HEADER_LENGTH; 8420 } 8421 8422 /* 8423 * Allocate memory for the retrieved mode page and its headers. Set 8424 * a pointer to the page itself. 8425 */ 8426 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8427 header = kmem_zalloc(buflen, KM_SLEEP); 8428 8429 /* Get the information from the device. */ 8430 if (un->un_f_cfg_is_atapi == TRUE) { 8431 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8432 MODEPAGE_CACHING, SD_PATH_DIRECT); 8433 } else { 8434 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8435 MODEPAGE_CACHING, SD_PATH_DIRECT); 8436 } 8437 if (rval != 0) { 8438 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8439 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8440 kmem_free(header, buflen); 8441 return (rval); 8442 } 8443 8444 /* 8445 * Determine size of Block Descriptors in order to locate 8446 * the mode page data. ATAPI devices return 0, SCSI devices 8447 * should return MODE_BLK_DESC_LENGTH. 
8448 */ 8449 if (un->un_f_cfg_is_atapi == TRUE) { 8450 struct mode_header_grp2 *mhp; 8451 mhp = (struct mode_header_grp2 *)header; 8452 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8453 } else { 8454 bd_len = ((struct mode_header *)header)->bdesc_length; 8455 } 8456 8457 if (bd_len > MODE_BLK_DESC_LENGTH) { 8458 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8459 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8460 "block descriptor length\n"); 8461 kmem_free(header, buflen); 8462 return (EIO); 8463 } 8464 8465 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8466 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8467 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8468 " caching page code mismatch %d\n", 8469 mode_caching_page->mode_page.code); 8470 kmem_free(header, buflen); 8471 return (EIO); 8472 } 8473 *is_enabled = mode_caching_page->wce; 8474 8475 kmem_free(header, buflen); 8476 return (0); 8477 } 8478 8479 8480 /* 8481 * Function: sd_make_device 8482 * 8483 * Description: Utility routine to return the Solaris device number from 8484 * the data in the device's dev_info structure. 8485 * 8486 * Return Code: The Solaris device number 8487 * 8488 * Context: Any 8489 */ 8490 8491 static dev_t 8492 sd_make_device(dev_info_t *devi) 8493 { 8494 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8495 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8496 } 8497 8498 8499 /* 8500 * Function: sd_pm_entry 8501 * 8502 * Description: Called at the start of a new command to manage power 8503 * and busy status of a device. This includes determining whether 8504 * the current power state of the device is sufficient for 8505 * performing the command or whether it must be changed. 8506 * The PM framework is notified appropriately. 8507 * Only with a return status of DDI_SUCCESS will the 8508 * component be busy to the framework. 8509 * 8510 * All callers of sd_pm_entry must check the return status 8511 * and only call sd_pm_exit it it was DDI_SUCCESS. A status 8512 * of DDI_FAILURE indicates the device failed to power up. 8513 * In this case un_pm_count has been adjusted so the result 8514 * on exit is still powered down, ie. count is less than 0. 8515 * Calling sd_pm_exit with this count value hits an ASSERT. 8516 * 8517 * Return Code: DDI_SUCCESS or DDI_FAILURE 8518 * 8519 * Context: Kernel thread context. 8520 */ 8521 8522 static int 8523 sd_pm_entry(struct sd_lun *un) 8524 { 8525 int return_status = DDI_SUCCESS; 8526 8527 ASSERT(!mutex_owned(SD_MUTEX(un))); 8528 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8529 8530 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8531 8532 if (un->un_f_pm_is_enabled == FALSE) { 8533 SD_TRACE(SD_LOG_IO_PM, un, 8534 "sd_pm_entry: exiting, PM not enabled\n"); 8535 return (return_status); 8536 } 8537 8538 /* 8539 * Just increment a counter if PM is enabled. On the transition from 8540 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8541 * the count with each IO and mark the device as idle when the count 8542 * hits 0. 8543 * 8544 * If the count is less than 0 the device is powered down. If a powered 8545 * down device is successfully powered up then the count must be 8546 * incremented to reflect the power up. Note that it'll get incremented 8547 * a second time to become busy. 8548 * 8549 * Because the following has the potential to change the device state 8550 * and must release the un_pm_mutex to do so, only one thread can be 8551 * allowed through at a time. 
8552 */ 8553 8554 mutex_enter(&un->un_pm_mutex); 8555 while (un->un_pm_busy == TRUE) { 8556 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8557 } 8558 un->un_pm_busy = TRUE; 8559 8560 if (un->un_pm_count < 1) { 8561 8562 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8563 8564 /* 8565 * Indicate we are now busy so the framework won't attempt to 8566 * power down the device. This call will only fail if either 8567 * we passed a bad component number or the device has no 8568 * components. Neither of these should ever happen. 8569 */ 8570 mutex_exit(&un->un_pm_mutex); 8571 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8572 ASSERT(return_status == DDI_SUCCESS); 8573 8574 mutex_enter(&un->un_pm_mutex); 8575 8576 if (un->un_pm_count < 0) { 8577 mutex_exit(&un->un_pm_mutex); 8578 8579 SD_TRACE(SD_LOG_IO_PM, un, 8580 "sd_pm_entry: power up component\n"); 8581 8582 /* 8583 * pm_raise_power will cause sdpower to be called 8584 * which brings the device power level to the 8585 * desired state, ON in this case. If successful, 8586 * un_pm_count and un_power_level will be updated 8587 * appropriately. 8588 */ 8589 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8590 SD_SPINDLE_ON); 8591 8592 mutex_enter(&un->un_pm_mutex); 8593 8594 if (return_status != DDI_SUCCESS) { 8595 /* 8596 * Power up failed. 8597 * Idle the device and adjust the count 8598 * so the result on exit is that we're 8599 * still powered down, ie. count is less than 0. 8600 */ 8601 SD_TRACE(SD_LOG_IO_PM, un, 8602 "sd_pm_entry: power up failed," 8603 " idle the component\n"); 8604 8605 (void) pm_idle_component(SD_DEVINFO(un), 0); 8606 un->un_pm_count--; 8607 } else { 8608 /* 8609 * Device is powered up, verify the 8610 * count is non-negative. 8611 * This is debug only. 8612 */ 8613 ASSERT(un->un_pm_count == 0); 8614 } 8615 } 8616 8617 if (return_status == DDI_SUCCESS) { 8618 /* 8619 * For performance, now that the device has been tagged 8620 * as busy, and it's known to be powered up, update the 8621 * chain types to use jump tables that do not include 8622 * pm. This significantly lowers the overhead and 8623 * therefore improves performance. 8624 */ 8625 8626 mutex_exit(&un->un_pm_mutex); 8627 mutex_enter(SD_MUTEX(un)); 8628 SD_TRACE(SD_LOG_IO_PM, un, 8629 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8630 un->un_uscsi_chain_type); 8631 8632 if (un->un_f_non_devbsize_supported) { 8633 un->un_buf_chain_type = 8634 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8635 } else { 8636 un->un_buf_chain_type = 8637 SD_CHAIN_INFO_DISK_NO_PM; 8638 } 8639 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8640 8641 SD_TRACE(SD_LOG_IO_PM, un, 8642 " changed uscsi_chain_type to %d\n", 8643 un->un_uscsi_chain_type); 8644 mutex_exit(SD_MUTEX(un)); 8645 mutex_enter(&un->un_pm_mutex); 8646 8647 if (un->un_pm_idle_timeid == NULL) { 8648 /* 300 ms. */ 8649 un->un_pm_idle_timeid = 8650 timeout(sd_pm_idletimeout_handler, un, 8651 (drv_usectohz((clock_t)300000))); 8652 /* 8653 * Include an extra call to busy which keeps the 8654 * device busy with-respect-to the PM layer 8655 * until the timer fires, at which time it'll 8656 * get the extra idle call. 8657 */ 8658 (void) pm_busy_component(SD_DEVINFO(un), 0); 8659 } 8660 } 8661 } 8662 un->un_pm_busy = FALSE; 8663 /* Next... 
 */
    cv_signal(&un->un_pm_busy_cv);

    un->un_pm_count++;

    SD_TRACE(SD_LOG_IO_PM, un,
        "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);

    mutex_exit(&un->un_pm_mutex);

    return (return_status);
}


/*
 * Function: sd_pm_exit
 *
 * Description: Called at the completion of a command to manage busy
 *              status for the device. If the device becomes idle the
 *              PM framework is notified.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_exit(struct sd_lun *un)
{
    ASSERT(!mutex_owned(SD_MUTEX(un)));
    ASSERT(!mutex_owned(&un->un_pm_mutex));

    SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");

    /*
     * After attach the following flag is only read, so don't
     * take the penalty of acquiring a mutex for it.
     */
    if (un->un_f_pm_is_enabled == TRUE) {

        mutex_enter(&un->un_pm_mutex);
        un->un_pm_count--;

        SD_TRACE(SD_LOG_IO_PM, un,
            "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);

        ASSERT(un->un_pm_count >= 0);
        if (un->un_pm_count == 0) {
            mutex_exit(&un->un_pm_mutex);

            SD_TRACE(SD_LOG_IO_PM, un,
                "sd_pm_exit: idle component\n");

            (void) pm_idle_component(SD_DEVINFO(un), 0);

        } else {
            mutex_exit(&un->un_pm_mutex);
        }
    }

    SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
}


/*
 * Function: sdopen
 *
 * Description: Driver's open(9e) entry point function.
 *
 * Arguments: dev_p - pointer to device number
 *            flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
 *            otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *            cred_p - user credential pointer
 *
 * Return Code: EINVAL
 *              ENXIO
 *              EIO
 *              EROFS
 *              EBUSY
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
    struct sd_lun *un;
    int nodelay;
    int part;
    uint64_t partmask;
    int instance;
    dev_t dev;
    int rval = EIO;
    diskaddr_t nblks = 0;

    /* Validate the open type */
    if (otyp >= OTYPCNT) {
        return (EINVAL);
    }

    dev = *dev_p;
    instance = SDUNIT(dev);
    mutex_enter(&sd_detach_mutex);

    /*
     * Fail the open if there is no softstate for the instance, or
     * if another thread somewhere is trying to detach the instance.
     */
    if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
        (un->un_detach_count != 0)) {
        mutex_exit(&sd_detach_mutex);
        /*
         * The probe cache only needs to be cleared when open(9e) fails
         * with ENXIO (4238046).
         *
         * Unconditionally clearing the probe cache is ok with
         * separate sd/ssd binaries; on the x86 platform it can be an
         * issue, since both parallel and fibre are handled in one
         * binary.
         */
        sd_scsi_clear_probe_cache();
        return (ENXIO);
    }

    /*
     * The un_layer_count is to prevent another thread in specfs from
     * trying to detach the instance, which can happen when we are
     * called from a higher-layer driver instead of through specfs.
     * This will not be needed when DDI provides a layered driver
     * interface that allows specfs to know that an instance is in
     * use by a layered driver & should not be detached.
     *
     * Note: the semantics for layered driver opens are exactly one
     * close for every open.
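     *
     * For illustration, a layered consumer typically reaches this path
     * through the LDI (a hypothetical example; the exact consumer does
     * not matter): each successful ldi_open_by_name() that resolves to
     * this instance results in one sdopen() with otyp == OTYP_LYR,
     * bumping un_layer_count, and the matching ldi_close() results in
     * exactly one sdclose() that drops it.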
8796 */ 8797 if (otyp == OTYP_LYR) { 8798 un->un_layer_count++; 8799 } 8800 8801 /* 8802 * Keep a count of the current # of opens in progress. This is because 8803 * some layered drivers try to call us as a regular open. This can 8804 * cause problems that we cannot prevent, however by keeping this count 8805 * we can at least keep our open and detach routines from racing against 8806 * each other under such conditions. 8807 */ 8808 un->un_opens_in_progress++; 8809 mutex_exit(&sd_detach_mutex); 8810 8811 nodelay = (flag & (FNDELAY | FNONBLOCK)); 8812 part = SDPART(dev); 8813 partmask = 1 << part; 8814 8815 /* 8816 * We use a semaphore here in order to serialize 8817 * open and close requests on the device. 8818 */ 8819 sema_p(&un->un_semoclose); 8820 8821 mutex_enter(SD_MUTEX(un)); 8822 8823 /* 8824 * All device accesses go thru sdstrategy() where we check 8825 * on suspend status but there could be a scsi_poll command, 8826 * which bypasses sdstrategy(), so we need to check pm 8827 * status. 8828 */ 8829 8830 if (!nodelay) { 8831 while ((un->un_state == SD_STATE_SUSPENDED) || 8832 (un->un_state == SD_STATE_PM_CHANGING)) { 8833 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 8834 } 8835 8836 mutex_exit(SD_MUTEX(un)); 8837 if (sd_pm_entry(un) != DDI_SUCCESS) { 8838 rval = EIO; 8839 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 8840 "sdopen: sd_pm_entry failed\n"); 8841 goto open_failed_with_pm; 8842 } 8843 mutex_enter(SD_MUTEX(un)); 8844 } 8845 8846 /* check for previous exclusive open */ 8847 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 8848 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 8849 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 8850 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 8851 8852 if (un->un_exclopen & (partmask)) { 8853 goto excl_open_fail; 8854 } 8855 8856 if (flag & FEXCL) { 8857 int i; 8858 if (un->un_ocmap.lyropen[part]) { 8859 goto excl_open_fail; 8860 } 8861 for (i = 0; i < (OTYPCNT - 1); i++) { 8862 if (un->un_ocmap.regopen[i] & (partmask)) { 8863 goto excl_open_fail; 8864 } 8865 } 8866 } 8867 8868 /* 8869 * Check the write permission if this is a removable media device, 8870 * NDELAY has not been set, and writable permission is requested. 8871 * 8872 * Note: If NDELAY was set and this is write-protected media the WRITE 8873 * attempt will fail with EIO as part of the I/O processing. This is a 8874 * more permissive implementation that allows the open to succeed and 8875 * WRITE attempts to fail when appropriate. 8876 */ 8877 if (un->un_f_chk_wp_open) { 8878 if ((flag & FWRITE) && (!nodelay)) { 8879 mutex_exit(SD_MUTEX(un)); 8880 /* 8881 * Defer the check for write permission on writable 8882 * DVD drive till sdstrategy and will not fail open even 8883 * if FWRITE is set as the device can be writable 8884 * depending upon the media and the media can change 8885 * after the call to open(). 8886 */ 8887 if (un->un_f_dvdram_writable_device == FALSE) { 8888 if (ISCD(un) || sr_check_wp(dev)) { 8889 rval = EROFS; 8890 mutex_enter(SD_MUTEX(un)); 8891 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8892 "write to cd or write protected media\n"); 8893 goto open_fail; 8894 } 8895 } 8896 mutex_enter(SD_MUTEX(un)); 8897 } 8898 } 8899 8900 /* 8901 * If opening in NDELAY/NONBLOCK mode, just return. 8902 * Check if disk is ready and has a valid geometry later. 
8903 */ 8904 if (!nodelay) { 8905 mutex_exit(SD_MUTEX(un)); 8906 rval = sd_ready_and_valid(un); 8907 mutex_enter(SD_MUTEX(un)); 8908 /* 8909 * Fail if device is not ready or if the number of disk 8910 * blocks is zero or negative for non CD devices. 8911 */ 8912 8913 nblks = 0; 8914 8915 if (rval == SD_READY_VALID && (!ISCD(un))) { 8916 /* if cmlb_partinfo fails, nblks remains 0 */ 8917 mutex_exit(SD_MUTEX(un)); 8918 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 8919 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 8920 mutex_enter(SD_MUTEX(un)); 8921 } 8922 8923 if ((rval != SD_READY_VALID) || 8924 (!ISCD(un) && nblks <= 0)) { 8925 rval = un->un_f_has_removable_media ? ENXIO : EIO; 8926 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8927 "device not ready or invalid disk block value\n"); 8928 goto open_fail; 8929 } 8930 #if defined(__i386) || defined(__amd64) 8931 } else { 8932 uchar_t *cp; 8933 /* 8934 * x86 requires special nodelay handling, so that p0 is 8935 * always defined and accessible. 8936 * Invalidate geometry only if device is not already open. 8937 */ 8938 cp = &un->un_ocmap.chkd[0]; 8939 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 8940 if (*cp != (uchar_t)0) { 8941 break; 8942 } 8943 cp++; 8944 } 8945 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 8946 mutex_exit(SD_MUTEX(un)); 8947 cmlb_invalidate(un->un_cmlbhandle, 8948 (void *)SD_PATH_DIRECT); 8949 mutex_enter(SD_MUTEX(un)); 8950 } 8951 8952 #endif 8953 } 8954 8955 if (otyp == OTYP_LYR) { 8956 un->un_ocmap.lyropen[part]++; 8957 } else { 8958 un->un_ocmap.regopen[otyp] |= partmask; 8959 } 8960 8961 /* Set up open and exclusive open flags */ 8962 if (flag & FEXCL) { 8963 un->un_exclopen |= (partmask); 8964 } 8965 8966 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8967 "open of part %d type %d\n", part, otyp); 8968 8969 mutex_exit(SD_MUTEX(un)); 8970 if (!nodelay) { 8971 sd_pm_exit(un); 8972 } 8973 8974 sema_v(&un->un_semoclose); 8975 8976 mutex_enter(&sd_detach_mutex); 8977 un->un_opens_in_progress--; 8978 mutex_exit(&sd_detach_mutex); 8979 8980 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 8981 return (DDI_SUCCESS); 8982 8983 excl_open_fail: 8984 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 8985 rval = EBUSY; 8986 8987 open_fail: 8988 mutex_exit(SD_MUTEX(un)); 8989 8990 /* 8991 * On a failed open we must exit the pm management. 8992 */ 8993 if (!nodelay) { 8994 sd_pm_exit(un); 8995 } 8996 open_failed_with_pm: 8997 sema_v(&un->un_semoclose); 8998 8999 mutex_enter(&sd_detach_mutex); 9000 un->un_opens_in_progress--; 9001 if (otyp == OTYP_LYR) { 9002 un->un_layer_count--; 9003 } 9004 mutex_exit(&sd_detach_mutex); 9005 9006 return (rval); 9007 } 9008 9009 9010 /* 9011 * Function: sdclose 9012 * 9013 * Description: Driver's close(9e) entry point function. 
9014 * 9015 * Arguments: dev - device number 9016 * flag - file status flag, informational only 9017 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9018 * cred_p - user credential pointer 9019 * 9020 * Return Code: ENXIO 9021 * 9022 * Context: Kernel thread context 9023 */ 9024 /* ARGSUSED */ 9025 static int 9026 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9027 { 9028 struct sd_lun *un; 9029 uchar_t *cp; 9030 int part; 9031 int nodelay; 9032 int rval = 0; 9033 9034 /* Validate the open type */ 9035 if (otyp >= OTYPCNT) { 9036 return (ENXIO); 9037 } 9038 9039 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9040 return (ENXIO); 9041 } 9042 9043 part = SDPART(dev); 9044 nodelay = flag & (FNDELAY | FNONBLOCK); 9045 9046 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9047 "sdclose: close of part %d type %d\n", part, otyp); 9048 9049 /* 9050 * We use a semaphore here in order to serialize 9051 * open and close requests on the device. 9052 */ 9053 sema_p(&un->un_semoclose); 9054 9055 mutex_enter(SD_MUTEX(un)); 9056 9057 /* Don't proceed if power is being changed. */ 9058 while (un->un_state == SD_STATE_PM_CHANGING) { 9059 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9060 } 9061 9062 if (un->un_exclopen & (1 << part)) { 9063 un->un_exclopen &= ~(1 << part); 9064 } 9065 9066 /* Update the open partition map */ 9067 if (otyp == OTYP_LYR) { 9068 un->un_ocmap.lyropen[part] -= 1; 9069 } else { 9070 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9071 } 9072 9073 cp = &un->un_ocmap.chkd[0]; 9074 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9075 if (*cp != NULL) { 9076 break; 9077 } 9078 cp++; 9079 } 9080 9081 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9082 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9083 9084 /* 9085 * We avoid persistance upon the last close, and set 9086 * the throttle back to the maximum. 9087 */ 9088 un->un_throttle = un->un_saved_throttle; 9089 9090 if (un->un_state == SD_STATE_OFFLINE) { 9091 if (un->un_f_is_fibre == FALSE) { 9092 scsi_log(SD_DEVINFO(un), sd_label, 9093 CE_WARN, "offline\n"); 9094 } 9095 mutex_exit(SD_MUTEX(un)); 9096 cmlb_invalidate(un->un_cmlbhandle, 9097 (void *)SD_PATH_DIRECT); 9098 mutex_enter(SD_MUTEX(un)); 9099 9100 } else { 9101 /* 9102 * Flush any outstanding writes in NVRAM cache. 9103 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9104 * cmd, it may not work for non-Pluto devices. 9105 * SYNCHRONIZE CACHE is not required for removables, 9106 * except DVD-RAM drives. 9107 * 9108 * Also note: because SYNCHRONIZE CACHE is currently 9109 * the only command issued here that requires the 9110 * drive be powered up, only do the power up before 9111 * sending the Sync Cache command. If additional 9112 * commands are added which require a powered up 9113 * drive, the following sequence may have to change. 9114 * 9115 * And finally, note that parallel SCSI on SPARC 9116 * only issues a Sync Cache to DVD-RAM, a newly 9117 * supported device. 
9118 */ 9119 #if defined(__i386) || defined(__amd64) 9120 if (un->un_f_sync_cache_supported || 9121 un->un_f_dvdram_writable_device == TRUE) { 9122 #else 9123 if (un->un_f_dvdram_writable_device == TRUE) { 9124 #endif 9125 mutex_exit(SD_MUTEX(un)); 9126 if (sd_pm_entry(un) == DDI_SUCCESS) { 9127 rval = 9128 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9129 NULL); 9130 /* ignore error if not supported */ 9131 if (rval == ENOTSUP) { 9132 rval = 0; 9133 } else if (rval != 0) { 9134 rval = EIO; 9135 } 9136 sd_pm_exit(un); 9137 } else { 9138 rval = EIO; 9139 } 9140 mutex_enter(SD_MUTEX(un)); 9141 } 9142 9143 /* 9144 * For devices which supports DOOR_LOCK, send an ALLOW 9145 * MEDIA REMOVAL command, but don't get upset if it 9146 * fails. We need to raise the power of the drive before 9147 * we can call sd_send_scsi_DOORLOCK() 9148 */ 9149 if (un->un_f_doorlock_supported) { 9150 mutex_exit(SD_MUTEX(un)); 9151 if (sd_pm_entry(un) == DDI_SUCCESS) { 9152 rval = sd_send_scsi_DOORLOCK(un, 9153 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9154 9155 sd_pm_exit(un); 9156 if (ISCD(un) && (rval != 0) && 9157 (nodelay != 0)) { 9158 rval = ENXIO; 9159 } 9160 } else { 9161 rval = EIO; 9162 } 9163 mutex_enter(SD_MUTEX(un)); 9164 } 9165 9166 /* 9167 * If a device has removable media, invalidate all 9168 * parameters related to media, such as geometry, 9169 * blocksize, and blockcount. 9170 */ 9171 if (un->un_f_has_removable_media) { 9172 sr_ejected(un); 9173 } 9174 9175 /* 9176 * Destroy the cache (if it exists) which was 9177 * allocated for the write maps since this is 9178 * the last close for this media. 9179 */ 9180 if (un->un_wm_cache) { 9181 /* 9182 * Check if there are pending commands. 9183 * and if there are give a warning and 9184 * do not destroy the cache. 9185 */ 9186 if (un->un_ncmds_in_driver > 0) { 9187 scsi_log(SD_DEVINFO(un), 9188 sd_label, CE_WARN, 9189 "Unable to clean up memory " 9190 "because of pending I/O\n"); 9191 } else { 9192 kmem_cache_destroy( 9193 un->un_wm_cache); 9194 un->un_wm_cache = NULL; 9195 } 9196 } 9197 mutex_exit(SD_MUTEX(un)); 9198 (void) cmlb_close(un->un_cmlbhandle, 9199 (void *)SD_PATH_DIRECT); 9200 mutex_enter(SD_MUTEX(un)); 9201 9202 } 9203 } 9204 9205 mutex_exit(SD_MUTEX(un)); 9206 sema_v(&un->un_semoclose); 9207 9208 if (otyp == OTYP_LYR) { 9209 mutex_enter(&sd_detach_mutex); 9210 /* 9211 * The detach routine may run when the layer count 9212 * drops to zero. 9213 */ 9214 un->un_layer_count--; 9215 mutex_exit(&sd_detach_mutex); 9216 } 9217 9218 return (rval); 9219 } 9220 9221 9222 /* 9223 * Function: sd_ready_and_valid 9224 * 9225 * Description: Test if device is ready and has a valid geometry. 9226 * 9227 * Arguments: dev - device number 9228 * un - driver soft state (unit) structure 9229 * 9230 * Return Code: SD_READY_VALID ready and valid label 9231 * SD_NOT_READY_VALID not ready, no label 9232 * SD_RESERVED_BY_OTHERS reservation conflict 9233 * 9234 * Context: Never called at interrupt context. 9235 */ 9236 9237 static int 9238 sd_ready_and_valid(struct sd_lun *un) 9239 { 9240 struct sd_errstats *stp; 9241 uint64_t capacity; 9242 uint_t lbasize; 9243 int rval = SD_READY_VALID; 9244 char name_str[48]; 9245 int is_valid; 9246 9247 ASSERT(un != NULL); 9248 ASSERT(!mutex_owned(SD_MUTEX(un))); 9249 9250 mutex_enter(SD_MUTEX(un)); 9251 /* 9252 * If a device has removable media, we must check if media is 9253 * ready when checking if this device is ready and valid. 
9254 */ 9255 if (un->un_f_has_removable_media) { 9256 mutex_exit(SD_MUTEX(un)); 9257 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9258 rval = SD_NOT_READY_VALID; 9259 mutex_enter(SD_MUTEX(un)); 9260 goto done; 9261 } 9262 9263 is_valid = SD_IS_VALID_LABEL(un); 9264 mutex_enter(SD_MUTEX(un)); 9265 if (!is_valid || 9266 (un->un_f_blockcount_is_valid == FALSE) || 9267 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9268 9269 /* capacity has to be read every open. */ 9270 mutex_exit(SD_MUTEX(un)); 9271 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9272 &lbasize, SD_PATH_DIRECT) != 0) { 9273 cmlb_invalidate(un->un_cmlbhandle, 9274 (void *)SD_PATH_DIRECT); 9275 mutex_enter(SD_MUTEX(un)); 9276 rval = SD_NOT_READY_VALID; 9277 goto done; 9278 } else { 9279 mutex_enter(SD_MUTEX(un)); 9280 sd_update_block_info(un, lbasize, capacity); 9281 } 9282 } 9283 9284 /* 9285 * Check if the media in the device is writable or not. 9286 */ 9287 if (!is_valid && ISCD(un)) { 9288 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9289 } 9290 9291 } else { 9292 /* 9293 * Do a test unit ready to clear any unit attention from non-cd 9294 * devices. 9295 */ 9296 mutex_exit(SD_MUTEX(un)); 9297 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9298 mutex_enter(SD_MUTEX(un)); 9299 } 9300 9301 9302 /* 9303 * If this is a non 512 block device, allocate space for 9304 * the wmap cache. This is being done here since every time 9305 * a media is changed this routine will be called and the 9306 * block size is a function of media rather than device. 9307 */ 9308 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9309 if (!(un->un_wm_cache)) { 9310 (void) snprintf(name_str, sizeof (name_str), 9311 "%s%d_cache", 9312 ddi_driver_name(SD_DEVINFO(un)), 9313 ddi_get_instance(SD_DEVINFO(un))); 9314 un->un_wm_cache = kmem_cache_create( 9315 name_str, sizeof (struct sd_w_map), 9316 8, sd_wm_cache_constructor, 9317 sd_wm_cache_destructor, NULL, 9318 (void *)un, NULL, 0); 9319 if (!(un->un_wm_cache)) { 9320 rval = ENOMEM; 9321 goto done; 9322 } 9323 } 9324 } 9325 9326 if (un->un_state == SD_STATE_NORMAL) { 9327 /* 9328 * If the target is not yet ready here (defined by a TUR 9329 * failure), invalidate the geometry and print an 'offline' 9330 * message. This is a legacy message, as the state of the 9331 * target is not actually changed to SD_STATE_OFFLINE. 9332 * 9333 * If the TUR fails for EACCES (Reservation Conflict), 9334 * SD_RESERVED_BY_OTHERS will be returned to indicate 9335 * reservation conflict. If the TUR fails for other 9336 * reasons, SD_NOT_READY_VALID will be returned. 
9337 */ 9338 int err; 9339 9340 mutex_exit(SD_MUTEX(un)); 9341 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9342 mutex_enter(SD_MUTEX(un)); 9343 9344 if (err != 0) { 9345 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9346 "offline or reservation conflict\n"); 9347 mutex_exit(SD_MUTEX(un)); 9348 cmlb_invalidate(un->un_cmlbhandle, 9349 (void *)SD_PATH_DIRECT); 9350 mutex_enter(SD_MUTEX(un)); 9351 if (err == EACCES) { 9352 rval = SD_RESERVED_BY_OTHERS; 9353 } else { 9354 rval = SD_NOT_READY_VALID; 9355 } 9356 goto done; 9357 } 9358 } 9359 9360 if (un->un_f_format_in_progress == FALSE) { 9361 mutex_exit(SD_MUTEX(un)); 9362 if (cmlb_validate(un->un_cmlbhandle, 0, 9363 (void *)SD_PATH_DIRECT) != 0) { 9364 rval = SD_NOT_READY_VALID; 9365 mutex_enter(SD_MUTEX(un)); 9366 goto done; 9367 } 9368 if (un->un_f_pkstats_enabled) { 9369 sd_set_pstats(un); 9370 SD_TRACE(SD_LOG_IO_PARTITION, un, 9371 "sd_ready_and_valid: un:0x%p pstats created and " 9372 "set\n", un); 9373 } 9374 mutex_enter(SD_MUTEX(un)); 9375 } 9376 9377 /* 9378 * If this device supports DOOR_LOCK command, try and send 9379 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9380 * if it fails. For a CD, however, it is an error 9381 */ 9382 if (un->un_f_doorlock_supported) { 9383 mutex_exit(SD_MUTEX(un)); 9384 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9385 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9386 rval = SD_NOT_READY_VALID; 9387 mutex_enter(SD_MUTEX(un)); 9388 goto done; 9389 } 9390 mutex_enter(SD_MUTEX(un)); 9391 } 9392 9393 /* The state has changed, inform the media watch routines */ 9394 un->un_mediastate = DKIO_INSERTED; 9395 cv_broadcast(&un->un_state_cv); 9396 rval = SD_READY_VALID; 9397 9398 done: 9399 9400 /* 9401 * Initialize the capacity kstat value, if no media previously 9402 * (capacity kstat is 0) and a media has been inserted 9403 * (un_blockcount > 0). 9404 */ 9405 if (un->un_errstats != NULL) { 9406 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9407 if ((stp->sd_capacity.value.ui64 == 0) && 9408 (un->un_f_blockcount_is_valid == TRUE)) { 9409 stp->sd_capacity.value.ui64 = 9410 (uint64_t)((uint64_t)un->un_blockcount * 9411 un->un_sys_blocksize); 9412 } 9413 } 9414 9415 mutex_exit(SD_MUTEX(un)); 9416 return (rval); 9417 } 9418 9419 9420 /* 9421 * Function: sdmin 9422 * 9423 * Description: Routine to limit the size of a data transfer. Used in 9424 * conjunction with physio(9F). 9425 * 9426 * Arguments: bp - pointer to the indicated buf(9S) struct. 9427 * 9428 * Context: Kernel thread context. 9429 */ 9430 9431 static void 9432 sdmin(struct buf *bp) 9433 { 9434 struct sd_lun *un; 9435 int instance; 9436 9437 instance = SDUNIT(bp->b_edev); 9438 9439 un = ddi_get_soft_state(sd_state, instance); 9440 ASSERT(un != NULL); 9441 9442 if (bp->b_bcount > un->un_max_xfer_size) { 9443 bp->b_bcount = un->un_max_xfer_size; 9444 } 9445 } 9446 9447 9448 /* 9449 * Function: sdread 9450 * 9451 * Description: Driver's read(9e) entry point function. 9452 * 9453 * Arguments: dev - device number 9454 * uio - structure pointer describing where data is to be stored 9455 * in user's space 9456 * cred_p - user credential pointer 9457 * 9458 * Return Code: ENXIO 9459 * EIO 9460 * EINVAL 9461 * value returned by physio 9462 * 9463 * Context: Kernel thread context. 
9464 */ 9465 /* ARGSUSED */ 9466 static int 9467 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9468 { 9469 struct sd_lun *un = NULL; 9470 int secmask; 9471 int err; 9472 9473 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9474 return (ENXIO); 9475 } 9476 9477 ASSERT(!mutex_owned(SD_MUTEX(un))); 9478 9479 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9480 mutex_enter(SD_MUTEX(un)); 9481 /* 9482 * Because the call to sd_ready_and_valid will issue I/O we 9483 * must wait here if either the device is suspended or 9484 * if it's power level is changing. 9485 */ 9486 while ((un->un_state == SD_STATE_SUSPENDED) || 9487 (un->un_state == SD_STATE_PM_CHANGING)) { 9488 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9489 } 9490 un->un_ncmds_in_driver++; 9491 mutex_exit(SD_MUTEX(un)); 9492 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9493 mutex_enter(SD_MUTEX(un)); 9494 un->un_ncmds_in_driver--; 9495 ASSERT(un->un_ncmds_in_driver >= 0); 9496 mutex_exit(SD_MUTEX(un)); 9497 return (EIO); 9498 } 9499 mutex_enter(SD_MUTEX(un)); 9500 un->un_ncmds_in_driver--; 9501 ASSERT(un->un_ncmds_in_driver >= 0); 9502 mutex_exit(SD_MUTEX(un)); 9503 } 9504 9505 /* 9506 * Read requests are restricted to multiples of the system block size. 9507 */ 9508 secmask = un->un_sys_blocksize - 1; 9509 9510 if (uio->uio_loffset & ((offset_t)(secmask))) { 9511 SD_ERROR(SD_LOG_READ_WRITE, un, 9512 "sdread: file offset not modulo %d\n", 9513 un->un_sys_blocksize); 9514 err = EINVAL; 9515 } else if (uio->uio_iov->iov_len & (secmask)) { 9516 SD_ERROR(SD_LOG_READ_WRITE, un, 9517 "sdread: transfer length not modulo %d\n", 9518 un->un_sys_blocksize); 9519 err = EINVAL; 9520 } else { 9521 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9522 } 9523 return (err); 9524 } 9525 9526 9527 /* 9528 * Function: sdwrite 9529 * 9530 * Description: Driver's write(9e) entry point function. 9531 * 9532 * Arguments: dev - device number 9533 * uio - structure pointer describing where data is stored in 9534 * user's space 9535 * cred_p - user credential pointer 9536 * 9537 * Return Code: ENXIO 9538 * EIO 9539 * EINVAL 9540 * value returned by physio 9541 * 9542 * Context: Kernel thread context. 9543 */ 9544 /* ARGSUSED */ 9545 static int 9546 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9547 { 9548 struct sd_lun *un = NULL; 9549 int secmask; 9550 int err; 9551 9552 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9553 return (ENXIO); 9554 } 9555 9556 ASSERT(!mutex_owned(SD_MUTEX(un))); 9557 9558 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9559 mutex_enter(SD_MUTEX(un)); 9560 /* 9561 * Because the call to sd_ready_and_valid will issue I/O we 9562 * must wait here if either the device is suspended or 9563 * if it's power level is changing. 9564 */ 9565 while ((un->un_state == SD_STATE_SUSPENDED) || 9566 (un->un_state == SD_STATE_PM_CHANGING)) { 9567 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9568 } 9569 un->un_ncmds_in_driver++; 9570 mutex_exit(SD_MUTEX(un)); 9571 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9572 mutex_enter(SD_MUTEX(un)); 9573 un->un_ncmds_in_driver--; 9574 ASSERT(un->un_ncmds_in_driver >= 0); 9575 mutex_exit(SD_MUTEX(un)); 9576 return (EIO); 9577 } 9578 mutex_enter(SD_MUTEX(un)); 9579 un->un_ncmds_in_driver--; 9580 ASSERT(un->un_ncmds_in_driver >= 0); 9581 mutex_exit(SD_MUTEX(un)); 9582 } 9583 9584 /* 9585 * Write requests are restricted to multiples of the system block size. 
9586 */ 9587 secmask = un->un_sys_blocksize - 1; 9588 9589 if (uio->uio_loffset & ((offset_t)(secmask))) { 9590 SD_ERROR(SD_LOG_READ_WRITE, un, 9591 "sdwrite: file offset not modulo %d\n", 9592 un->un_sys_blocksize); 9593 err = EINVAL; 9594 } else if (uio->uio_iov->iov_len & (secmask)) { 9595 SD_ERROR(SD_LOG_READ_WRITE, un, 9596 "sdwrite: transfer length not modulo %d\n", 9597 un->un_sys_blocksize); 9598 err = EINVAL; 9599 } else { 9600 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9601 } 9602 return (err); 9603 } 9604 9605 9606 /* 9607 * Function: sdaread 9608 * 9609 * Description: Driver's aread(9e) entry point function. 9610 * 9611 * Arguments: dev - device number 9612 * aio - structure pointer describing where data is to be stored 9613 * cred_p - user credential pointer 9614 * 9615 * Return Code: ENXIO 9616 * EIO 9617 * EINVAL 9618 * value returned by aphysio 9619 * 9620 * Context: Kernel thread context. 9621 */ 9622 /* ARGSUSED */ 9623 static int 9624 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9625 { 9626 struct sd_lun *un = NULL; 9627 struct uio *uio = aio->aio_uio; 9628 int secmask; 9629 int err; 9630 9631 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9632 return (ENXIO); 9633 } 9634 9635 ASSERT(!mutex_owned(SD_MUTEX(un))); 9636 9637 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9638 mutex_enter(SD_MUTEX(un)); 9639 /* 9640 * Because the call to sd_ready_and_valid will issue I/O we 9641 * must wait here if either the device is suspended or 9642 * if it's power level is changing. 9643 */ 9644 while ((un->un_state == SD_STATE_SUSPENDED) || 9645 (un->un_state == SD_STATE_PM_CHANGING)) { 9646 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9647 } 9648 un->un_ncmds_in_driver++; 9649 mutex_exit(SD_MUTEX(un)); 9650 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9651 mutex_enter(SD_MUTEX(un)); 9652 un->un_ncmds_in_driver--; 9653 ASSERT(un->un_ncmds_in_driver >= 0); 9654 mutex_exit(SD_MUTEX(un)); 9655 return (EIO); 9656 } 9657 mutex_enter(SD_MUTEX(un)); 9658 un->un_ncmds_in_driver--; 9659 ASSERT(un->un_ncmds_in_driver >= 0); 9660 mutex_exit(SD_MUTEX(un)); 9661 } 9662 9663 /* 9664 * Read requests are restricted to multiples of the system block size. 9665 */ 9666 secmask = un->un_sys_blocksize - 1; 9667 9668 if (uio->uio_loffset & ((offset_t)(secmask))) { 9669 SD_ERROR(SD_LOG_READ_WRITE, un, 9670 "sdaread: file offset not modulo %d\n", 9671 un->un_sys_blocksize); 9672 err = EINVAL; 9673 } else if (uio->uio_iov->iov_len & (secmask)) { 9674 SD_ERROR(SD_LOG_READ_WRITE, un, 9675 "sdaread: transfer length not modulo %d\n", 9676 un->un_sys_blocksize); 9677 err = EINVAL; 9678 } else { 9679 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 9680 } 9681 return (err); 9682 } 9683 9684 9685 /* 9686 * Function: sdawrite 9687 * 9688 * Description: Driver's awrite(9e) entry point function. 9689 * 9690 * Arguments: dev - device number 9691 * aio - structure pointer describing where data is stored 9692 * cred_p - user credential pointer 9693 * 9694 * Return Code: ENXIO 9695 * EIO 9696 * EINVAL 9697 * value returned by aphysio 9698 * 9699 * Context: Kernel thread context. 
9700 */ 9701 /* ARGSUSED */ 9702 static int 9703 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9704 { 9705 struct sd_lun *un = NULL; 9706 struct uio *uio = aio->aio_uio; 9707 int secmask; 9708 int err; 9709 9710 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9711 return (ENXIO); 9712 } 9713 9714 ASSERT(!mutex_owned(SD_MUTEX(un))); 9715 9716 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9717 mutex_enter(SD_MUTEX(un)); 9718 /* 9719 * Because the call to sd_ready_and_valid will issue I/O we 9720 * must wait here if either the device is suspended or 9721 * if it's power level is changing. 9722 */ 9723 while ((un->un_state == SD_STATE_SUSPENDED) || 9724 (un->un_state == SD_STATE_PM_CHANGING)) { 9725 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9726 } 9727 un->un_ncmds_in_driver++; 9728 mutex_exit(SD_MUTEX(un)); 9729 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9730 mutex_enter(SD_MUTEX(un)); 9731 un->un_ncmds_in_driver--; 9732 ASSERT(un->un_ncmds_in_driver >= 0); 9733 mutex_exit(SD_MUTEX(un)); 9734 return (EIO); 9735 } 9736 mutex_enter(SD_MUTEX(un)); 9737 un->un_ncmds_in_driver--; 9738 ASSERT(un->un_ncmds_in_driver >= 0); 9739 mutex_exit(SD_MUTEX(un)); 9740 } 9741 9742 /* 9743 * Write requests are restricted to multiples of the system block size. 9744 */ 9745 secmask = un->un_sys_blocksize - 1; 9746 9747 if (uio->uio_loffset & ((offset_t)(secmask))) { 9748 SD_ERROR(SD_LOG_READ_WRITE, un, 9749 "sdawrite: file offset not modulo %d\n", 9750 un->un_sys_blocksize); 9751 err = EINVAL; 9752 } else if (uio->uio_iov->iov_len & (secmask)) { 9753 SD_ERROR(SD_LOG_READ_WRITE, un, 9754 "sdawrite: transfer length not modulo %d\n", 9755 un->un_sys_blocksize); 9756 err = EINVAL; 9757 } else { 9758 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 9759 } 9760 return (err); 9761 } 9762 9763 9764 9765 9766 9767 /* 9768 * Driver IO processing follows the following sequence: 9769 * 9770 * sdioctl(9E) sdstrategy(9E) biodone(9F) 9771 * | | ^ 9772 * v v | 9773 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 9774 * | | | | 9775 * v | | | 9776 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 9777 * | | ^ ^ 9778 * v v | | 9779 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 9780 * | | | | 9781 * +---+ | +------------+ +-------+ 9782 * | | | | 9783 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9784 * | v | | 9785 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 9786 * | | ^ | 9787 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9788 * | v | | 9789 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 9790 * | | ^ | 9791 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9792 * | v | | 9793 * | sd_checksum_iostart() sd_checksum_iodone() | 9794 * | | ^ | 9795 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 9796 * | v | | 9797 * | sd_pm_iostart() sd_pm_iodone() | 9798 * | | ^ | 9799 * | | | | 9800 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 9801 * | ^ 9802 * v | 9803 * sd_core_iostart() | 9804 * | | 9805 * | +------>(*destroypkt)() 9806 * +-> sd_start_cmds() <-+ | | 9807 * | | | v 9808 * | | | scsi_destroy_pkt(9F) 9809 * | | | 9810 * +->(*initpkt)() +- sdintr() 9811 * | | | | 9812 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 9813 * | +-> scsi_setup_cdb(9F) | 9814 * | | 9815 * +--> scsi_transport(9F) | 9816 * | | 9817 * +----> SCSA ---->+ 9818 * 9819 * 9820 * This code is based upon the following presumtions: 9821 * 9822 * - iostart and iodone functions operate on buf(9S) structures. 
These
9823 *     functions perform the necessary operations on the buf(9S) and pass
9824 *     them along to the next function in the chain by using the macros
9825 *     SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
9826 *     (for iodone side functions).
9827 *
9828 *   - The iostart side functions may sleep. The iodone side functions
9829 *     are called under interrupt context and may NOT sleep. Therefore
9830 *     iodone side functions also may not call iostart side functions.
9831 *     (NOTE: iostart side functions should NOT sleep for memory, as
9832 *     this could result in deadlock.)
9833 *
9834 *   - An iostart side function may call its corresponding iodone side
9835 *     function directly (if necessary).
9836 *
9837 *   - In the event of an error, an iostart side function can return a buf(9S)
9838 *     to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
9839 *     b_error in the usual way of course).
9840 *
9841 *   - The taskq mechanism may be used by the iodone side functions to dispatch
9842 *     requests to the iostart side functions. The iostart side functions in
9843 *     this case would be called under the context of a taskq thread, so it's
9844 *     OK for them to block/sleep/spin in this case.
9845 *
9846 *   - iostart side functions may allocate "shadow" buf(9S) structs and
9847 *     pass them along to the next function in the chain. The corresponding
9848 *     iodone side functions must coalesce the "shadow" bufs and return
9849 *     the "original" buf to the next higher layer.
9850 *
9851 *   - The b_private field of the buf(9S) struct holds a pointer to
9852 *     an sd_xbuf struct, which contains information needed to
9853 *     construct the scsi_pkt for the command.
9854 *
9855 *   - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
9856 *     layer must acquire & release the SD_MUTEX(un) as needed.
9857 */
9858
9859
9860 /*
9861 * Create taskq for all targets in the system. This is created at
9862 * _init(9E) and destroyed at _fini(9E).
9863 *
9864 * Note: here we set the minalloc to a reasonably high number to ensure that
9865 * we will have an adequate supply of task entries available at interrupt time.
9866 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
9867 * sd_taskq_create(). Since we do not want to sleep for allocations at
9868 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
9869 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
9870 * requests at any one instant in time.
9871 */
9872 #define	SD_TASKQ_NUMTHREADS	8
9873 #define	SD_TASKQ_MINALLOC	256
9874 #define	SD_TASKQ_MAXALLOC	256
9875
9876 static taskq_t	*sd_tq = NULL;
9877 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
9878
9879 static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
9880 static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
9881
9882 /*
9883 * The following task queue is being created for the write part of
9884 * read-modify-write of non-512 block size devices.
9885 * Limit the number of threads to 1 for now. This number has been chosen
9886 * considering the fact that it currently applies only to DVD-RAM and MO
9887 * drives, for which performance is not the main criterion at this stage.
9888 * Note: It remains to be explored whether a single taskq could be used
 * in the future.
9889 */
9890 #define	SD_WMR_TASKQ_NUMTHREADS	1
9891 static taskq_t	*sd_wmr_tq = NULL;
9892 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
9893
9894 /*
9895 *    Function: sd_taskq_create
9896 *
9897 * Description: Create taskq thread(s) and preallocate task entries
9898 *
9899 * Return Code: None. The taskq pointers are stored in sd_tq and sd_wmr_tq.
9900 *
9901 *     Context: Can sleep. Requires blockable context.
9902 *
9903 *       Notes: - The taskq() facility currently is NOT part of the DDI.
9904 *		  (definitely NOT recommended for 3rd-party drivers!) :-)
9905 *		- taskq_create() will block for memory; it will also panic
9906 *		  if it cannot create the requested number of threads.
9907 *		- Currently taskq_create() creates threads that cannot be
9908 *		  swapped.
9909 *		- We use TASKQ_PREPOPULATE to ensure we have an adequate
9910 *		  supply of taskq entries at interrupt time (ie, so that we
9911 *		  do not have to sleep for memory)
9912 */
9913
9914 static void
9915 sd_taskq_create(void)
9916 {
9917 	char	taskq_name[TASKQ_NAMELEN];
9918
9919 	ASSERT(sd_tq == NULL);
9920 	ASSERT(sd_wmr_tq == NULL);
9921
9922 	(void) snprintf(taskq_name, sizeof (taskq_name),
9923 	    "%s_drv_taskq", sd_label);
9924 	sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
9925 	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
9926 	    TASKQ_PREPOPULATE));
9927
9928 	(void) snprintf(taskq_name, sizeof (taskq_name),
9929 	    "%s_rmw_taskq", sd_label);
9930 	sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
9931 	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
9932 	    TASKQ_PREPOPULATE));
9933 }
9934
9935
9936 /*
9937 *    Function: sd_taskq_delete
9938 *
9939 * Description: Complementary cleanup routine for sd_taskq_create().
9940 *
9941 *     Context: Kernel thread context.
9942 */
9943
9944 static void
9945 sd_taskq_delete(void)
9946 {
9947 	ASSERT(sd_tq != NULL);
9948 	ASSERT(sd_wmr_tq != NULL);
9949 	taskq_destroy(sd_tq);
9950 	taskq_destroy(sd_wmr_tq);
9951 	sd_tq = NULL;
9952 	sd_wmr_tq = NULL;
9953 }
9954
9955
9956 /*
9957 *    Function: sdstrategy
9958 *
9959 * Description: Driver's strategy (9E) entry point function.
9960 *
9961 *   Arguments: bp - pointer to buf(9S)
9962 *
9963 * Return Code: Always returns zero
9964 *
9965 *     Context: Kernel thread context.
9966 */
9967
9968 static int
9969 sdstrategy(struct buf *bp)
9970 {
9971 	struct sd_lun *un;
9972
9973 	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
9974 	if (un == NULL) {
9975 		bioerror(bp, EIO);
9976 		bp->b_resid = bp->b_bcount;
9977 		biodone(bp);
9978 		return (0);
9979 	}
9980 	/* As was done in the past, fail new commands if the state is dumping. */
9981 	if (un->un_state == SD_STATE_DUMPING) {
9982 		bioerror(bp, ENXIO);
9983 		bp->b_resid = bp->b_bcount;
9984 		biodone(bp);
9985 		return (0);
9986 	}
9987
9988 	ASSERT(!mutex_owned(SD_MUTEX(un)));
9989
9990 	/*
9991 	 * Commands may sneak in while we released the mutex in
9992 	 * DDI_SUSPEND; we should block new commands. However, old
9993 	 * commands that are still in the driver at this point should
9994 	 * still be allowed to drain.
9995 	 */
9996 	mutex_enter(SD_MUTEX(un));
9997 	/*
9998 	 * Must wait here if either the device is suspended or
9999 	 * if its power level is changing.
10000 	 */
10001 	while ((un->un_state == SD_STATE_SUSPENDED) ||
10002 	    (un->un_state == SD_STATE_PM_CHANGING)) {
10003 		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10004 	}
10005
10006 	un->un_ncmds_in_driver++;
10007
10008 	/*
10009 	 * atapi: Since we are running the CD for now in PIO mode we need to
10010 	 * call bp_mapin here to avoid bp_mapin being called in interrupt
10011 	 * context under the HBA's init_pkt routine.
10012 	 */
10013 	if (un->un_f_cfg_is_atapi == TRUE) {
10014 		mutex_exit(SD_MUTEX(un));
10015 		bp_mapin(bp);
10016 		mutex_enter(SD_MUTEX(un));
10017 	}
10018 	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
10019 	    un->un_ncmds_in_driver);
10020
10021 	mutex_exit(SD_MUTEX(un));
10022
10023 	/*
10024 	 * This will (eventually) allocate the sd_xbuf area and
10025 	 * call sd_xbuf_strategy(). We just want to return the
10026 	 * result of ddi_xbuf_qstrategy so that we have an optimized
10027 	 * tail call which saves us a stack frame.
10028 	 */
10029 	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
10030 }
10031
10032
10033 /*
10034 *    Function: sd_xbuf_strategy
10035 *
10036 * Description: Function for initiating IO operations via the
10037 *		ddi_xbuf_qstrategy() mechanism.
10038 *
10039 *     Context: Kernel thread context.
10040 */
10041
10042 static void
10043 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
10044 {
10045 	struct sd_lun *un = arg;
10046
10047 	ASSERT(bp != NULL);
10048 	ASSERT(xp != NULL);
10049 	ASSERT(un != NULL);
10050 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10051
10052 	/*
10053 	 * Initialize the fields in the xbuf and save a pointer to the
10054 	 * xbuf in bp->b_private.
10055 	 */
10056 	sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
10057
10058 	/* Send the buf down the iostart chain */
10059 	SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
10060 }
10061
10062
10063 /*
10064 *    Function: sd_xbuf_init
10065 *
10066 * Description: Prepare the given sd_xbuf struct for use.
10067 *
10068 *   Arguments: un - ptr to softstate
10069 *		bp - ptr to associated buf(9S)
10070 *		xp - ptr to associated sd_xbuf
10071 *		chain_type - IO chain type to use:
10072 *			SD_CHAIN_NULL
10073 *			SD_CHAIN_BUFIO
10074 *			SD_CHAIN_USCSI
10075 *			SD_CHAIN_DIRECT
10076 *			SD_CHAIN_DIRECT_PRIORITY
10077 *		pktinfop - ptr to private data struct for scsi_pkt(9S)
10078 *			initialization; may be NULL if none.
10079 *
10080 *     Context: Kernel thread context
10081 */
10082
10083 static void
10084 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
10085 	uchar_t chain_type, void *pktinfop)
10086 {
10087 	int index;
10088
10089 	ASSERT(un != NULL);
10090 	ASSERT(bp != NULL);
10091 	ASSERT(xp != NULL);
10092
10093 	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
10094 	    bp, chain_type);
10095
10096 	xp->xb_un	= un;
10097 	xp->xb_pktp	= NULL;
10098 	xp->xb_pktinfo	= pktinfop;
10099 	xp->xb_private	= bp->b_private;
10100 	xp->xb_blkno	= (daddr_t)bp->b_blkno;
10101
10102 	/*
10103 	 * Set up the iostart and iodone chain indexes in the xbuf, based
10104 	 * upon the specified chain type to use.
10105 	 */
10106 	switch (chain_type) {
10107 	case SD_CHAIN_NULL:
10108 		/*
10109 		 * Fall thru to just use the values for the buf type, even
10110 		 * though for the NULL chain these values will never be used.
10111 */ 10112 /* FALLTHRU */ 10113 case SD_CHAIN_BUFIO: 10114 index = un->un_buf_chain_type; 10115 break; 10116 case SD_CHAIN_USCSI: 10117 index = un->un_uscsi_chain_type; 10118 break; 10119 case SD_CHAIN_DIRECT: 10120 index = un->un_direct_chain_type; 10121 break; 10122 case SD_CHAIN_DIRECT_PRIORITY: 10123 index = un->un_priority_chain_type; 10124 break; 10125 default: 10126 /* We're really broken if we ever get here... */ 10127 panic("sd_xbuf_init: illegal chain type!"); 10128 /*NOTREACHED*/ 10129 } 10130 10131 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10132 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10133 10134 /* 10135 * It might be a bit easier to simply bzero the entire xbuf above, 10136 * but it turns out that since we init a fair number of members anyway, 10137 * we save a fair number cycles by doing explicit assignment of zero. 10138 */ 10139 xp->xb_pkt_flags = 0; 10140 xp->xb_dma_resid = 0; 10141 xp->xb_retry_count = 0; 10142 xp->xb_victim_retry_count = 0; 10143 xp->xb_ua_retry_count = 0; 10144 xp->xb_sense_bp = NULL; 10145 xp->xb_sense_status = 0; 10146 xp->xb_sense_state = 0; 10147 xp->xb_sense_resid = 0; 10148 10149 bp->b_private = xp; 10150 bp->b_flags &= ~(B_DONE | B_ERROR); 10151 bp->b_resid = 0; 10152 bp->av_forw = NULL; 10153 bp->av_back = NULL; 10154 bioerror(bp, 0); 10155 10156 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10157 } 10158 10159 10160 /* 10161 * Function: sd_uscsi_strategy 10162 * 10163 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10164 * 10165 * Arguments: bp - buf struct ptr 10166 * 10167 * Return Code: Always returns 0 10168 * 10169 * Context: Kernel thread context 10170 */ 10171 10172 static int 10173 sd_uscsi_strategy(struct buf *bp) 10174 { 10175 struct sd_lun *un; 10176 struct sd_uscsi_info *uip; 10177 struct sd_xbuf *xp; 10178 uchar_t chain_type; 10179 10180 ASSERT(bp != NULL); 10181 10182 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10183 if (un == NULL) { 10184 bioerror(bp, EIO); 10185 bp->b_resid = bp->b_bcount; 10186 biodone(bp); 10187 return (0); 10188 } 10189 10190 ASSERT(!mutex_owned(SD_MUTEX(un))); 10191 10192 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10193 10194 mutex_enter(SD_MUTEX(un)); 10195 /* 10196 * atapi: Since we are running the CD for now in PIO mode we need to 10197 * call bp_mapin here to avoid bp_mapin called interrupt context under 10198 * the HBA's init_pkt routine. 
10199 */ 10200 if (un->un_f_cfg_is_atapi == TRUE) { 10201 mutex_exit(SD_MUTEX(un)); 10202 bp_mapin(bp); 10203 mutex_enter(SD_MUTEX(un)); 10204 } 10205 un->un_ncmds_in_driver++; 10206 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10207 un->un_ncmds_in_driver); 10208 mutex_exit(SD_MUTEX(un)); 10209 10210 /* 10211 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10212 */ 10213 ASSERT(bp->b_private != NULL); 10214 uip = (struct sd_uscsi_info *)bp->b_private; 10215 10216 switch (uip->ui_flags) { 10217 case SD_PATH_DIRECT: 10218 chain_type = SD_CHAIN_DIRECT; 10219 break; 10220 case SD_PATH_DIRECT_PRIORITY: 10221 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10222 break; 10223 default: 10224 chain_type = SD_CHAIN_USCSI; 10225 break; 10226 } 10227 10228 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 10229 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10230 10231 /* Use the index obtained within xbuf_init */ 10232 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10233 10234 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10235 10236 return (0); 10237 } 10238 10239 /* 10240 * Function: sd_send_scsi_cmd 10241 * 10242 * Description: Runs a USCSI command for user (when called thru sdioctl), 10243 * or for the driver 10244 * 10245 * Arguments: dev - the dev_t for the device 10246 * incmd - ptr to a valid uscsi_cmd struct 10247 * flag - bit flag, indicating open settings, 32/64 bit type 10248 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10249 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10250 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10251 * to use the USCSI "direct" chain and bypass the normal 10252 * command waitq. 10253 * 10254 * Return Code: 0 - successful completion of the given command 10255 * EIO - scsi_uscsi_handle_command() failed 10256 * ENXIO - soft state not found for specified dev 10257 * EINVAL 10258 * EFAULT - copyin/copyout error 10259 * return code of scsi_uscsi_handle_command(): 10260 * EIO 10261 * ENXIO 10262 * EACCES 10263 * 10264 * Context: Waits for command to complete. Can sleep. 
10265 */ 10266 10267 static int 10268 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10269 enum uio_seg dataspace, int path_flag) 10270 { 10271 struct sd_uscsi_info *uip; 10272 struct uscsi_cmd *uscmd; 10273 struct sd_lun *un; 10274 int format = 0; 10275 int rval; 10276 10277 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10278 if (un == NULL) { 10279 return (ENXIO); 10280 } 10281 10282 ASSERT(!mutex_owned(SD_MUTEX(un))); 10283 10284 #ifdef SDDEBUG 10285 switch (dataspace) { 10286 case UIO_USERSPACE: 10287 SD_TRACE(SD_LOG_IO, un, 10288 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10289 break; 10290 case UIO_SYSSPACE: 10291 SD_TRACE(SD_LOG_IO, un, 10292 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10293 break; 10294 default: 10295 SD_TRACE(SD_LOG_IO, un, 10296 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10297 break; 10298 } 10299 #endif 10300 10301 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10302 SD_ADDRESS(un), &uscmd); 10303 if (rval != 0) { 10304 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 10305 "scsi_uscsi_alloc_and_copyin failed\n", un); 10306 return (rval); 10307 } 10308 10309 if ((uscmd->uscsi_cdb != NULL) && 10310 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10311 mutex_enter(SD_MUTEX(un)); 10312 un->un_f_format_in_progress = TRUE; 10313 mutex_exit(SD_MUTEX(un)); 10314 format = 1; 10315 } 10316 10317 /* 10318 * Allocate an sd_uscsi_info struct and fill it with the info 10319 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10320 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10321 * since we allocate the buf here in this function, we do not 10322 * need to preserve the prior contents of b_private. 10323 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10324 */ 10325 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10326 uip->ui_flags = path_flag; 10327 uip->ui_cmdp = uscmd; 10328 10329 /* 10330 * Commands sent with priority are intended for error recovery 10331 * situations, and do not have retries performed. 10332 */ 10333 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10334 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10335 } 10336 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10337 10338 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10339 sd_uscsi_strategy, NULL, uip); 10340 10341 #ifdef SDDEBUG 10342 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10343 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10344 uscmd->uscsi_status, uscmd->uscsi_resid); 10345 if (uscmd->uscsi_bufaddr != NULL) { 10346 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10347 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10348 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10349 if (dataspace == UIO_SYSSPACE) { 10350 SD_DUMP_MEMORY(un, SD_LOG_IO, 10351 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10352 uscmd->uscsi_buflen, SD_LOG_HEX); 10353 } 10354 } 10355 #endif 10356 10357 if (format == 1) { 10358 mutex_enter(SD_MUTEX(un)); 10359 un->un_f_format_in_progress = FALSE; 10360 mutex_exit(SD_MUTEX(un)); 10361 } 10362 10363 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10364 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10365 10366 return (rval); 10367 } 10368 10369 10370 /* 10371 * Function: sd_buf_iodone 10372 * 10373 * Description: Frees the sd_xbuf & returns the buf to its originator. 10374 * 10375 * Context: May be called from interrupt context. 
10376 */ 10377 /* ARGSUSED */ 10378 static void 10379 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10380 { 10381 struct sd_xbuf *xp; 10382 10383 ASSERT(un != NULL); 10384 ASSERT(bp != NULL); 10385 ASSERT(!mutex_owned(SD_MUTEX(un))); 10386 10387 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10388 10389 xp = SD_GET_XBUF(bp); 10390 ASSERT(xp != NULL); 10391 10392 mutex_enter(SD_MUTEX(un)); 10393 10394 /* 10395 * Grab time when the cmd completed. 10396 * This is used for determining if the system has been 10397 * idle long enough to make it idle to the PM framework. 10398 * This is for lowering the overhead, and therefore improving 10399 * performance per I/O operation. 10400 */ 10401 un->un_pm_idle_time = ddi_get_time(); 10402 10403 un->un_ncmds_in_driver--; 10404 ASSERT(un->un_ncmds_in_driver >= 0); 10405 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10406 un->un_ncmds_in_driver); 10407 10408 mutex_exit(SD_MUTEX(un)); 10409 10410 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10411 biodone(bp); /* bp is gone after this */ 10412 10413 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10414 } 10415 10416 10417 /* 10418 * Function: sd_uscsi_iodone 10419 * 10420 * Description: Frees the sd_xbuf & returns the buf to its originator. 10421 * 10422 * Context: May be called from interrupt context. 10423 */ 10424 /* ARGSUSED */ 10425 static void 10426 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10427 { 10428 struct sd_xbuf *xp; 10429 10430 ASSERT(un != NULL); 10431 ASSERT(bp != NULL); 10432 10433 xp = SD_GET_XBUF(bp); 10434 ASSERT(xp != NULL); 10435 ASSERT(!mutex_owned(SD_MUTEX(un))); 10436 10437 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10438 10439 bp->b_private = xp->xb_private; 10440 10441 mutex_enter(SD_MUTEX(un)); 10442 10443 /* 10444 * Grab time when the cmd completed. 10445 * This is used for determining if the system has been 10446 * idle long enough to make it idle to the PM framework. 10447 * This is for lowering the overhead, and therefore improving 10448 * performance per I/O operation. 10449 */ 10450 un->un_pm_idle_time = ddi_get_time(); 10451 10452 un->un_ncmds_in_driver--; 10453 ASSERT(un->un_ncmds_in_driver >= 0); 10454 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10455 un->un_ncmds_in_driver); 10456 10457 mutex_exit(SD_MUTEX(un)); 10458 10459 kmem_free(xp, sizeof (struct sd_xbuf)); 10460 biodone(bp); 10461 10462 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10463 } 10464 10465 10466 /* 10467 * Function: sd_mapblockaddr_iostart 10468 * 10469 * Description: Verify request lies withing the partition limits for 10470 * the indicated minor device. Issue "overrun" buf if 10471 * request would exceed partition range. Converts 10472 * partition-relative block address to absolute. 10473 * 10474 * Context: Can sleep 10475 * 10476 * Issues: This follows what the old code did, in terms of accessing 10477 * some of the partition info in the unit struct without holding 10478 * the mutext. This is a general issue, if the partition info 10479 * can be altered while IO is in progress... as soon as we send 10480 * a buf, its partitioning can be invalid before it gets to the 10481 * device. Probably the right fix is to move partitioning out 10482 * of the driver entirely. 
10483 */ 10484 10485 static void 10486 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10487 { 10488 diskaddr_t nblocks; /* #blocks in the given partition */ 10489 daddr_t blocknum; /* Block number specified by the buf */ 10490 size_t requested_nblocks; 10491 size_t available_nblocks; 10492 int partition; 10493 diskaddr_t partition_offset; 10494 struct sd_xbuf *xp; 10495 10496 10497 ASSERT(un != NULL); 10498 ASSERT(bp != NULL); 10499 ASSERT(!mutex_owned(SD_MUTEX(un))); 10500 10501 SD_TRACE(SD_LOG_IO_PARTITION, un, 10502 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10503 10504 xp = SD_GET_XBUF(bp); 10505 ASSERT(xp != NULL); 10506 10507 /* 10508 * If the geometry is not indicated as valid, attempt to access 10509 * the unit & verify the geometry/label. This can be the case for 10510 * removable-media devices, of if the device was opened in 10511 * NDELAY/NONBLOCK mode. 10512 */ 10513 if (!SD_IS_VALID_LABEL(un) && 10514 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10515 /* 10516 * For removable devices it is possible to start an I/O 10517 * without a media by opening the device in nodelay mode. 10518 * Also for writable CDs there can be many scenarios where 10519 * there is no geometry yet but volume manager is trying to 10520 * issue a read() just because it can see TOC on the CD. So 10521 * do not print a message for removables. 10522 */ 10523 if (!un->un_f_has_removable_media) { 10524 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10525 "i/o to invalid geometry\n"); 10526 } 10527 bioerror(bp, EIO); 10528 bp->b_resid = bp->b_bcount; 10529 SD_BEGIN_IODONE(index, un, bp); 10530 return; 10531 } 10532 10533 partition = SDPART(bp->b_edev); 10534 10535 nblocks = 0; 10536 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10537 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10538 10539 /* 10540 * blocknum is the starting block number of the request. At this 10541 * point it is still relative to the start of the minor device. 10542 */ 10543 blocknum = xp->xb_blkno; 10544 10545 /* 10546 * Legacy: If the starting block number is one past the last block 10547 * in the partition, do not set B_ERROR in the buf. 10548 */ 10549 if (blocknum == nblocks) { 10550 goto error_exit; 10551 } 10552 10553 /* 10554 * Confirm that the first block of the request lies within the 10555 * partition limits. Also the requested number of bytes must be 10556 * a multiple of the system block size. 10557 */ 10558 if ((blocknum < 0) || (blocknum >= nblocks) || 10559 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10560 bp->b_flags |= B_ERROR; 10561 goto error_exit; 10562 } 10563 10564 /* 10565 * If the requsted # blocks exceeds the available # blocks, that 10566 * is an overrun of the partition. 10567 */ 10568 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10569 available_nblocks = (size_t)(nblocks - blocknum); 10570 ASSERT(nblocks >= blocknum); 10571 10572 if (requested_nblocks > available_nblocks) { 10573 /* 10574 * Allocate an "overrun" buf to allow the request to proceed 10575 * for the amount of space available in the partition. The 10576 * amount not transferred will be added into the b_resid 10577 * when the operation is complete. The overrun buf 10578 * replaces the original buf here, and the original buf 10579 * is saved inside the overrun buf, for later use. 
10580 */ 10581 size_t resid = SD_SYSBLOCKS2BYTES(un, 10582 (offset_t)(requested_nblocks - available_nblocks)); 10583 size_t count = bp->b_bcount - resid; 10584 /* 10585 * Note: count is an unsigned entity thus it'll NEVER 10586 * be less than 0 so ASSERT the original values are 10587 * correct. 10588 */ 10589 ASSERT(bp->b_bcount >= resid); 10590 10591 bp = sd_bioclone_alloc(bp, count, blocknum, 10592 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10593 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10594 ASSERT(xp != NULL); 10595 } 10596 10597 /* At this point there should be no residual for this buf. */ 10598 ASSERT(bp->b_resid == 0); 10599 10600 /* Convert the block number to an absolute address. */ 10601 xp->xb_blkno += partition_offset; 10602 10603 SD_NEXT_IOSTART(index, un, bp); 10604 10605 SD_TRACE(SD_LOG_IO_PARTITION, un, 10606 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10607 10608 return; 10609 10610 error_exit: 10611 bp->b_resid = bp->b_bcount; 10612 SD_BEGIN_IODONE(index, un, bp); 10613 SD_TRACE(SD_LOG_IO_PARTITION, un, 10614 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10615 } 10616 10617 10618 /* 10619 * Function: sd_mapblockaddr_iodone 10620 * 10621 * Description: Completion-side processing for partition management. 10622 * 10623 * Context: May be called under interrupt context 10624 */ 10625 10626 static void 10627 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10628 { 10629 /* int partition; */ /* Not used, see below. */ 10630 ASSERT(un != NULL); 10631 ASSERT(bp != NULL); 10632 ASSERT(!mutex_owned(SD_MUTEX(un))); 10633 10634 SD_TRACE(SD_LOG_IO_PARTITION, un, 10635 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10636 10637 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10638 /* 10639 * We have an "overrun" buf to deal with... 10640 */ 10641 struct sd_xbuf *xp; 10642 struct buf *obp; /* ptr to the original buf */ 10643 10644 xp = SD_GET_XBUF(bp); 10645 ASSERT(xp != NULL); 10646 10647 /* Retrieve the pointer to the original buf */ 10648 obp = (struct buf *)xp->xb_private; 10649 ASSERT(obp != NULL); 10650 10651 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10652 bioerror(obp, bp->b_error); 10653 10654 sd_bioclone_free(bp); 10655 10656 /* 10657 * Get back the original buf. 10658 * Note that since the restoration of xb_blkno below 10659 * was removed, the sd_xbuf is not needed. 10660 */ 10661 bp = obp; 10662 /* 10663 * xp = SD_GET_XBUF(bp); 10664 * ASSERT(xp != NULL); 10665 */ 10666 } 10667 10668 /* 10669 * Convert sd->xb_blkno back to a minor-device relative value. 10670 * Note: this has been commented out, as it is not needed in the 10671 * current implementation of the driver (ie, since this function 10672 * is at the top of the layering chains, so the info will be 10673 * discarded) and it is in the "hot" IO path. 10674 * 10675 * partition = getminor(bp->b_edev) & SDPART_MASK; 10676 * xp->xb_blkno -= un->un_offset[partition]; 10677 */ 10678 10679 SD_NEXT_IODONE(index, un, bp); 10680 10681 SD_TRACE(SD_LOG_IO_PARTITION, un, 10682 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 10683 } 10684 10685 10686 /* 10687 * Function: sd_mapblocksize_iostart 10688 * 10689 * Description: Convert between system block size (un->un_sys_blocksize) 10690 * and target block size (un->un_tgt_blocksize). 10691 * 10692 * Context: Can sleep to allocate resources. 
10693 * 10694 * Assumptions: A higher layer has already performed any partition validation, 10695 * and converted the xp->xb_blkno to an absolute value relative 10696 * to the start of the device. 10697 * 10698 * It is also assumed that the higher layer has implemented 10699 * an "overrun" mechanism for the case where the request would 10700 * read/write beyond the end of a partition. In this case we 10701 * assume (and ASSERT) that bp->b_resid == 0. 10702 * 10703 * Note: The implementation for this routine assumes the target 10704 * block size remains constant between allocation and transport. 10705 */ 10706 10707 static void 10708 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 10709 { 10710 struct sd_mapblocksize_info *bsp; 10711 struct sd_xbuf *xp; 10712 offset_t first_byte; 10713 daddr_t start_block, end_block; 10714 daddr_t request_bytes; 10715 ushort_t is_aligned = FALSE; 10716 10717 ASSERT(un != NULL); 10718 ASSERT(bp != NULL); 10719 ASSERT(!mutex_owned(SD_MUTEX(un))); 10720 ASSERT(bp->b_resid == 0); 10721 10722 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10723 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 10724 10725 /* 10726 * For a non-writable CD, a write request is an error 10727 */ 10728 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 10729 (un->un_f_mmc_writable_media == FALSE)) { 10730 bioerror(bp, EIO); 10731 bp->b_resid = bp->b_bcount; 10732 SD_BEGIN_IODONE(index, un, bp); 10733 return; 10734 } 10735 10736 /* 10737 * We do not need a shadow buf if the device is using 10738 * un->un_sys_blocksize as its block size or if bcount == 0. 10739 * In this case there is no layer-private data block allocated. 10740 */ 10741 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10742 (bp->b_bcount == 0)) { 10743 goto done; 10744 } 10745 10746 #if defined(__i386) || defined(__amd64) 10747 /* We do not support non-block-aligned transfers for ROD devices */ 10748 ASSERT(!ISROD(un)); 10749 #endif 10750 10751 xp = SD_GET_XBUF(bp); 10752 ASSERT(xp != NULL); 10753 10754 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10755 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 10756 un->un_tgt_blocksize, un->un_sys_blocksize); 10757 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10758 "request start block:0x%x\n", xp->xb_blkno); 10759 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10760 "request len:0x%x\n", bp->b_bcount); 10761 10762 /* 10763 * Allocate the layer-private data area for the mapblocksize layer. 10764 * Layers are allowed to use the xp_private member of the sd_xbuf 10765 * struct to store the pointer to their layer-private data block, but 10766 * each layer also has the responsibility of restoring the prior 10767 * contents of xb_private before returning the buf/xbuf to the 10768 * higher layer that sent it. 10769 * 10770 * Here we save the prior contents of xp->xb_private into the 10771 * bsp->mbs_oprivate field of our layer-private data area. This value 10772 * is restored by sd_mapblocksize_iodone() just prior to freeing up 10773 * the layer-private area and returning the buf/xbuf to the layer 10774 * that sent it. 10775 * 10776 * Note that here we use kmem_zalloc for the allocation as there are 10777 * parts of the mapblocksize code that expect certain fields to be 10778 * zero unless explicitly set to a required value. 
10779 */ 10780 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10781 bsp->mbs_oprivate = xp->xb_private; 10782 xp->xb_private = bsp; 10783 10784 /* 10785 * This treats the data on the disk (target) as an array of bytes. 10786 * first_byte is the byte offset, from the beginning of the device, 10787 * to the location of the request. This is converted from a 10788 * un->un_sys_blocksize block address to a byte offset, and then back 10789 * to a block address based upon a un->un_tgt_blocksize block size. 10790 * 10791 * xp->xb_blkno should be absolute upon entry into this function, 10792 * but, but it is based upon partitions that use the "system" 10793 * block size. It must be adjusted to reflect the block size of 10794 * the target. 10795 * 10796 * Note that end_block is actually the block that follows the last 10797 * block of the request, but that's what is needed for the computation. 10798 */ 10799 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 10800 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 10801 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 10802 un->un_tgt_blocksize; 10803 10804 /* request_bytes is rounded up to a multiple of the target block size */ 10805 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 10806 10807 /* 10808 * See if the starting address of the request and the request 10809 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 10810 * then we do not need to allocate a shadow buf to handle the request. 10811 */ 10812 if (((first_byte % un->un_tgt_blocksize) == 0) && 10813 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 10814 is_aligned = TRUE; 10815 } 10816 10817 if ((bp->b_flags & B_READ) == 0) { 10818 /* 10819 * Lock the range for a write operation. An aligned request is 10820 * considered a simple write; otherwise the request must be a 10821 * read-modify-write. 10822 */ 10823 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 10824 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 10825 } 10826 10827 /* 10828 * Alloc a shadow buf if the request is not aligned. Also, this is 10829 * where the READ command is generated for a read-modify-write. (The 10830 * write phase is deferred until after the read completes.) 10831 */ 10832 if (is_aligned == FALSE) { 10833 10834 struct sd_mapblocksize_info *shadow_bsp; 10835 struct sd_xbuf *shadow_xp; 10836 struct buf *shadow_bp; 10837 10838 /* 10839 * Allocate the shadow buf and it associated xbuf. Note that 10840 * after this call the xb_blkno value in both the original 10841 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 10842 * same: absolute relative to the start of the device, and 10843 * adjusted for the target block size. The b_blkno in the 10844 * shadow buf will also be set to this value. We should never 10845 * change b_blkno in the original bp however. 10846 * 10847 * Note also that the shadow buf will always need to be a 10848 * READ command, regardless of whether the incoming command 10849 * is a READ or a WRITE. 10850 */ 10851 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 10852 xp->xb_blkno, 10853 (int (*)(struct buf *)) sd_mapblocksize_iodone); 10854 10855 shadow_xp = SD_GET_XBUF(shadow_bp); 10856 10857 /* 10858 * Allocate the layer-private data for the shadow buf. 10859 * (No need to preserve xb_private in the shadow xbuf.) 
10860 */ 10861 shadow_xp->xb_private = shadow_bsp = 10862 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10863 10864 /* 10865 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 10866 * to figure out where the start of the user data is (based upon 10867 * the system block size) in the data returned by the READ 10868 * command (which will be based upon the target blocksize). Note 10869 * that this is only really used if the request is unaligned. 10870 */ 10871 bsp->mbs_copy_offset = (ssize_t)(first_byte - 10872 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 10873 ASSERT((bsp->mbs_copy_offset >= 0) && 10874 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 10875 10876 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 10877 10878 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 10879 10880 /* Transfer the wmap (if any) to the shadow buf */ 10881 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 10882 bsp->mbs_wmp = NULL; 10883 10884 /* 10885 * The shadow buf goes on from here in place of the 10886 * original buf. 10887 */ 10888 shadow_bsp->mbs_orig_bp = bp; 10889 bp = shadow_bp; 10890 } 10891 10892 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10893 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 10894 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10895 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 10896 request_bytes); 10897 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10898 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 10899 10900 done: 10901 SD_NEXT_IOSTART(index, un, bp); 10902 10903 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10904 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 10905 } 10906 10907 10908 /* 10909 * Function: sd_mapblocksize_iodone 10910 * 10911 * Description: Completion side processing for block-size mapping. 10912 * 10913 * Context: May be called under interrupt context 10914 */ 10915 10916 static void 10917 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 10918 { 10919 struct sd_mapblocksize_info *bsp; 10920 struct sd_xbuf *xp; 10921 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 10922 struct buf *orig_bp; /* ptr to the original buf */ 10923 offset_t shadow_end; 10924 offset_t request_end; 10925 offset_t shadow_start; 10926 ssize_t copy_offset; 10927 size_t copy_length; 10928 size_t shortfall; 10929 uint_t is_write; /* TRUE if this bp is a WRITE */ 10930 uint_t has_wmap; /* TRUE is this bp has a wmap */ 10931 10932 ASSERT(un != NULL); 10933 ASSERT(bp != NULL); 10934 10935 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10936 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 10937 10938 /* 10939 * There is no shadow buf or layer-private data if the target is 10940 * using un->un_sys_blocksize as its block size or if bcount == 0. 10941 */ 10942 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10943 (bp->b_bcount == 0)) { 10944 goto exit; 10945 } 10946 10947 xp = SD_GET_XBUF(bp); 10948 ASSERT(xp != NULL); 10949 10950 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 10951 bsp = xp->xb_private; 10952 10953 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 10954 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 10955 10956 if (is_write) { 10957 /* 10958 * For a WRITE request we must free up the block range that 10959 * we have locked up. This holds regardless of whether this is 10960 * an aligned write request or a read-modify-write request. 
10961 */ 10962 sd_range_unlock(un, bsp->mbs_wmp); 10963 bsp->mbs_wmp = NULL; 10964 } 10965 10966 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 10967 /* 10968 * An aligned read or write command will have no shadow buf; 10969 * there is not much else to do with it. 10970 */ 10971 goto done; 10972 } 10973 10974 orig_bp = bsp->mbs_orig_bp; 10975 ASSERT(orig_bp != NULL); 10976 orig_xp = SD_GET_XBUF(orig_bp); 10977 ASSERT(orig_xp != NULL); 10978 ASSERT(!mutex_owned(SD_MUTEX(un))); 10979 10980 if (!is_write && has_wmap) { 10981 /* 10982 * A READ with a wmap means this is the READ phase of a 10983 * read-modify-write. If an error occurred on the READ then 10984 * we do not proceed with the WRITE phase or copy any data. 10985 * Just release the write maps and return with an error. 10986 */ 10987 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 10988 orig_bp->b_resid = orig_bp->b_bcount; 10989 bioerror(orig_bp, bp->b_error); 10990 sd_range_unlock(un, bsp->mbs_wmp); 10991 goto freebuf_done; 10992 } 10993 } 10994 10995 /* 10996 * Here is where we set up to copy the data from the shadow buf 10997 * into the space associated with the original buf. 10998 * 10999 * To deal with the conversion between block sizes, these 11000 * computations treat the data as an array of bytes, with the 11001 * first byte (byte 0) corresponding to the first byte in the 11002 * first block on the disk. 11003 */ 11004 11005 /* 11006 * shadow_start and shadow_len indicate the location and size of 11007 * the data returned with the shadow IO request. 11008 */ 11009 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11010 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11011 11012 /* 11013 * copy_offset gives the offset (in bytes) from the start of the first 11014 * block of the READ request to the beginning of the data. We retrieve 11015 * this value from xb_pktp in the ORIGINAL xbuf, as it has been saved 11016 * there by sd_mapblockize_iostart(). copy_length gives the amount of 11017 * data to be copied (in bytes). 11018 */ 11019 copy_offset = bsp->mbs_copy_offset; 11020 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11021 copy_length = orig_bp->b_bcount; 11022 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11023 11024 /* 11025 * Set up the resid and error fields of orig_bp as appropriate. 11026 */ 11027 if (shadow_end >= request_end) { 11028 /* We got all the requested data; set resid to zero */ 11029 orig_bp->b_resid = 0; 11030 } else { 11031 /* 11032 * We failed to get enough data to fully satisfy the original 11033 * request. Just copy back whatever data we got and set 11034 * up the residual and error code as required. 11035 * 11036 * 'shortfall' is the amount by which the data received with the 11037 * shadow buf has "fallen short" of the requested amount. 11038 */ 11039 shortfall = (size_t)(request_end - shadow_end); 11040 11041 if (shortfall > orig_bp->b_bcount) { 11042 /* 11043 * We did not get enough data to even partially 11044 * fulfill the original request. The residual is 11045 * equal to the amount requested. 11046 */ 11047 orig_bp->b_resid = orig_bp->b_bcount; 11048 } else { 11049 /* 11050 * We did not get all the data that we requested 11051 * from the device, but we will try to return what 11052 * portion we did get. 
11053 */ 11054 orig_bp->b_resid = shortfall; 11055 } 11056 ASSERT(copy_length >= orig_bp->b_resid); 11057 copy_length -= orig_bp->b_resid; 11058 } 11059 11060 /* Propagate the error code from the shadow buf to the original buf */ 11061 bioerror(orig_bp, bp->b_error); 11062 11063 if (is_write) { 11064 goto freebuf_done; /* No data copying for a WRITE */ 11065 } 11066 11067 if (has_wmap) { 11068 /* 11069 * This is a READ command from the READ phase of a 11070 * read-modify-write request. We have to copy the data given 11071 * by the user OVER the data returned by the READ command, 11072 * then convert the command from a READ to a WRITE and send 11073 * it back to the target. 11074 */ 11075 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11076 copy_length); 11077 11078 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11079 11080 /* 11081 * Dispatch the WRITE command to the taskq thread, which 11082 * will in turn send the command to the target. When the 11083 * WRITE command completes, we (sd_mapblocksize_iodone()) 11084 * will get called again as part of the iodone chain 11085 * processing for it. Note that we will still be dealing 11086 * with the shadow buf at that point. 11087 */ 11088 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11089 KM_NOSLEEP) != 0) { 11090 /* 11091 * Dispatch was successful so we are done. Return 11092 * without going any higher up the iodone chain. Do 11093 * not free up any layer-private data until after the 11094 * WRITE completes. 11095 */ 11096 return; 11097 } 11098 11099 /* 11100 * Dispatch of the WRITE command failed; set up the error 11101 * condition and send this IO back up the iodone chain. 11102 */ 11103 bioerror(orig_bp, EIO); 11104 orig_bp->b_resid = orig_bp->b_bcount; 11105 11106 } else { 11107 /* 11108 * This is a regular READ request (ie, not a RMW). Copy the 11109 * data from the shadow buf into the original buf. The 11110 * copy_offset compensates for any "misalignment" between the 11111 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11112 * original buf (with its un->un_sys_blocksize blocks). 11113 */ 11114 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11115 copy_length); 11116 } 11117 11118 freebuf_done: 11119 11120 /* 11121 * At this point we still have both the shadow buf AND the original 11122 * buf to deal with, as well as the layer-private data area in each. 11123 * Local variables are as follows: 11124 * 11125 * bp -- points to shadow buf 11126 * xp -- points to xbuf of shadow buf 11127 * bsp -- points to layer-private data area of shadow buf 11128 * orig_bp -- points to original buf 11129 * 11130 * First free the shadow buf and its associated xbuf, then free the 11131 * layer-private data area from the shadow buf. There is no need to 11132 * restore xb_private in the shadow xbuf. 11133 */ 11134 sd_shadow_buf_free(bp); 11135 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11136 11137 /* 11138 * Now update the local variables to point to the original buf, xbuf, 11139 * and layer-private area. 11140 */ 11141 bp = orig_bp; 11142 xp = SD_GET_XBUF(bp); 11143 ASSERT(xp != NULL); 11144 ASSERT(xp == orig_xp); 11145 bsp = xp->xb_private; 11146 ASSERT(bsp != NULL); 11147 11148 done: 11149 /* 11150 * Restore xb_private to whatever it was set to by the next higher 11151 * layer in the chain, then free the layer-private data area. 
11152 */ 11153 xp->xb_private = bsp->mbs_oprivate; 11154 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11155 11156 exit: 11157 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11158 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11159 11160 SD_NEXT_IODONE(index, un, bp); 11161 } 11162 11163 11164 /* 11165 * Function: sd_checksum_iostart 11166 * 11167 * Description: A stub function for a layer that's currently not used. 11168 * For now just a placeholder. 11169 * 11170 * Context: Kernel thread context 11171 */ 11172 11173 static void 11174 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11175 { 11176 ASSERT(un != NULL); 11177 ASSERT(bp != NULL); 11178 ASSERT(!mutex_owned(SD_MUTEX(un))); 11179 SD_NEXT_IOSTART(index, un, bp); 11180 } 11181 11182 11183 /* 11184 * Function: sd_checksum_iodone 11185 * 11186 * Description: A stub function for a layer that's currently not used. 11187 * For now just a placeholder. 11188 * 11189 * Context: May be called under interrupt context 11190 */ 11191 11192 static void 11193 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11194 { 11195 ASSERT(un != NULL); 11196 ASSERT(bp != NULL); 11197 ASSERT(!mutex_owned(SD_MUTEX(un))); 11198 SD_NEXT_IODONE(index, un, bp); 11199 } 11200 11201 11202 /* 11203 * Function: sd_checksum_uscsi_iostart 11204 * 11205 * Description: A stub function for a layer that's currently not used. 11206 * For now just a placeholder. 11207 * 11208 * Context: Kernel thread context 11209 */ 11210 11211 static void 11212 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11213 { 11214 ASSERT(un != NULL); 11215 ASSERT(bp != NULL); 11216 ASSERT(!mutex_owned(SD_MUTEX(un))); 11217 SD_NEXT_IOSTART(index, un, bp); 11218 } 11219 11220 11221 /* 11222 * Function: sd_checksum_uscsi_iodone 11223 * 11224 * Description: A stub function for a layer that's currently not used. 11225 * For now just a placeholder. 11226 * 11227 * Context: May be called under interrupt context 11228 */ 11229 11230 static void 11231 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11232 { 11233 ASSERT(un != NULL); 11234 ASSERT(bp != NULL); 11235 ASSERT(!mutex_owned(SD_MUTEX(un))); 11236 SD_NEXT_IODONE(index, un, bp); 11237 } 11238 11239 11240 /* 11241 * Function: sd_pm_iostart 11242 * 11243 * Description: iostart-side routine for Power mangement. 11244 * 11245 * Context: Kernel thread context 11246 */ 11247 11248 static void 11249 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11250 { 11251 ASSERT(un != NULL); 11252 ASSERT(bp != NULL); 11253 ASSERT(!mutex_owned(SD_MUTEX(un))); 11254 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11255 11256 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11257 11258 if (sd_pm_entry(un) != DDI_SUCCESS) { 11259 /* 11260 * Set up to return the failed buf back up the 'iodone' 11261 * side of the calling chain. 11262 */ 11263 bioerror(bp, EIO); 11264 bp->b_resid = bp->b_bcount; 11265 11266 SD_BEGIN_IODONE(index, un, bp); 11267 11268 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11269 return; 11270 } 11271 11272 SD_NEXT_IOSTART(index, un, bp); 11273 11274 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11275 } 11276 11277 11278 /* 11279 * Function: sd_pm_iodone 11280 * 11281 * Description: iodone-side routine for power mangement. 
11282 * 11283 * Context: may be called from interrupt context 11284 */ 11285 11286 static void 11287 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11288 { 11289 ASSERT(un != NULL); 11290 ASSERT(bp != NULL); 11291 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11292 11293 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11294 11295 /* 11296 * After attach the following flag is only read, so don't 11297 * take the penalty of acquiring a mutex for it. 11298 */ 11299 if (un->un_f_pm_is_enabled == TRUE) { 11300 sd_pm_exit(un); 11301 } 11302 11303 SD_NEXT_IODONE(index, un, bp); 11304 11305 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11306 } 11307 11308 11309 /* 11310 * Function: sd_core_iostart 11311 * 11312 * Description: Primary driver function for enqueuing buf(9S) structs from 11313 * the system and initiating IO to the target device 11314 * 11315 * Context: Kernel thread context. Can sleep. 11316 * 11317 * Assumptions: - The given xp->xb_blkno is absolute 11318 * (ie, relative to the start of the device). 11319 * - The IO is to be done using the native blocksize of 11320 * the device, as specified in un->un_tgt_blocksize. 11321 */ 11322 /* ARGSUSED */ 11323 static void 11324 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11325 { 11326 struct sd_xbuf *xp; 11327 11328 ASSERT(un != NULL); 11329 ASSERT(bp != NULL); 11330 ASSERT(!mutex_owned(SD_MUTEX(un))); 11331 ASSERT(bp->b_resid == 0); 11332 11333 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11334 11335 xp = SD_GET_XBUF(bp); 11336 ASSERT(xp != NULL); 11337 11338 mutex_enter(SD_MUTEX(un)); 11339 11340 /* 11341 * If we are currently in the failfast state, fail any new IO 11342 * that has B_FAILFAST set, then return. 11343 */ 11344 if ((bp->b_flags & B_FAILFAST) && 11345 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11346 mutex_exit(SD_MUTEX(un)); 11347 bioerror(bp, EIO); 11348 bp->b_resid = bp->b_bcount; 11349 SD_BEGIN_IODONE(index, un, bp); 11350 return; 11351 } 11352 11353 if (SD_IS_DIRECT_PRIORITY(xp)) { 11354 /* 11355 * Priority command -- transport it immediately. 11356 * 11357 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11358 * because all direct priority commands should be associated 11359 * with error recovery actions which we don't want to retry. 11360 */ 11361 sd_start_cmds(un, bp); 11362 } else { 11363 /* 11364 * Normal command -- add it to the wait queue, then start 11365 * transporting commands from the wait queue. 11366 */ 11367 sd_add_buf_to_waitq(un, bp); 11368 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11369 sd_start_cmds(un, NULL); 11370 } 11371 11372 mutex_exit(SD_MUTEX(un)); 11373 11374 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11375 } 11376 11377 11378 /* 11379 * Function: sd_init_cdb_limits 11380 * 11381 * Description: This is to handle scsi_pkt initialization differences 11382 * between the driver platforms. 11383 * 11384 * Legacy behaviors: 11385 * 11386 * If the block number or the sector count exceeds the 11387 * capabilities of a Group 0 command, shift over to a 11388 * Group 1 command. We don't blindly use Group 1 11389 * commands because a) some drives (CDC Wren IVs) get a 11390 * bit confused, and b) there is probably a fair amount 11391 * of speed difference for a target to receive and decode 11392 * a 10 byte command instead of a 6 byte command. 11393 * 11394 * The xfer time difference of 6 vs 10 byte CDBs is 11395 * still significant so this code is still worthwhile. 
11396 * 10 byte CDBs are very inefficient with the fas HBA driver 11397 * and older disks. Each CDB byte took 1 usec with some 11398 * popular disks. 11399 * 11400 * Context: Must be called at attach time 11401 */ 11402 11403 static void 11404 sd_init_cdb_limits(struct sd_lun *un) 11405 { 11406 int hba_cdb_limit; 11407 11408 /* 11409 * Use CDB_GROUP1 commands for most devices except for 11410 * parallel SCSI fixed drives in which case we get better 11411 * performance using CDB_GROUP0 commands (where applicable). 11412 */ 11413 un->un_mincdb = SD_CDB_GROUP1; 11414 #if !defined(__fibre) 11415 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11416 !un->un_f_has_removable_media) { 11417 un->un_mincdb = SD_CDB_GROUP0; 11418 } 11419 #endif 11420 11421 /* 11422 * Try to read the max-cdb-length supported by HBA. 11423 */ 11424 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11425 if (0 >= un->un_max_hba_cdb) { 11426 un->un_max_hba_cdb = CDB_GROUP4; 11427 hba_cdb_limit = SD_CDB_GROUP4; 11428 } else if (0 < un->un_max_hba_cdb && 11429 un->un_max_hba_cdb < CDB_GROUP1) { 11430 hba_cdb_limit = SD_CDB_GROUP0; 11431 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11432 un->un_max_hba_cdb < CDB_GROUP5) { 11433 hba_cdb_limit = SD_CDB_GROUP1; 11434 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11435 un->un_max_hba_cdb < CDB_GROUP4) { 11436 hba_cdb_limit = SD_CDB_GROUP5; 11437 } else { 11438 hba_cdb_limit = SD_CDB_GROUP4; 11439 } 11440 11441 /* 11442 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11443 * commands for fixed disks unless we are building for a 32 bit 11444 * kernel. 11445 */ 11446 #ifdef _LP64 11447 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11448 min(hba_cdb_limit, SD_CDB_GROUP4); 11449 #else 11450 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11451 min(hba_cdb_limit, SD_CDB_GROUP1); 11452 #endif 11453 11454 /* 11455 * x86 systems require the PKT_DMA_PARTIAL flag 11456 */ 11457 #if defined(__x86) 11458 un->un_pkt_flags = PKT_DMA_PARTIAL; 11459 #else 11460 un->un_pkt_flags = 0; 11461 #endif 11462 11463 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11464 ? sizeof (struct scsi_arq_status) : 1); 11465 un->un_cmd_timeout = (ushort_t)sd_io_time; 11466 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 11467 } 11468 11469 11470 /* 11471 * Function: sd_initpkt_for_buf 11472 * 11473 * Description: Allocate and initialize for transport a scsi_pkt struct, 11474 * based upon the info specified in the given buf struct. 11475 * 11476 * Assumes the xb_blkno in the request is absolute (ie, 11477 * relative to the start of the device (NOT partition!). 11478 * Also assumes that the request is using the native block 11479 * size of the device (as returned by the READ CAPACITY 11480 * command). 11481 * 11482 * Return Code: SD_PKT_ALLOC_SUCCESS 11483 * SD_PKT_ALLOC_FAILURE 11484 * SD_PKT_ALLOC_FAILURE_NO_DMA 11485 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11486 * 11487 * Context: Kernel thread and may be called from software interrupt context 11488 * as part of a sdrunout callback. 
This function may not block or 11489 * call routines that block 11490 */ 11491 11492 static int 11493 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11494 { 11495 struct sd_xbuf *xp; 11496 struct scsi_pkt *pktp = NULL; 11497 struct sd_lun *un; 11498 size_t blockcount; 11499 daddr_t startblock; 11500 int rval; 11501 int cmd_flags; 11502 11503 ASSERT(bp != NULL); 11504 ASSERT(pktpp != NULL); 11505 xp = SD_GET_XBUF(bp); 11506 ASSERT(xp != NULL); 11507 un = SD_GET_UN(bp); 11508 ASSERT(un != NULL); 11509 ASSERT(mutex_owned(SD_MUTEX(un))); 11510 ASSERT(bp->b_resid == 0); 11511 11512 SD_TRACE(SD_LOG_IO_CORE, un, 11513 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11514 11515 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11516 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11517 /* 11518 * Already have a scsi_pkt -- just need DMA resources. 11519 * We must recompute the CDB in case the mapping returns 11520 * a nonzero pkt_resid. 11521 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11522 * that is being retried, the unmap/remap of the DMA resouces 11523 * will result in the entire transfer starting over again 11524 * from the very first block. 11525 */ 11526 ASSERT(xp->xb_pktp != NULL); 11527 pktp = xp->xb_pktp; 11528 } else { 11529 pktp = NULL; 11530 } 11531 #endif /* __i386 || __amd64 */ 11532 11533 startblock = xp->xb_blkno; /* Absolute block num. */ 11534 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11535 11536 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11537 11538 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11539 11540 #else 11541 11542 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11543 11544 #endif 11545 11546 /* 11547 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11548 * call scsi_init_pkt, and build the CDB. 11549 */ 11550 rval = sd_setup_rw_pkt(un, &pktp, bp, 11551 cmd_flags, sdrunout, (caddr_t)un, 11552 startblock, blockcount); 11553 11554 if (rval == 0) { 11555 /* 11556 * Success. 11557 * 11558 * If partial DMA is being used and required for this transfer. 11559 * set it up here. 11560 */ 11561 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11562 (pktp->pkt_resid != 0)) { 11563 11564 /* 11565 * Save the CDB length and pkt_resid for the 11566 * next xfer 11567 */ 11568 xp->xb_dma_resid = pktp->pkt_resid; 11569 11570 /* rezero resid */ 11571 pktp->pkt_resid = 0; 11572 11573 } else { 11574 xp->xb_dma_resid = 0; 11575 } 11576 11577 pktp->pkt_flags = un->un_tagflags; 11578 pktp->pkt_time = un->un_cmd_timeout; 11579 pktp->pkt_comp = sdintr; 11580 11581 pktp->pkt_private = bp; 11582 *pktpp = pktp; 11583 11584 SD_TRACE(SD_LOG_IO_CORE, un, 11585 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11586 11587 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11588 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11589 #endif 11590 11591 return (SD_PKT_ALLOC_SUCCESS); 11592 11593 } 11594 11595 /* 11596 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11597 * from sd_setup_rw_pkt. 11598 */ 11599 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11600 11601 if (rval == SD_PKT_ALLOC_FAILURE) { 11602 *pktpp = NULL; 11603 /* 11604 * Set the driver state to RWAIT to indicate the driver 11605 * is waiting on resource allocations. The driver will not 11606 * suspend, pm_suspend, or detatch while the state is RWAIT. 11607 */ 11608 New_state(un, SD_STATE_RWAIT); 11609 11610 SD_ERROR(SD_LOG_IO_CORE, un, 11611 "sd_initpkt_for_buf: No pktp. 
exit bp:0x%p\n", bp); 11612 11613 if ((bp->b_flags & B_ERROR) != 0) { 11614 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11615 } 11616 return (SD_PKT_ALLOC_FAILURE); 11617 } else { 11618 /* 11619 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11620 * 11621 * This should never happen. Maybe someone messed with the 11622 * kernel's minphys? 11623 */ 11624 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11625 "Request rejected: too large for CDB: " 11626 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11627 SD_ERROR(SD_LOG_IO_CORE, un, 11628 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11629 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11630 11631 } 11632 } 11633 11634 11635 /* 11636 * Function: sd_destroypkt_for_buf 11637 * 11638 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11639 * 11640 * Context: Kernel thread or interrupt context 11641 */ 11642 11643 static void 11644 sd_destroypkt_for_buf(struct buf *bp) 11645 { 11646 ASSERT(bp != NULL); 11647 ASSERT(SD_GET_UN(bp) != NULL); 11648 11649 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11650 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11651 11652 ASSERT(SD_GET_PKTP(bp) != NULL); 11653 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11654 11655 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11656 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11657 } 11658 11659 /* 11660 * Function: sd_setup_rw_pkt 11661 * 11662 * Description: Determines appropriate CDB group for the requested LBA 11663 * and transfer length, calls scsi_init_pkt, and builds 11664 * the CDB. Do not use for partial DMA transfers except 11665 * for the initial transfer since the CDB size must 11666 * remain constant. 11667 * 11668 * Context: Kernel thread and may be called from software interrupt 11669 * context as part of a sdrunout callback. This function may not 11670 * block or call routines that block 11671 */ 11672 11673 11674 int 11675 sd_setup_rw_pkt(struct sd_lun *un, 11676 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11677 int (*callback)(caddr_t), caddr_t callback_arg, 11678 diskaddr_t lba, uint32_t blockcount) 11679 { 11680 struct scsi_pkt *return_pktp; 11681 union scsi_cdb *cdbp; 11682 struct sd_cdbinfo *cp = NULL; 11683 int i; 11684 11685 /* 11686 * See which size CDB to use, based upon the request. 11687 */ 11688 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11689 11690 /* 11691 * Check lba and block count against sd_cdbtab limits. 11692 * In the partial DMA case, we have to use the same size 11693 * CDB for all the transfers. Check lba + blockcount 11694 * against the max LBA so we know that segment of the 11695 * transfer can use the CDB we select. 11696 */ 11697 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11698 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11699 11700 /* 11701 * The command will fit into the CDB type 11702 * specified by sd_cdbtab[i]. 11703 */ 11704 cp = sd_cdbtab + i; 11705 11706 /* 11707 * Call scsi_init_pkt so we can fill in the 11708 * CDB. 11709 */ 11710 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 11711 bp, cp->sc_grpcode, un->un_status_len, 0, 11712 flags, callback, callback_arg); 11713 11714 if (return_pktp != NULL) { 11715 11716 /* 11717 * Return new value of pkt 11718 */ 11719 *pktpp = return_pktp; 11720 11721 /* 11722 * To be safe, zero the CDB insuring there is 11723 * no leftover data from a previous command. 
11724 */ 11725 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 11726 11727 /* 11728 * Handle partial DMA mapping 11729 */ 11730 if (return_pktp->pkt_resid != 0) { 11731 11732 /* 11733 * Not going to xfer as many blocks as 11734 * originally expected 11735 */ 11736 blockcount -= 11737 SD_BYTES2TGTBLOCKS(un, 11738 return_pktp->pkt_resid); 11739 } 11740 11741 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 11742 11743 /* 11744 * Set command byte based on the CDB 11745 * type we matched. 11746 */ 11747 cdbp->scc_cmd = cp->sc_grpmask | 11748 ((bp->b_flags & B_READ) ? 11749 SCMD_READ : SCMD_WRITE); 11750 11751 SD_FILL_SCSI1_LUN(un, return_pktp); 11752 11753 /* 11754 * Fill in LBA and length 11755 */ 11756 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 11757 (cp->sc_grpcode == CDB_GROUP4) || 11758 (cp->sc_grpcode == CDB_GROUP0) || 11759 (cp->sc_grpcode == CDB_GROUP5)); 11760 11761 if (cp->sc_grpcode == CDB_GROUP1) { 11762 FORMG1ADDR(cdbp, lba); 11763 FORMG1COUNT(cdbp, blockcount); 11764 return (0); 11765 } else if (cp->sc_grpcode == CDB_GROUP4) { 11766 FORMG4LONGADDR(cdbp, lba); 11767 FORMG4COUNT(cdbp, blockcount); 11768 return (0); 11769 } else if (cp->sc_grpcode == CDB_GROUP0) { 11770 FORMG0ADDR(cdbp, lba); 11771 FORMG0COUNT(cdbp, blockcount); 11772 return (0); 11773 } else if (cp->sc_grpcode == CDB_GROUP5) { 11774 FORMG5ADDR(cdbp, lba); 11775 FORMG5COUNT(cdbp, blockcount); 11776 return (0); 11777 } 11778 11779 /* 11780 * It should be impossible to not match one 11781 * of the CDB types above, so we should never 11782 * reach this point. Set the CDB command byte 11783 * to test-unit-ready to avoid writing 11784 * to somewhere we don't intend. 11785 */ 11786 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 11787 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11788 } else { 11789 /* 11790 * Couldn't get scsi_pkt 11791 */ 11792 return (SD_PKT_ALLOC_FAILURE); 11793 } 11794 } 11795 } 11796 11797 /* 11798 * None of the available CDB types were suitable. This really 11799 * should never happen: on a 64 bit system we support 11800 * READ16/WRITE16 which will hold an entire 64 bit disk address 11801 * and on a 32 bit system we will refuse to bind to a device 11802 * larger than 2TB so addresses will never be larger than 32 bits. 11803 */ 11804 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11805 } 11806 11807 #if defined(__i386) || defined(__amd64) 11808 /* 11809 * Function: sd_setup_next_rw_pkt 11810 * 11811 * Description: Setup packet for partial DMA transfers, except for the 11812 * initial transfer. sd_setup_rw_pkt should be used for 11813 * the initial transfer. 11814 * 11815 * Context: Kernel thread and may be called from interrupt context. 11816 */ 11817 11818 int 11819 sd_setup_next_rw_pkt(struct sd_lun *un, 11820 struct scsi_pkt *pktp, struct buf *bp, 11821 diskaddr_t lba, uint32_t blockcount) 11822 { 11823 uchar_t com; 11824 union scsi_cdb *cdbp; 11825 uchar_t cdb_group_id; 11826 11827 ASSERT(pktp != NULL); 11828 ASSERT(pktp->pkt_cdbp != NULL); 11829 11830 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 11831 com = cdbp->scc_cmd; 11832 cdb_group_id = CDB_GROUPID(com); 11833 11834 ASSERT((cdb_group_id == CDB_GROUPID_0) || 11835 (cdb_group_id == CDB_GROUPID_1) || 11836 (cdb_group_id == CDB_GROUPID_4) || 11837 (cdb_group_id == CDB_GROUPID_5)); 11838 11839 /* 11840 * Move pkt to the next portion of the xfer. 11841 * func is NULL_FUNC so we do not have to release 11842 * the disk mutex here. 11843 */ 11844 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 11845 NULL_FUNC, NULL) == pktp) { 11846 /* Success. 
Handle partial DMA */ 11847 if (pktp->pkt_resid != 0) { 11848 blockcount -= 11849 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 11850 } 11851 11852 cdbp->scc_cmd = com; 11853 SD_FILL_SCSI1_LUN(un, pktp); 11854 if (cdb_group_id == CDB_GROUPID_1) { 11855 FORMG1ADDR(cdbp, lba); 11856 FORMG1COUNT(cdbp, blockcount); 11857 return (0); 11858 } else if (cdb_group_id == CDB_GROUPID_4) { 11859 FORMG4LONGADDR(cdbp, lba); 11860 FORMG4COUNT(cdbp, blockcount); 11861 return (0); 11862 } else if (cdb_group_id == CDB_GROUPID_0) { 11863 FORMG0ADDR(cdbp, lba); 11864 FORMG0COUNT(cdbp, blockcount); 11865 return (0); 11866 } else if (cdb_group_id == CDB_GROUPID_5) { 11867 FORMG5ADDR(cdbp, lba); 11868 FORMG5COUNT(cdbp, blockcount); 11869 return (0); 11870 } 11871 11872 /* Unreachable */ 11873 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11874 } 11875 11876 /* 11877 * Error setting up next portion of cmd transfer. 11878 * Something is definitely very wrong and this 11879 * should not happen. 11880 */ 11881 return (SD_PKT_ALLOC_FAILURE); 11882 } 11883 #endif /* defined(__i386) || defined(__amd64) */ 11884 11885 /* 11886 * Function: sd_initpkt_for_uscsi 11887 * 11888 * Description: Allocate and initialize for transport a scsi_pkt struct, 11889 * based upon the info specified in the given uscsi_cmd struct. 11890 * 11891 * Return Code: SD_PKT_ALLOC_SUCCESS 11892 * SD_PKT_ALLOC_FAILURE 11893 * SD_PKT_ALLOC_FAILURE_NO_DMA 11894 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11895 * 11896 * Context: Kernel thread and may be called from software interrupt context 11897 * as part of a sdrunout callback. This function may not block or 11898 * call routines that block 11899 */ 11900 11901 static int 11902 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 11903 { 11904 struct uscsi_cmd *uscmd; 11905 struct sd_xbuf *xp; 11906 struct scsi_pkt *pktp; 11907 struct sd_lun *un; 11908 uint32_t flags = 0; 11909 11910 ASSERT(bp != NULL); 11911 ASSERT(pktpp != NULL); 11912 xp = SD_GET_XBUF(bp); 11913 ASSERT(xp != NULL); 11914 un = SD_GET_UN(bp); 11915 ASSERT(un != NULL); 11916 ASSERT(mutex_owned(SD_MUTEX(un))); 11917 11918 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 11919 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 11920 ASSERT(uscmd != NULL); 11921 11922 SD_TRACE(SD_LOG_IO_CORE, un, 11923 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 11924 11925 /* 11926 * Allocate the scsi_pkt for the command. 11927 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 11928 * during scsi_init_pkt time and will continue to use the 11929 * same path as long as the same scsi_pkt is used without 11930 * intervening scsi_dma_free(). Since uscsi command does 11931 * not call scsi_dmafree() before retry failed command, it 11932 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 11933 * set such that scsi_vhci can use other available path for 11934 * retry. Besides, ucsci command does not allow DMA breakup, 11935 * so there is no need to set PKT_DMA_PARTIAL flag. 11936 */ 11937 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 11938 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 11939 sizeof (struct scsi_arq_status), 0, 11940 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 11941 sdrunout, (caddr_t)un); 11942 11943 if (pktp == NULL) { 11944 *pktpp = NULL; 11945 /* 11946 * Set the driver state to RWAIT to indicate the driver 11947 * is waiting on resource allocations. The driver will not 11948 * suspend, pm_suspend, or detatch while the state is RWAIT. 
11949 */ 11950 New_state(un, SD_STATE_RWAIT); 11951 11952 SD_ERROR(SD_LOG_IO_CORE, un, 11953 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 11954 11955 if ((bp->b_flags & B_ERROR) != 0) { 11956 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11957 } 11958 return (SD_PKT_ALLOC_FAILURE); 11959 } 11960 11961 /* 11962 * We do not do DMA breakup for USCSI commands, so return failure 11963 * here if all the needed DMA resources were not allocated. 11964 */ 11965 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 11966 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 11967 scsi_destroy_pkt(pktp); 11968 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 11969 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 11970 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 11971 } 11972 11973 /* Init the cdb from the given uscsi struct */ 11974 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 11975 uscmd->uscsi_cdb[0], 0, 0, 0); 11976 11977 SD_FILL_SCSI1_LUN(un, pktp); 11978 11979 /* 11980 * Set up the optional USCSI flags. See the uscsi (7I) man page 11981 * for listing of the supported flags. 11982 */ 11983 11984 if (uscmd->uscsi_flags & USCSI_SILENT) { 11985 flags |= FLAG_SILENT; 11986 } 11987 11988 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 11989 flags |= FLAG_DIAGNOSE; 11990 } 11991 11992 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 11993 flags |= FLAG_ISOLATE; 11994 } 11995 11996 if (un->un_f_is_fibre == FALSE) { 11997 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 11998 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 11999 } 12000 } 12001 12002 /* 12003 * Set the pkt flags here so we save time later. 12004 * Note: These flags are NOT in the uscsi man page!!! 12005 */ 12006 if (uscmd->uscsi_flags & USCSI_HEAD) { 12007 flags |= FLAG_HEAD; 12008 } 12009 12010 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12011 flags |= FLAG_NOINTR; 12012 } 12013 12014 /* 12015 * For tagged queueing, things get a bit complicated. 12016 * Check first for head of queue and last for ordered queue. 12017 * If neither head nor order, use the default driver tag flags. 12018 */ 12019 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12020 if (uscmd->uscsi_flags & USCSI_HTAG) { 12021 flags |= FLAG_HTAG; 12022 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12023 flags |= FLAG_OTAG; 12024 } else { 12025 flags |= un->un_tagflags & FLAG_TAGMASK; 12026 } 12027 } 12028 12029 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12030 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12031 } 12032 12033 pktp->pkt_flags = flags; 12034 12035 /* Copy the caller's CDB into the pkt... */ 12036 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12037 12038 if (uscmd->uscsi_timeout == 0) { 12039 pktp->pkt_time = un->un_uscsi_timeout; 12040 } else { 12041 pktp->pkt_time = uscmd->uscsi_timeout; 12042 } 12043 12044 /* need it later to identify USCSI request in sdintr */ 12045 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12046 12047 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12048 12049 pktp->pkt_private = bp; 12050 pktp->pkt_comp = sdintr; 12051 *pktpp = pktp; 12052 12053 SD_TRACE(SD_LOG_IO_CORE, un, 12054 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12055 12056 return (SD_PKT_ALLOC_SUCCESS); 12057 } 12058 12059 12060 /* 12061 * Function: sd_destroypkt_for_uscsi 12062 * 12063 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12064 * IOs.. Also saves relevant info into the associated uscsi_cmd 12065 * struct. 
12066 * 12067 * Context: May be called under interrupt context 12068 */ 12069 12070 static void 12071 sd_destroypkt_for_uscsi(struct buf *bp) 12072 { 12073 struct uscsi_cmd *uscmd; 12074 struct sd_xbuf *xp; 12075 struct scsi_pkt *pktp; 12076 struct sd_lun *un; 12077 12078 ASSERT(bp != NULL); 12079 xp = SD_GET_XBUF(bp); 12080 ASSERT(xp != NULL); 12081 un = SD_GET_UN(bp); 12082 ASSERT(un != NULL); 12083 ASSERT(!mutex_owned(SD_MUTEX(un))); 12084 pktp = SD_GET_PKTP(bp); 12085 ASSERT(pktp != NULL); 12086 12087 SD_TRACE(SD_LOG_IO_CORE, un, 12088 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12089 12090 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12091 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12092 ASSERT(uscmd != NULL); 12093 12094 /* Save the status and the residual into the uscsi_cmd struct */ 12095 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12096 uscmd->uscsi_resid = bp->b_resid; 12097 12098 /* 12099 * If enabled, copy any saved sense data into the area specified 12100 * by the uscsi command. 12101 */ 12102 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12103 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12104 /* 12105 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12106 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12107 */ 12108 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12109 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12110 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH); 12111 } 12112 12113 /* We are done with the scsi_pkt; free it now */ 12114 ASSERT(SD_GET_PKTP(bp) != NULL); 12115 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12116 12117 SD_TRACE(SD_LOG_IO_CORE, un, 12118 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12119 } 12120 12121 12122 /* 12123 * Function: sd_bioclone_alloc 12124 * 12125 * Description: Allocate a buf(9S) and init it as per the given buf 12126 * and the various arguments. The associated sd_xbuf 12127 * struct is (nearly) duplicated. The struct buf *bp 12128 * argument is saved in new_xp->xb_private. 12129 * 12130 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12131 * datalen - size of data area for the shadow bp 12132 * blkno - starting LBA 12133 * func - function pointer for b_iodone in the shadow buf. (May 12134 * be NULL if none.) 12135 * 12136 * Return Code: Pointer to allocates buf(9S) struct 12137 * 12138 * Context: Can sleep. 12139 */ 12140 12141 static struct buf * 12142 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12143 daddr_t blkno, int (*func)(struct buf *)) 12144 { 12145 struct sd_lun *un; 12146 struct sd_xbuf *xp; 12147 struct sd_xbuf *new_xp; 12148 struct buf *new_bp; 12149 12150 ASSERT(bp != NULL); 12151 xp = SD_GET_XBUF(bp); 12152 ASSERT(xp != NULL); 12153 un = SD_GET_UN(bp); 12154 ASSERT(un != NULL); 12155 ASSERT(!mutex_owned(SD_MUTEX(un))); 12156 12157 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12158 NULL, KM_SLEEP); 12159 12160 new_bp->b_lblkno = blkno; 12161 12162 /* 12163 * Allocate an xbuf for the shadow bp and copy the contents of the 12164 * original xbuf into it. 12165 */ 12166 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12167 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12168 12169 /* 12170 * The given bp is automatically saved in the xb_private member 12171 * of the new xbuf. Callers are allowed to depend on this. 
12172 */ 12173 new_xp->xb_private = bp; 12174 12175 new_bp->b_private = new_xp; 12176 12177 return (new_bp); 12178 } 12179 12180 /* 12181 * Function: sd_shadow_buf_alloc 12182 * 12183 * Description: Allocate a buf(9S) and init it as per the given buf 12184 * and the various arguments. The associated sd_xbuf 12185 * struct is (nearly) duplicated. The struct buf *bp 12186 * argument is saved in new_xp->xb_private. 12187 * 12188 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12189 * datalen - size of data area for the shadow bp 12190 * bflags - B_READ or B_WRITE (pseudo flag) 12191 * blkno - starting LBA 12192 * func - function pointer for b_iodone in the shadow buf. (May 12193 * be NULL if none.) 12194 * 12195 * Return Code: Pointer to allocates buf(9S) struct 12196 * 12197 * Context: Can sleep. 12198 */ 12199 12200 static struct buf * 12201 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12202 daddr_t blkno, int (*func)(struct buf *)) 12203 { 12204 struct sd_lun *un; 12205 struct sd_xbuf *xp; 12206 struct sd_xbuf *new_xp; 12207 struct buf *new_bp; 12208 12209 ASSERT(bp != NULL); 12210 xp = SD_GET_XBUF(bp); 12211 ASSERT(xp != NULL); 12212 un = SD_GET_UN(bp); 12213 ASSERT(un != NULL); 12214 ASSERT(!mutex_owned(SD_MUTEX(un))); 12215 12216 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12217 bp_mapin(bp); 12218 } 12219 12220 bflags &= (B_READ | B_WRITE); 12221 #if defined(__i386) || defined(__amd64) 12222 new_bp = getrbuf(KM_SLEEP); 12223 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12224 new_bp->b_bcount = datalen; 12225 new_bp->b_flags = bflags | 12226 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12227 #else 12228 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12229 datalen, bflags, SLEEP_FUNC, NULL); 12230 #endif 12231 new_bp->av_forw = NULL; 12232 new_bp->av_back = NULL; 12233 new_bp->b_dev = bp->b_dev; 12234 new_bp->b_blkno = blkno; 12235 new_bp->b_iodone = func; 12236 new_bp->b_edev = bp->b_edev; 12237 new_bp->b_resid = 0; 12238 12239 /* We need to preserve the B_FAILFAST flag */ 12240 if (bp->b_flags & B_FAILFAST) { 12241 new_bp->b_flags |= B_FAILFAST; 12242 } 12243 12244 /* 12245 * Allocate an xbuf for the shadow bp and copy the contents of the 12246 * original xbuf into it. 12247 */ 12248 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12249 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12250 12251 /* Need later to copy data between the shadow buf & original buf! */ 12252 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12253 12254 /* 12255 * The given bp is automatically saved in the xb_private member 12256 * of the new xbuf. Callers are allowed to depend on this. 12257 */ 12258 new_xp->xb_private = bp; 12259 12260 new_bp->b_private = new_xp; 12261 12262 return (new_bp); 12263 } 12264 12265 /* 12266 * Function: sd_bioclone_free 12267 * 12268 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12269 * in the larger than partition operation. 12270 * 12271 * Context: May be called under interrupt context 12272 */ 12273 12274 static void 12275 sd_bioclone_free(struct buf *bp) 12276 { 12277 struct sd_xbuf *xp; 12278 12279 ASSERT(bp != NULL); 12280 xp = SD_GET_XBUF(bp); 12281 ASSERT(xp != NULL); 12282 12283 /* 12284 * Call bp_mapout() before freeing the buf, in case a lower 12285 * layer or HBA had done a bp_mapin(). we must do this here 12286 * as we are the "originator" of the shadow buf. 
12287 */ 12288 bp_mapout(bp); 12289 12290 /* 12291 * Null out b_iodone before freeing the bp, to ensure that the driver 12292 * never gets confused by a stale value in this field. (Just a little 12293 * extra defensiveness here.) 12294 */ 12295 bp->b_iodone = NULL; 12296 12297 freerbuf(bp); 12298 12299 kmem_free(xp, sizeof (struct sd_xbuf)); 12300 } 12301 12302 /* 12303 * Function: sd_shadow_buf_free 12304 * 12305 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12306 * 12307 * Context: May be called under interrupt context 12308 */ 12309 12310 static void 12311 sd_shadow_buf_free(struct buf *bp) 12312 { 12313 struct sd_xbuf *xp; 12314 12315 ASSERT(bp != NULL); 12316 xp = SD_GET_XBUF(bp); 12317 ASSERT(xp != NULL); 12318 12319 #if defined(__sparc) 12320 /* 12321 * Call bp_mapout() before freeing the buf, in case a lower 12322 * layer or HBA had done a bp_mapin(). we must do this here 12323 * as we are the "originator" of the shadow buf. 12324 */ 12325 bp_mapout(bp); 12326 #endif 12327 12328 /* 12329 * Null out b_iodone before freeing the bp, to ensure that the driver 12330 * never gets confused by a stale value in this field. (Just a little 12331 * extra defensiveness here.) 12332 */ 12333 bp->b_iodone = NULL; 12334 12335 #if defined(__i386) || defined(__amd64) 12336 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12337 freerbuf(bp); 12338 #else 12339 scsi_free_consistent_buf(bp); 12340 #endif 12341 12342 kmem_free(xp, sizeof (struct sd_xbuf)); 12343 } 12344 12345 12346 /* 12347 * Function: sd_print_transport_rejected_message 12348 * 12349 * Description: This implements the ludicrously complex rules for printing 12350 * a "transport rejected" message. This is to address the 12351 * specific problem of having a flood of this error message 12352 * produced when a failover occurs. 12353 * 12354 * Context: Any. 12355 */ 12356 12357 static void 12358 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12359 int code) 12360 { 12361 ASSERT(un != NULL); 12362 ASSERT(mutex_owned(SD_MUTEX(un))); 12363 ASSERT(xp != NULL); 12364 12365 /* 12366 * Print the "transport rejected" message under the following 12367 * conditions: 12368 * 12369 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12370 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12371 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12372 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12373 * scsi_transport(9F) (which indicates that the target might have 12374 * gone off-line). This uses the un->un_tran_fatal_count 12375 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12376 * received, and reset to zero whenver a TRAN_ACCEPT is returned 12377 * from scsi_transport(). 12378 * 12379 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12380 * the preceeding cases in order for the message to be printed. 
12381 */ 12382 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12383 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12384 (code != TRAN_FATAL_ERROR) || 12385 (un->un_tran_fatal_count == 1)) { 12386 switch (code) { 12387 case TRAN_BADPKT: 12388 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12389 "transport rejected bad packet\n"); 12390 break; 12391 case TRAN_FATAL_ERROR: 12392 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12393 "transport rejected fatal error\n"); 12394 break; 12395 default: 12396 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12397 "transport rejected (%d)\n", code); 12398 break; 12399 } 12400 } 12401 } 12402 } 12403 12404 12405 /* 12406 * Function: sd_add_buf_to_waitq 12407 * 12408 * Description: Add the given buf(9S) struct to the wait queue for the 12409 * instance. If sorting is enabled, then the buf is added 12410 * to the queue via an elevator sort algorithm (a la 12411 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12412 * If sorting is not enabled, then the buf is just added 12413 * to the end of the wait queue. 12414 * 12415 * Return Code: void 12416 * 12417 * Context: Does not sleep/block, therefore technically can be called 12418 * from any context. However if sorting is enabled then the 12419 * execution time is indeterminate, and may take long if 12420 * the wait queue grows large. 12421 */ 12422 12423 static void 12424 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12425 { 12426 struct buf *ap; 12427 12428 ASSERT(bp != NULL); 12429 ASSERT(un != NULL); 12430 ASSERT(mutex_owned(SD_MUTEX(un))); 12431 12432 /* If the queue is empty, add the buf as the only entry & return. */ 12433 if (un->un_waitq_headp == NULL) { 12434 ASSERT(un->un_waitq_tailp == NULL); 12435 un->un_waitq_headp = un->un_waitq_tailp = bp; 12436 bp->av_forw = NULL; 12437 return; 12438 } 12439 12440 ASSERT(un->un_waitq_tailp != NULL); 12441 12442 /* 12443 * If sorting is disabled, just add the buf to the tail end of 12444 * the wait queue and return. 12445 */ 12446 if (un->un_f_disksort_disabled) { 12447 un->un_waitq_tailp->av_forw = bp; 12448 un->un_waitq_tailp = bp; 12449 bp->av_forw = NULL; 12450 return; 12451 } 12452 12453 /* 12454 * Sort thru the list of requests currently on the wait queue 12455 * and add the new buf request at the appropriate position. 12456 * 12457 * The un->un_waitq_headp is an activity chain pointer on which 12458 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12459 * first queue holds those requests which are positioned after 12460 * the current SD_GET_BLKNO() (in the first request); the second holds 12461 * requests which came in after their SD_GET_BLKNO() number was passed. 12462 * Thus we implement a one way scan, retracting after reaching 12463 * the end of the drive to the first request on the second 12464 * queue, at which time it becomes the first queue. 12465 * A one-way scan is natural because of the way UNIX read-ahead 12466 * blocks are allocated. 12467 * 12468 * If we lie after the first request, then we must locate the 12469 * second request list and add ourselves to it. 12470 */ 12471 ap = un->un_waitq_headp; 12472 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12473 while (ap->av_forw != NULL) { 12474 /* 12475 * Look for an "inversion" in the (normally 12476 * ascending) block numbers. This indicates 12477 * the start of the second request list. 12478 */ 12479 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12480 /* 12481 * Search the second request list for the 12482 * first request at a larger block number. 
12483 * We go before that; however if there is 12484 * no such request, we go at the end. 12485 */ 12486 do { 12487 if (SD_GET_BLKNO(bp) < 12488 SD_GET_BLKNO(ap->av_forw)) { 12489 goto insert; 12490 } 12491 ap = ap->av_forw; 12492 } while (ap->av_forw != NULL); 12493 goto insert; /* after last */ 12494 } 12495 ap = ap->av_forw; 12496 } 12497 12498 /* 12499 * No inversions... we will go after the last, and 12500 * be the first request in the second request list. 12501 */ 12502 goto insert; 12503 } 12504 12505 /* 12506 * Request is at/after the current request... 12507 * sort in the first request list. 12508 */ 12509 while (ap->av_forw != NULL) { 12510 /* 12511 * We want to go after the current request (1) if 12512 * there is an inversion after it (i.e. it is the end 12513 * of the first request list), or (2) if the next 12514 * request is a larger block no. than our request. 12515 */ 12516 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12517 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12518 goto insert; 12519 } 12520 ap = ap->av_forw; 12521 } 12522 12523 /* 12524 * Neither a second list nor a larger request, therefore 12525 * we go at the end of the first list (which is the same 12526 * as the end of the whole schebang). 12527 */ 12528 insert: 12529 bp->av_forw = ap->av_forw; 12530 ap->av_forw = bp; 12531 12532 /* 12533 * If we inserted onto the tail end of the waitq, make sure the 12534 * tail pointer is updated. 12535 */ 12536 if (ap == un->un_waitq_tailp) { 12537 un->un_waitq_tailp = bp; 12538 } 12539 } 12540 12541 12542 /* 12543 * Function: sd_start_cmds 12544 * 12545 * Description: Remove and transport cmds from the driver queues. 12546 * 12547 * Arguments: un - pointer to the unit (soft state) struct for the target. 12548 * 12549 * immed_bp - ptr to a buf to be transported immediately. Only 12550 * the immed_bp is transported; bufs on the waitq are not 12551 * processed and the un_retry_bp is not checked. If immed_bp is 12552 * NULL, then normal queue processing is performed. 12553 * 12554 * Context: May be called from kernel thread context, interrupt context, 12555 * or runout callback context. This function may not block or 12556 * call routines that block. 12557 */ 12558 12559 static void 12560 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12561 { 12562 struct sd_xbuf *xp; 12563 struct buf *bp; 12564 void (*statp)(kstat_io_t *); 12565 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12566 void (*saved_statp)(kstat_io_t *); 12567 #endif 12568 int rval; 12569 12570 ASSERT(un != NULL); 12571 ASSERT(mutex_owned(SD_MUTEX(un))); 12572 ASSERT(un->un_ncmds_in_transport >= 0); 12573 ASSERT(un->un_throttle >= 0); 12574 12575 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12576 12577 do { 12578 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12579 saved_statp = NULL; 12580 #endif 12581 12582 /* 12583 * If we are syncing or dumping, fail the command to 12584 * avoid recursively calling back into scsi_transport(). 12585 * The dump I/O itself uses a separate code path so this 12586 * only prevents non-dump I/O from being sent while dumping. 12587 * File system sync takes place before dumping begins. 12588 * During panic, filesystem I/O is allowed provided 12589 * un_in_callback is <= 1. This is to prevent recursion 12590 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12591 * sd_start_cmds and so on. See panic.c for more information 12592 * about the states the system can be in during panic. 
12593 */ 12594 if ((un->un_state == SD_STATE_DUMPING) || 12595 (ddi_in_panic() && (un->un_in_callback > 1))) { 12596 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12597 "sd_start_cmds: panicking\n"); 12598 goto exit; 12599 } 12600 12601 if ((bp = immed_bp) != NULL) { 12602 /* 12603 * We have a bp that must be transported immediately. 12604 * It's OK to transport the immed_bp here without doing 12605 * the throttle limit check because the immed_bp is 12606 * always used in a retry/recovery case. This means 12607 * that we know we are not at the throttle limit by 12608 * virtue of the fact that to get here we must have 12609 * already gotten a command back via sdintr(). This also 12610 * relies on (1) the command on un_retry_bp preventing 12611 * further commands from the waitq from being issued; 12612 * and (2) the code in sd_retry_command checking the 12613 * throttle limit before issuing a delayed or immediate 12614 * retry. This holds even if the throttle limit is 12615 * currently ratcheted down from its maximum value. 12616 */ 12617 statp = kstat_runq_enter; 12618 if (bp == un->un_retry_bp) { 12619 ASSERT((un->un_retry_statp == NULL) || 12620 (un->un_retry_statp == kstat_waitq_enter) || 12621 (un->un_retry_statp == 12622 kstat_runq_back_to_waitq)); 12623 /* 12624 * If the waitq kstat was incremented when 12625 * sd_set_retry_bp() queued this bp for a retry, 12626 * then we must set up statp so that the waitq 12627 * count will get decremented correctly below. 12628 * Also we must clear un->un_retry_statp to 12629 * ensure that we do not act on a stale value 12630 * in this field. 12631 */ 12632 if ((un->un_retry_statp == kstat_waitq_enter) || 12633 (un->un_retry_statp == 12634 kstat_runq_back_to_waitq)) { 12635 statp = kstat_waitq_to_runq; 12636 } 12637 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12638 saved_statp = un->un_retry_statp; 12639 #endif 12640 un->un_retry_statp = NULL; 12641 12642 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12643 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12644 "un_throttle:%d un_ncmds_in_transport:%d\n", 12645 un, un->un_retry_bp, un->un_throttle, 12646 un->un_ncmds_in_transport); 12647 } else { 12648 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12649 "processing priority bp:0x%p\n", bp); 12650 } 12651 12652 } else if ((bp = un->un_waitq_headp) != NULL) { 12653 /* 12654 * A command on the waitq is ready to go, but do not 12655 * send it if: 12656 * 12657 * (1) the throttle limit has been reached, or 12658 * (2) a retry is pending, or 12659 * (3) a START_STOP_UNIT callback pending, or 12660 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12661 * command is pending. 12662 * 12663 * For all of these conditions, IO processing will 12664 * restart after the condition is cleared. 12665 */ 12666 if (un->un_ncmds_in_transport >= un->un_throttle) { 12667 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12668 "sd_start_cmds: exiting, " 12669 "throttle limit reached!\n"); 12670 goto exit; 12671 } 12672 if (un->un_retry_bp != NULL) { 12673 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12674 "sd_start_cmds: exiting, retry pending!\n"); 12675 goto exit; 12676 } 12677 if (un->un_startstop_timeid != NULL) { 12678 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12679 "sd_start_cmds: exiting, " 12680 "START_STOP pending!\n"); 12681 goto exit; 12682 } 12683 if (un->un_direct_priority_timeid != NULL) { 12684 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12685 "sd_start_cmds: exiting, " 12686 "SD_PATH_DIRECT_PRIORITY cmd. 
pending!\n"); 12687 goto exit; 12688 } 12689 12690 /* Dequeue the command */ 12691 un->un_waitq_headp = bp->av_forw; 12692 if (un->un_waitq_headp == NULL) { 12693 un->un_waitq_tailp = NULL; 12694 } 12695 bp->av_forw = NULL; 12696 statp = kstat_waitq_to_runq; 12697 SD_TRACE(SD_LOG_IO_CORE, un, 12698 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 12699 12700 } else { 12701 /* No work to do so bail out now */ 12702 SD_TRACE(SD_LOG_IO_CORE, un, 12703 "sd_start_cmds: no more work, exiting!\n"); 12704 goto exit; 12705 } 12706 12707 /* 12708 * Reset the state to normal. This is the mechanism by which 12709 * the state transitions from either SD_STATE_RWAIT or 12710 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 12711 * If state is SD_STATE_PM_CHANGING then this command is 12712 * part of the device power control and the state must 12713 * not be put back to normal. Doing so would would 12714 * allow new commands to proceed when they shouldn't, 12715 * the device may be going off. 12716 */ 12717 if ((un->un_state != SD_STATE_SUSPENDED) && 12718 (un->un_state != SD_STATE_PM_CHANGING)) { 12719 New_state(un, SD_STATE_NORMAL); 12720 } 12721 12722 xp = SD_GET_XBUF(bp); 12723 ASSERT(xp != NULL); 12724 12725 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12726 /* 12727 * Allocate the scsi_pkt if we need one, or attach DMA 12728 * resources if we have a scsi_pkt that needs them. The 12729 * latter should only occur for commands that are being 12730 * retried. 12731 */ 12732 if ((xp->xb_pktp == NULL) || 12733 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 12734 #else 12735 if (xp->xb_pktp == NULL) { 12736 #endif 12737 /* 12738 * There is no scsi_pkt allocated for this buf. Call 12739 * the initpkt function to allocate & init one. 12740 * 12741 * The scsi_init_pkt runout callback functionality is 12742 * implemented as follows: 12743 * 12744 * 1) The initpkt function always calls 12745 * scsi_init_pkt(9F) with sdrunout specified as the 12746 * callback routine. 12747 * 2) A successful packet allocation is initialized and 12748 * the I/O is transported. 12749 * 3) The I/O associated with an allocation resource 12750 * failure is left on its queue to be retried via 12751 * runout or the next I/O. 12752 * 4) The I/O associated with a DMA error is removed 12753 * from the queue and failed with EIO. Processing of 12754 * the transport queues is also halted to be 12755 * restarted via runout or the next I/O. 12756 * 5) The I/O associated with a CDB size or packet 12757 * size error is removed from the queue and failed 12758 * with EIO. Processing of the transport queues is 12759 * continued. 12760 * 12761 * Note: there is no interface for canceling a runout 12762 * callback. To prevent the driver from detaching or 12763 * suspending while a runout is pending the driver 12764 * state is set to SD_STATE_RWAIT 12765 * 12766 * Note: using the scsi_init_pkt callback facility can 12767 * result in an I/O request persisting at the head of 12768 * the list which cannot be satisfied even after 12769 * multiple retries. In the future the driver may 12770 * implement some kind of maximum runout count before 12771 * failing an I/O. 12772 * 12773 * Note: the use of funcp below may seem superfluous, 12774 * but it helps warlock figure out the correct 12775 * initpkt function calls (see [s]sd.wlcmd). 
12776 */ 12777 struct scsi_pkt *pktp; 12778 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 12779 12780 ASSERT(bp != un->un_rqs_bp); 12781 12782 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 12783 switch ((*funcp)(bp, &pktp)) { 12784 case SD_PKT_ALLOC_SUCCESS: 12785 xp->xb_pktp = pktp; 12786 SD_TRACE(SD_LOG_IO_CORE, un, 12787 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 12788 pktp); 12789 goto got_pkt; 12790 12791 case SD_PKT_ALLOC_FAILURE: 12792 /* 12793 * Temporary (hopefully) resource depletion. 12794 * Since retries and RQS commands always have a 12795 * scsi_pkt allocated, these cases should never 12796 * get here. So the only cases this needs to 12797 * handle is a bp from the waitq (which we put 12798 * back onto the waitq for sdrunout), or a bp 12799 * sent as an immed_bp (which we just fail). 12800 */ 12801 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12802 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 12803 12804 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12805 12806 if (bp == immed_bp) { 12807 /* 12808 * If SD_XB_DMA_FREED is clear, then 12809 * this is a failure to allocate a 12810 * scsi_pkt, and we must fail the 12811 * command. 12812 */ 12813 if ((xp->xb_pkt_flags & 12814 SD_XB_DMA_FREED) == 0) { 12815 break; 12816 } 12817 12818 /* 12819 * If this immediate command is NOT our 12820 * un_retry_bp, then we must fail it. 12821 */ 12822 if (bp != un->un_retry_bp) { 12823 break; 12824 } 12825 12826 /* 12827 * We get here if this cmd is our 12828 * un_retry_bp that was DMAFREED, but 12829 * scsi_init_pkt() failed to reallocate 12830 * DMA resources when we attempted to 12831 * retry it. This can happen when an 12832 * mpxio failover is in progress, but 12833 * we don't want to just fail the 12834 * command in this case. 12835 * 12836 * Use timeout(9F) to restart it after 12837 * a 100ms delay. We don't want to 12838 * let sdrunout() restart it, because 12839 * sdrunout() is just supposed to start 12840 * commands that are sitting on the 12841 * wait queue. The un_retry_bp stays 12842 * set until the command completes, but 12843 * sdrunout can be called many times 12844 * before that happens. Since sdrunout 12845 * cannot tell if the un_retry_bp is 12846 * already in the transport, it could 12847 * end up calling scsi_transport() for 12848 * the un_retry_bp multiple times. 12849 * 12850 * Also: don't schedule the callback 12851 * if some other callback is already 12852 * pending. 12853 */ 12854 if (un->un_retry_statp == NULL) { 12855 /* 12856 * restore the kstat pointer to 12857 * keep kstat counts coherent 12858 * when we do retry the command. 12859 */ 12860 un->un_retry_statp = 12861 saved_statp; 12862 } 12863 12864 if ((un->un_startstop_timeid == NULL) && 12865 (un->un_retry_timeid == NULL) && 12866 (un->un_direct_priority_timeid == 12867 NULL)) { 12868 12869 un->un_retry_timeid = 12870 timeout( 12871 sd_start_retry_command, 12872 un, SD_RESTART_TIMEOUT); 12873 } 12874 goto exit; 12875 } 12876 12877 #else 12878 if (bp == immed_bp) { 12879 break; /* Just fail the command */ 12880 } 12881 #endif 12882 12883 /* Add the buf back to the head of the waitq */ 12884 bp->av_forw = un->un_waitq_headp; 12885 un->un_waitq_headp = bp; 12886 if (un->un_waitq_tailp == NULL) { 12887 un->un_waitq_tailp = bp; 12888 } 12889 goto exit; 12890 12891 case SD_PKT_ALLOC_FAILURE_NO_DMA: 12892 /* 12893 * HBA DMA resource failure. Fail the command 12894 * and continue processing of the queues. 
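 *
 * (Contrast with SD_PKT_ALLOC_FAILURE above: that case leaves the
 * bp queued so that sdrunout() or a later I/O can retry the
 * allocation, while this case falls through to the common code
 * after the switch, which fails the bp with EIO but keeps
 * processing the waitq.)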
12895 */ 12896 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12897 "sd_start_cmds: " 12898 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 12899 break; 12900 12901 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 12902 /* 12903 * Note:x86: Partial DMA mapping not supported 12904 * for USCSI commands, and all the needed DMA 12905 * resources were not allocated. 12906 */ 12907 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12908 "sd_start_cmds: " 12909 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 12910 break; 12911 12912 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 12913 /* 12914 * Note:x86: Request cannot fit into CDB based 12915 * on lba and len. 12916 */ 12917 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12918 "sd_start_cmds: " 12919 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 12920 break; 12921 12922 default: 12923 /* Should NEVER get here! */ 12924 panic("scsi_initpkt error"); 12925 /*NOTREACHED*/ 12926 } 12927 12928 /* 12929 * Fatal error in allocating a scsi_pkt for this buf. 12930 * Update kstats & return the buf with an error code. 12931 * We must use sd_return_failed_command_no_restart() to 12932 * avoid a recursive call back into sd_start_cmds(). 12933 * However this also means that we must keep processing 12934 * the waitq here in order to avoid stalling. 12935 */ 12936 if (statp == kstat_waitq_to_runq) { 12937 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 12938 } 12939 sd_return_failed_command_no_restart(un, bp, EIO); 12940 if (bp == immed_bp) { 12941 /* immed_bp is gone by now, so clear this */ 12942 immed_bp = NULL; 12943 } 12944 continue; 12945 } 12946 got_pkt: 12947 if (bp == immed_bp) { 12948 /* goto the head of the class.... */ 12949 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 12950 } 12951 12952 un->un_ncmds_in_transport++; 12953 SD_UPDATE_KSTATS(un, statp, bp); 12954 12955 /* 12956 * Call scsi_transport() to send the command to the target. 12957 * According to SCSA architecture, we must drop the mutex here 12958 * before calling scsi_transport() in order to avoid deadlock. 12959 * Note that the scsi_pkt's completion routine can be executed 12960 * (from interrupt context) even before the call to 12961 * scsi_transport() returns. 12962 */ 12963 SD_TRACE(SD_LOG_IO_CORE, un, 12964 "sd_start_cmds: calling scsi_transport()\n"); 12965 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 12966 12967 mutex_exit(SD_MUTEX(un)); 12968 rval = scsi_transport(xp->xb_pktp); 12969 mutex_enter(SD_MUTEX(un)); 12970 12971 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12972 "sd_start_cmds: scsi_transport() returned %d\n", rval); 12973 12974 switch (rval) { 12975 case TRAN_ACCEPT: 12976 /* Clear this with every pkt accepted by the HBA */ 12977 un->un_tran_fatal_count = 0; 12978 break; /* Success; try the next cmd (if any) */ 12979 12980 case TRAN_BUSY: 12981 un->un_ncmds_in_transport--; 12982 ASSERT(un->un_ncmds_in_transport >= 0); 12983 12984 /* 12985 * Don't retry request sense, the sense data 12986 * is lost when another request is sent. 12987 * Free up the rqs buf and retry 12988 * the original failed cmd. Update kstat. 12989 */ 12990 if (bp == un->un_rqs_bp) { 12991 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 12992 bp = sd_mark_rqs_idle(un, xp); 12993 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 12994 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 12995 kstat_waitq_enter); 12996 goto exit; 12997 } 12998 12999 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13000 /* 13001 * Free the DMA resources for the scsi_pkt. 
This will 13002 * allow mpxio to select another path the next time 13003 * we call scsi_transport() with this scsi_pkt. 13004 * See sdintr() for the rationalization behind this. 13005 */ 13006 if ((un->un_f_is_fibre == TRUE) && 13007 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13008 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13009 scsi_dmafree(xp->xb_pktp); 13010 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13011 } 13012 #endif 13013 13014 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13015 /* 13016 * Commands that are SD_PATH_DIRECT_PRIORITY 13017 * are for error recovery situations. These do 13018 * not use the normal command waitq, so if they 13019 * get a TRAN_BUSY we cannot put them back onto 13020 * the waitq for later retry. One possible 13021 * problem is that there could already be some 13022 * other command on un_retry_bp that is waiting 13023 * for this one to complete, so we would be 13024 * deadlocked if we put this command back onto 13025 * the waitq for later retry (since un_retry_bp 13026 * must complete before the driver gets back to 13027 * commands on the waitq). 13028 * 13029 * To avoid deadlock we must schedule a callback 13030 * that will restart this command after a set 13031 * interval. This should keep retrying for as 13032 * long as the underlying transport keeps 13033 * returning TRAN_BUSY (just like for other 13034 * commands). Use the same timeout interval as 13035 * for the ordinary TRAN_BUSY retry. 13036 */ 13037 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13038 "sd_start_cmds: scsi_transport() returned " 13039 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13040 13041 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13042 un->un_direct_priority_timeid = 13043 timeout(sd_start_direct_priority_command, 13044 bp, SD_BSY_TIMEOUT / 500); 13045 13046 goto exit; 13047 } 13048 13049 /* 13050 * For TRAN_BUSY, we want to reduce the throttle value, 13051 * unless we are retrying a command. 13052 */ 13053 if (bp != un->un_retry_bp) { 13054 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13055 } 13056 13057 /* 13058 * Set up the bp to be tried again 10 ms later. 13059 * Note:x86: Is there a timeout value in the sd_lun 13060 * for this condition? 13061 */ 13062 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13063 kstat_runq_back_to_waitq); 13064 goto exit; 13065 13066 case TRAN_FATAL_ERROR: 13067 un->un_tran_fatal_count++; 13068 /* FALLTHRU */ 13069 13070 case TRAN_BADPKT: 13071 default: 13072 un->un_ncmds_in_transport--; 13073 ASSERT(un->un_ncmds_in_transport >= 0); 13074 13075 /* 13076 * If this is our REQUEST SENSE command with a 13077 * transport error, we must get back the pointers 13078 * to the original buf, and mark the REQUEST 13079 * SENSE command as "available". 13080 */ 13081 if (bp == un->un_rqs_bp) { 13082 bp = sd_mark_rqs_idle(un, xp); 13083 xp = SD_GET_XBUF(bp); 13084 } else { 13085 /* 13086 * Legacy behavior: do not update transport 13087 * error count for request sense commands. 13088 */ 13089 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13090 } 13091 13092 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13093 sd_print_transport_rejected_message(un, xp, rval); 13094 13095 /* 13096 * We must use sd_return_failed_command_no_restart() to 13097 * avoid a recursive call back into sd_start_cmds(). 13098 * However this also means that we must keep processing 13099 * the waitq here in order to avoid stalling. 
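 *
 * (The recursion being avoided, sketched as a call chain:
 *
 *	sd_start_cmds()
 *	  sd_return_failed_command()
 *	    sd_return_command()
 *	      sd_start_cmds()		... and so on
 *
 * sd_return_failed_command_no_restart() breaks the cycle by never
 * calling back into sd_start_cmds().)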
13100 */ 13101 sd_return_failed_command_no_restart(un, bp, EIO); 13102 13103 /* 13104 * Notify any threads waiting in sd_ddi_suspend() that 13105 * a command completion has occurred. 13106 */ 13107 if (un->un_state == SD_STATE_SUSPENDED) { 13108 cv_broadcast(&un->un_disk_busy_cv); 13109 } 13110 13111 if (bp == immed_bp) { 13112 /* immed_bp is gone by now, so clear this */ 13113 immed_bp = NULL; 13114 } 13115 break; 13116 } 13117 13118 } while (immed_bp == NULL); 13119 13120 exit: 13121 ASSERT(mutex_owned(SD_MUTEX(un))); 13122 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13123 } 13124 13125 13126 /* 13127 * Function: sd_return_command 13128 * 13129 * Description: Returns a command to its originator (with or without an 13130 * error). Also starts commands waiting to be transported 13131 * to the target. 13132 * 13133 * Context: May be called from interrupt, kernel, or timeout context 13134 */ 13135 13136 static void 13137 sd_return_command(struct sd_lun *un, struct buf *bp) 13138 { 13139 struct sd_xbuf *xp; 13140 #if defined(__i386) || defined(__amd64) 13141 struct scsi_pkt *pktp; 13142 #endif 13143 13144 ASSERT(bp != NULL); 13145 ASSERT(un != NULL); 13146 ASSERT(mutex_owned(SD_MUTEX(un))); 13147 ASSERT(bp != un->un_rqs_bp); 13148 xp = SD_GET_XBUF(bp); 13149 ASSERT(xp != NULL); 13150 13151 #if defined(__i386) || defined(__amd64) 13152 pktp = SD_GET_PKTP(bp); 13153 #endif 13154 13155 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13156 13157 #if defined(__i386) || defined(__amd64) 13158 /* 13159 * Note:x86: check for the "sdrestart failed" case. 13160 */ 13161 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13162 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13163 (xp->xb_pktp->pkt_resid == 0)) { 13164 13165 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13166 /* 13167 * Successfully set up next portion of cmd 13168 * transfer, try sending it 13169 */ 13170 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13171 NULL, NULL, 0, (clock_t)0, NULL); 13172 sd_start_cmds(un, NULL); 13173 return; /* Note:x86: need a return here? */ 13174 } 13175 } 13176 #endif 13177 13178 /* 13179 * If this is the failfast bp, clear it from un_failfast_bp. This 13180 * can happen if upon being re-tried the failfast bp either 13181 * succeeded or encountered another error (possibly even a different 13182 * error than the one that precipitated the failfast state, but in 13183 * that case it would have had to exhaust retries as well). Regardless, 13184 * this should not occur whenever the instance is in the active 13185 * failfast state. 13186 */ 13187 if (bp == un->un_failfast_bp) { 13188 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13189 un->un_failfast_bp = NULL; 13190 } 13191 13192 /* 13193 * Clear the failfast state upon successful completion of ANY cmd. 13194 */ 13195 if (bp->b_error == 0) { 13196 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13197 } 13198 13199 /* 13200 * This is used if the command was retried one or more times. Show that 13201 * we are done with it, and allow processing of the waitq to resume. 
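 *
 * (Clearing un_retry_bp here is what re-opens the gate in
 * sd_start_cmds(), which refuses to issue waitq commands while
 * un_retry_bp is non-NULL.)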
13202 */
13203 if (bp == un->un_retry_bp) {
13204 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13205 "sd_return_command: un:0x%p: "
13206 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
13207 un->un_retry_bp = NULL;
13208 un->un_retry_statp = NULL;
13209 }
13210
13211 SD_UPDATE_RDWR_STATS(un, bp);
13212 SD_UPDATE_PARTITION_STATS(un, bp);
13213
13214 switch (un->un_state) {
13215 case SD_STATE_SUSPENDED:
13216 /*
13217 * Notify any threads waiting in sd_ddi_suspend() that
13218 * a command completion has occurred.
13219 */
13220 cv_broadcast(&un->un_disk_busy_cv);
13221 break;
13222 default:
13223 sd_start_cmds(un, NULL);
13224 break;
13225 }
13226
13227 /* Return this command up the iodone chain to its originator. */
13228 mutex_exit(SD_MUTEX(un));
13229
13230 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
13231 xp->xb_pktp = NULL;
13232
13233 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
13234
13235 ASSERT(!mutex_owned(SD_MUTEX(un)));
13236 mutex_enter(SD_MUTEX(un));
13237
13238 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
13239 }
13240
13241
13242 /*
13243 * Function: sd_return_failed_command
13244 *
13245 * Description: Command completion when an error occurred.
13246 *
13247 * Context: May be called from interrupt context
13248 */
13249
13250 static void
13251 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
13252 {
13253 ASSERT(bp != NULL);
13254 ASSERT(un != NULL);
13255 ASSERT(mutex_owned(SD_MUTEX(un)));
13256
13257 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13258 "sd_return_failed_command: entry\n");
13259
13260 /*
13261 * b_resid could already be nonzero due to a partial data
13262 * transfer, so do not change it here.
13263 */
13264 SD_BIOERROR(bp, errcode);
13265
13266 sd_return_command(un, bp);
13267 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13268 "sd_return_failed_command: exit\n");
13269 }
13270
13271
13272 /*
13273 * Function: sd_return_failed_command_no_restart
13274 *
13275 * Description: Same as sd_return_failed_command, but ensures that no
13276 * call back into sd_start_cmds will be issued.
13277 *
13278 * Context: May be called from interrupt context
13279 */
13280
13281 static void
13282 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
13283 int errcode)
13284 {
13285 struct sd_xbuf *xp;
13286
13287 ASSERT(bp != NULL);
13288 ASSERT(un != NULL);
13289 ASSERT(mutex_owned(SD_MUTEX(un)));
13290 xp = SD_GET_XBUF(bp);
13291 ASSERT(xp != NULL);
13292 ASSERT(errcode != 0);
13293
13294 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13295 "sd_return_failed_command_no_restart: entry\n");
13296
13297 /*
13298 * b_resid could already be nonzero due to a partial data
13299 * transfer, so do not change it here.
13300 */
13301 SD_BIOERROR(bp, errcode);
13302
13303 /*
13304 * If this is the failfast bp, clear it. This can happen if the
13305 * failfast bp encountered a fatal error when we attempted to
13306 * re-try it (such as a scsi_transport(9F) failure). However
13307 * we should NOT be in an active failfast state if the failfast
13308 * bp is not NULL.
13309 */
13310 if (bp == un->un_failfast_bp) {
13311 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
13312 un->un_failfast_bp = NULL;
13313 }
13314
13315 if (bp == un->un_retry_bp) {
13316 /*
13317 * This command was retried one or more times. Show that we are
13318 * done with it, and allow processing of the waitq to resume.
13319 */
13320 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13321 "sd_return_failed_command_no_restart: "
13322 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
13323 un->un_retry_bp = NULL;
13324 un->un_retry_statp = NULL;
13325 }
13326
13327 SD_UPDATE_RDWR_STATS(un, bp);
13328 SD_UPDATE_PARTITION_STATS(un, bp);
13329
13330 mutex_exit(SD_MUTEX(un));
13331
13332 if (xp->xb_pktp != NULL) {
13333 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
13334 xp->xb_pktp = NULL;
13335 }
13336
13337 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
13338
13339 mutex_enter(SD_MUTEX(un));
13340
13341 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13342 "sd_return_failed_command_no_restart: exit\n");
13343 }
13344
13345
13346 /*
13347 * Function: sd_retry_command
13348 *
13349 * Description: Queue up a command for retry, or (optionally) fail it
13350 * if retry counts are exhausted.
13351 *
13352 * Arguments: un - Pointer to the sd_lun struct for the target.
13353 *
13354 * bp - Pointer to the buf for the command to be retried.
13355 *
13356 * retry_check_flag - Flag to see which (if any) of the retry
13357 * counts should be decremented/checked. If the indicated
13358 * retry count is exhausted, then the command will not be
13359 * retried; it will be failed instead. This should use a
13360 * value equal to one of the following:
13361 *
13362 * SD_RETRIES_NOCHECK
13363 * SD_RETRIES_STANDARD
13364 * SD_RETRIES_VICTIM
13365 *
13366 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
13367 * if the check should be made to see if FLAG_ISOLATE is set
13368 * in the pkt. If FLAG_ISOLATE is set, then the command is
13369 * not retried, it is simply failed.
13370 *
13371 * user_funcp - Ptr to function to call before dispatching the
13372 * command. May be NULL if no action needs to be performed.
13373 * (Primarily intended for printing messages.)
13374 *
13375 * user_arg - Optional argument to be passed along to
13376 * the user_funcp call.
13377 *
13378 * failure_code - errno return code to set in the bp if the
13379 * command is going to be failed.
13380 *
13381 * retry_delay - Retry delay interval in (clock_t) units. May
13382 * be zero which indicates that the command should be retried
13383 * immediately (ie, without an intervening delay).
13384
13385 * statp - Ptr to kstat function to be updated if the command
13386 * is queued for a delayed retry. May be NULL if no kstat
13387 * update is desired.
13388 *
13389 * Context: May be called from interrupt context.
13390 */
13391
13392 static void
13393 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
13394 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
13395 code), void *user_arg, int failure_code, clock_t retry_delay,
13396 void (*statp)(kstat_io_t *))
13397 {
13398 struct sd_xbuf *xp;
13399 struct scsi_pkt *pktp;
13400
13401 ASSERT(un != NULL);
13402 ASSERT(mutex_owned(SD_MUTEX(un)));
13403 ASSERT(bp != NULL);
13404 xp = SD_GET_XBUF(bp);
13405 ASSERT(xp != NULL);
13406 pktp = SD_GET_PKTP(bp);
13407 ASSERT(pktp != NULL);
13408
13409 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
13410 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
13411
13412 /*
13413 * If we are syncing or dumping, fail the command to avoid
13414 * recursively calling back into scsi_transport().
13415 */
13416 if (ddi_in_panic()) {
13417 goto fail_command_no_log;
13418 }
13419
13420 /*
13421 * We should never be retrying a command with FLAG_DIAGNOSE set, so
13422 * log an error and fail the command.
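 *
 * (FLAG_DIAGNOSE is set by callers that want no retries or error
 * recovery performed on a command, for example USCSI requests
 * issued with USCSI_DIAGNOSE; that linkage is our reading of the
 * flag, not something established by the code below.)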
13423 */
13424 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
13425 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
13426 "ERROR, retrying FLAG_DIAGNOSE command.\n");
13427 sd_dump_memory(un, SD_LOG_IO, "CDB",
13428 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
13429 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
13430 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
13431 goto fail_command;
13432 }
13433
13434 /*
13435 * If we are suspended, then put the command onto the head of the
13436 * wait queue since we don't want to start more commands.
13437 */
13438 switch (un->un_state) {
13439 case SD_STATE_SUSPENDED:
13440 case SD_STATE_DUMPING:
13441 bp->av_forw = un->un_waitq_headp;
13442 un->un_waitq_headp = bp;
13443 if (un->un_waitq_tailp == NULL) {
13444 un->un_waitq_tailp = bp;
13445 }
13446 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13447 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
13448 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
13449 return;
13450 default:
13451 break;
13452 }
13453
13454 /*
13455 * If the caller wants us to check FLAG_ISOLATE, then see if that
13456 * is set; if it is, then we do not want to retry the command.
13457 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
13458 */
13459 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
13460 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
13461 goto fail_command;
13462 }
13463 }
13464
13465
13466 /*
13467 * If SD_RETRIES_FAILFAST is set, it indicates that either a
13468 * command timeout or a selection timeout has occurred. This means
13469 * that we were unable to establish any kind of communication with
13470 * the target, and subsequent retries and/or commands are likely
13471 * to encounter similar results and take a long time to complete.
13472 *
13473 * If this is a failfast error condition, we need to update the
13474 * failfast state, even if this bp does not have B_FAILFAST set.
13475 */
13476 if (retry_check_flag & SD_RETRIES_FAILFAST) {
13477 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
13478 ASSERT(un->un_failfast_bp == NULL);
13479 /*
13480 * If we are already in the active failfast state, and
13481 * another failfast error condition has been detected,
13482 * then fail this command if it has B_FAILFAST set.
13483 * If B_FAILFAST is clear, then maintain the legacy
13484 * behavior of retrying heroically, even though this will
13485 * take a lot more time to fail the command.
13486 */
13487 if (bp->b_flags & B_FAILFAST) {
13488 goto fail_command;
13489 }
13490 } else {
13491 /*
13492 * We're not in the active failfast state, but we
13493 * have a failfast error condition, so we must begin
13494 * transition to the next state. We do this regardless
13495 * of whether or not this bp has B_FAILFAST set.
13496 */
13497 if (un->un_failfast_bp == NULL) {
13498 /*
13499 * This is the first bp to meet a failfast
13500 * condition so save it on un_failfast_bp &
13501 * do normal retry processing. Do not enter
13502 * active failfast state yet. This marks
13503 * entry into the "failfast pending" state.
13504 */
13505 un->un_failfast_bp = bp;
13506
13507 } else if (un->un_failfast_bp == bp) {
13508 /*
13509 * This is the second time *this* bp has
13510 * encountered a failfast error condition,
13511 * so enter active failfast state & flush
13512 * queues as appropriate.
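 *
 * (The failfast progression implemented here, summarized:
 *	un_failfast_bp == NULL, state INACTIVE: no condition seen
 *	un_failfast_bp == bp, state INACTIVE: "failfast pending"
 *	un_failfast_bp == NULL, state ACTIVE: queues flushed, and
 *	B_FAILFAST bufs now fail without further retries.)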
13513 */
13514 un->un_failfast_state = SD_FAILFAST_ACTIVE;
13515 un->un_failfast_bp = NULL;
13516 sd_failfast_flushq(un);
13517
13518 /*
13519 * Fail this bp now if B_FAILFAST set;
13520 * otherwise continue with retries. (It would
13521 * be pretty ironic if this bp succeeded on a
13522 * subsequent retry after we just flushed all
13523 * the queues).
13524 */
13525 if (bp->b_flags & B_FAILFAST) {
13526 goto fail_command;
13527 }
13528
13529 #if !defined(lint) && !defined(__lint)
13530 } else {
13531 /*
13532 * If neither of the preceding conditionals
13533 * was true, it means that there is some
13534 * *other* bp that has met an initial failfast
13535 * condition and is currently either being
13536 * retried or is waiting to be retried. In
13537 * that case we should perform normal retry
13538 * processing on *this* bp, since there is a
13539 * chance that the current failfast condition
13540 * is transient and recoverable. If that does
13541 * not turn out to be the case, then retries
13542 * will be cleared when the wait queue is
13543 * flushed anyway.
13544 */
13545 #endif
13546 }
13547 }
13548 } else {
13549 /*
13550 * SD_RETRIES_FAILFAST is clear, which indicates that we
13551 * likely were able to at least establish some level of
13552 * communication with the target and subsequent commands
13553 * and/or retries are likely to get through to the target.
13554 * In this case we want to be aggressive about clearing
13555 * the failfast state. Note that this does not affect
13556 * the "failfast pending" condition.
13557 */
13558 un->un_failfast_state = SD_FAILFAST_INACTIVE;
13559 }
13560
13561
13562 /*
13563 * Check the specified retry count to see if we can still do
13564 * any retries with this pkt before we should fail it.
13565 */
13566 switch (retry_check_flag & SD_RETRIES_MASK) {
13567 case SD_RETRIES_VICTIM:
13568 /*
13569 * Check the victim retry count. If exhausted, then fall
13570 * thru & check against the standard retry count.
13571 */
13572 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
13573 /* Increment count & proceed with the retry */
13574 xp->xb_victim_retry_count++;
13575 break;
13576 }
13577 /* Victim retries exhausted, fall back to std. retries... */
13578 /* FALLTHRU */
13579
13580 case SD_RETRIES_STANDARD:
13581 if (xp->xb_retry_count >= un->un_retry_count) {
13582 /* Retries exhausted, fail the command */
13583 SD_TRACE(SD_LOG_IO_CORE, un,
13584 "sd_retry_command: retries exhausted!\n");
13585 /*
13586 * Update b_resid for failed SCMD_READ & SCMD_WRITE
13587 * commands with nonzero pkt_resid.
13588 */
13589 if ((pktp->pkt_reason == CMD_CMPLT) &&
13590 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
13591 (pktp->pkt_resid != 0)) {
13592 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
13593 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
13594 SD_UPDATE_B_RESID(bp, pktp);
13595 }
13596 }
13597 goto fail_command;
13598 }
13599 xp->xb_retry_count++;
13600 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13601 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
13602 break;
13603
13604 case SD_RETRIES_UA:
13605 if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
13606 /* Retries exhausted, fail the command */
13607 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13608 "Unit Attention retries exhausted. 
" 13609 "Check the target.\n"); 13610 goto fail_command; 13611 } 13612 xp->xb_ua_retry_count++; 13613 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13614 "sd_retry_command: retry count:%d\n", 13615 xp->xb_ua_retry_count); 13616 break; 13617 13618 case SD_RETRIES_BUSY: 13619 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13620 /* Retries exhausted, fail the command */ 13621 SD_TRACE(SD_LOG_IO_CORE, un, 13622 "sd_retry_command: retries exhausted!\n"); 13623 goto fail_command; 13624 } 13625 xp->xb_retry_count++; 13626 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13627 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13628 break; 13629 13630 case SD_RETRIES_NOCHECK: 13631 default: 13632 /* No retry count to check. Just proceed with the retry */ 13633 break; 13634 } 13635 13636 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13637 13638 /* 13639 * If we were given a zero timeout, we must attempt to retry the 13640 * command immediately (ie, without a delay). 13641 */ 13642 if (retry_delay == 0) { 13643 /* 13644 * Check some limiting conditions to see if we can actually 13645 * do the immediate retry. If we cannot, then we must 13646 * fall back to queueing up a delayed retry. 13647 */ 13648 if (un->un_ncmds_in_transport >= un->un_throttle) { 13649 /* 13650 * We are at the throttle limit for the target, 13651 * fall back to delayed retry. 13652 */ 13653 retry_delay = SD_BSY_TIMEOUT; 13654 statp = kstat_waitq_enter; 13655 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13656 "sd_retry_command: immed. retry hit " 13657 "throttle!\n"); 13658 } else { 13659 /* 13660 * We're clear to proceed with the immediate retry. 13661 * First call the user-provided function (if any) 13662 */ 13663 if (user_funcp != NULL) { 13664 (*user_funcp)(un, bp, user_arg, 13665 SD_IMMEDIATE_RETRY_ISSUED); 13666 #ifdef __lock_lint 13667 sd_print_incomplete_msg(un, bp, user_arg, 13668 SD_IMMEDIATE_RETRY_ISSUED); 13669 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13670 SD_IMMEDIATE_RETRY_ISSUED); 13671 sd_print_sense_failed_msg(un, bp, user_arg, 13672 SD_IMMEDIATE_RETRY_ISSUED); 13673 #endif 13674 } 13675 13676 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13677 "sd_retry_command: issuing immediate retry\n"); 13678 13679 /* 13680 * Call sd_start_cmds() to transport the command to 13681 * the target. 13682 */ 13683 sd_start_cmds(un, bp); 13684 13685 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13686 "sd_retry_command exit\n"); 13687 return; 13688 } 13689 } 13690 13691 /* 13692 * Set up to retry the command after a delay. 13693 * First call the user-provided function (if any) 13694 */ 13695 if (user_funcp != NULL) { 13696 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 13697 } 13698 13699 sd_set_retry_bp(un, bp, retry_delay, statp); 13700 13701 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13702 return; 13703 13704 fail_command: 13705 13706 if (user_funcp != NULL) { 13707 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 13708 } 13709 13710 fail_command_no_log: 13711 13712 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13713 "sd_retry_command: returning failed command\n"); 13714 13715 sd_return_failed_command(un, bp, failure_code); 13716 13717 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13718 } 13719 13720 13721 /* 13722 * Function: sd_set_retry_bp 13723 * 13724 * Description: Set up the given bp for retry. 
13725 * 13726 * Arguments: un - ptr to associated softstate 13727 * bp - ptr to buf(9S) for the command 13728 * retry_delay - time interval before issuing retry (may be 0) 13729 * statp - optional pointer to kstat function 13730 * 13731 * Context: May be called under interrupt context 13732 */ 13733 13734 static void 13735 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 13736 void (*statp)(kstat_io_t *)) 13737 { 13738 ASSERT(un != NULL); 13739 ASSERT(mutex_owned(SD_MUTEX(un))); 13740 ASSERT(bp != NULL); 13741 13742 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13743 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 13744 13745 /* 13746 * Indicate that the command is being retried. This will not allow any 13747 * other commands on the wait queue to be transported to the target 13748 * until this command has been completed (success or failure). The 13749 * "retry command" is not transported to the target until the given 13750 * time delay expires, unless the user specified a 0 retry_delay. 13751 * 13752 * Note: the timeout(9F) callback routine is what actually calls 13753 * sd_start_cmds() to transport the command, with the exception of a 13754 * zero retry_delay. The only current implementor of a zero retry delay 13755 * is the case where a START_STOP_UNIT is sent to spin-up a device. 13756 */ 13757 if (un->un_retry_bp == NULL) { 13758 ASSERT(un->un_retry_statp == NULL); 13759 un->un_retry_bp = bp; 13760 13761 /* 13762 * If the user has not specified a delay the command should 13763 * be queued and no timeout should be scheduled. 13764 */ 13765 if (retry_delay == 0) { 13766 /* 13767 * Save the kstat pointer that will be used in the 13768 * call to SD_UPDATE_KSTATS() below, so that 13769 * sd_start_cmds() can correctly decrement the waitq 13770 * count when it is time to transport this command. 13771 */ 13772 un->un_retry_statp = statp; 13773 goto done; 13774 } 13775 } 13776 13777 if (un->un_retry_bp == bp) { 13778 /* 13779 * Save the kstat pointer that will be used in the call to 13780 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 13781 * correctly decrement the waitq count when it is time to 13782 * transport this command. 13783 */ 13784 un->un_retry_statp = statp; 13785 13786 /* 13787 * Schedule a timeout if: 13788 * 1) The user has specified a delay. 13789 * 2) There is not a START_STOP_UNIT callback pending. 13790 * 13791 * If no delay has been specified, then it is up to the caller 13792 * to ensure that IO processing continues without stalling. 13793 * Effectively, this means that the caller will issue the 13794 * required call to sd_start_cmds(). The START_STOP_UNIT 13795 * callback does this after the START STOP UNIT command has 13796 * completed. In either of these cases we should not schedule 13797 * a timeout callback here. Also don't schedule the timeout if 13798 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 13799 */ 13800 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 13801 (un->un_direct_priority_timeid == NULL)) { 13802 un->un_retry_timeid = 13803 timeout(sd_start_retry_command, un, retry_delay); 13804 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13805 "sd_set_retry_bp: setting timeout: un: 0x%p" 13806 " bp:0x%p un_retry_timeid:0x%p\n", 13807 un, bp, un->un_retry_timeid); 13808 } 13809 } else { 13810 /* 13811 * We only get in here if there is already another command 13812 * waiting to be retried. 
In this case, we just put the 13813 * given command onto the wait queue, so it can be transported 13814 * after the current retry command has completed. 13815 * 13816 * Also we have to make sure that if the command at the head 13817 * of the wait queue is the un_failfast_bp, that we do not 13818 * put ahead of it any other commands that are to be retried. 13819 */ 13820 if ((un->un_failfast_bp != NULL) && 13821 (un->un_failfast_bp == un->un_waitq_headp)) { 13822 /* 13823 * Enqueue this command AFTER the first command on 13824 * the wait queue (which is also un_failfast_bp). 13825 */ 13826 bp->av_forw = un->un_waitq_headp->av_forw; 13827 un->un_waitq_headp->av_forw = bp; 13828 if (un->un_waitq_headp == un->un_waitq_tailp) { 13829 un->un_waitq_tailp = bp; 13830 } 13831 } else { 13832 /* Enqueue this command at the head of the waitq. */ 13833 bp->av_forw = un->un_waitq_headp; 13834 un->un_waitq_headp = bp; 13835 if (un->un_waitq_tailp == NULL) { 13836 un->un_waitq_tailp = bp; 13837 } 13838 } 13839 13840 if (statp == NULL) { 13841 statp = kstat_waitq_enter; 13842 } 13843 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13844 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 13845 } 13846 13847 done: 13848 if (statp != NULL) { 13849 SD_UPDATE_KSTATS(un, statp, bp); 13850 } 13851 13852 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13853 "sd_set_retry_bp: exit un:0x%p\n", un); 13854 } 13855 13856 13857 /* 13858 * Function: sd_start_retry_command 13859 * 13860 * Description: Start the command that has been waiting on the target's 13861 * retry queue. Called from timeout(9F) context after the 13862 * retry delay interval has expired. 13863 * 13864 * Arguments: arg - pointer to associated softstate for the device. 13865 * 13866 * Context: timeout(9F) thread context. May not sleep. 13867 */ 13868 13869 static void 13870 sd_start_retry_command(void *arg) 13871 { 13872 struct sd_lun *un = arg; 13873 13874 ASSERT(un != NULL); 13875 ASSERT(!mutex_owned(SD_MUTEX(un))); 13876 13877 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13878 "sd_start_retry_command: entry\n"); 13879 13880 mutex_enter(SD_MUTEX(un)); 13881 13882 un->un_retry_timeid = NULL; 13883 13884 if (un->un_retry_bp != NULL) { 13885 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13886 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 13887 un, un->un_retry_bp); 13888 sd_start_cmds(un, un->un_retry_bp); 13889 } 13890 13891 mutex_exit(SD_MUTEX(un)); 13892 13893 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13894 "sd_start_retry_command: exit\n"); 13895 } 13896 13897 13898 /* 13899 * Function: sd_start_direct_priority_command 13900 * 13901 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 13902 * received TRAN_BUSY when we called scsi_transport() to send it 13903 * to the underlying HBA. This function is called from timeout(9F) 13904 * context after the delay interval has expired. 13905 * 13906 * Arguments: arg - pointer to associated buf(9S) to be restarted. 13907 * 13908 * Context: timeout(9F) thread context. May not sleep. 
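 *
 * (This callback is armed in sd_start_cmds() when a
 * SD_PATH_DIRECT_PRIORITY command gets TRAN_BUSY; restating the
 * call site above for reference:
 *
 *	un->un_direct_priority_timeid =
 *	    timeout(sd_start_direct_priority_command, bp,
 *	    SD_BSY_TIMEOUT / 500);
 *
 * so arg here is the buf(9S) saved at that point.)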
13909 */
13910
13911 static void
13912 sd_start_direct_priority_command(void *arg)
13913 {
13914 struct buf *priority_bp = arg;
13915 struct sd_lun *un;
13916
13917 ASSERT(priority_bp != NULL);
13918 un = SD_GET_UN(priority_bp);
13919 ASSERT(un != NULL);
13920 ASSERT(!mutex_owned(SD_MUTEX(un)));
13921
13922 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13923 "sd_start_direct_priority_command: entry\n");
13924
13925 mutex_enter(SD_MUTEX(un));
13926 un->un_direct_priority_timeid = NULL;
13927 sd_start_cmds(un, priority_bp);
13928 mutex_exit(SD_MUTEX(un));
13929
13930 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13931 "sd_start_direct_priority_command: exit\n");
13932 }
13933
13934
13935 /*
13936 * Function: sd_send_request_sense_command
13937 *
13938 * Description: Sends a REQUEST SENSE command to the target
13939 *
13940 * Context: May be called from interrupt context.
13941 */
13942
13943 static void
13944 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
13945 struct scsi_pkt *pktp)
13946 {
13947 ASSERT(bp != NULL);
13948 ASSERT(un != NULL);
13949 ASSERT(mutex_owned(SD_MUTEX(un)));
13950
13951 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
13952 "entry: buf:0x%p\n", bp);
13953
13954 /*
13955 * If we are syncing or dumping, then fail the command to avoid a
13956 * recursive callback into scsi_transport(). Also fail the command
13957 * if we are suspended (legacy behavior).
13958 */
13959 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
13960 (un->un_state == SD_STATE_DUMPING)) {
13961 sd_return_failed_command(un, bp, EIO);
13962 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13963 "sd_send_request_sense_command: syncing/dumping, exit\n");
13964 return;
13965 }
13966
13967 /*
13968 * Retry the failed command and don't issue the request sense if:
13969 * 1) the sense buf is busy
13970 * 2) we have 1 or more outstanding commands on the target
13971 * (the sense data will be cleared or invalidated anyway)
13972 *
13973 * Note: There could be an issue with not checking a retry limit here;
13974 * the problem is determining which retry limit to check.
13975 */
13976 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
13977 /* Don't retry if the command is flagged as non-retryable */
13978 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
13979 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
13980 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter);
13981 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13982 "sd_send_request_sense_command: "
13983 "at full throttle, retrying exit\n");
13984 } else {
13985 sd_return_failed_command(un, bp, EIO);
13986 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13987 "sd_send_request_sense_command: "
13988 "at full throttle, non-retryable exit\n");
13989 }
13990 return;
13991 }
13992
13993 sd_mark_rqs_busy(un, bp);
13994 sd_start_cmds(un, un->un_rqs_bp);
13995
13996 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13997 "sd_send_request_sense_command: exit\n");
13998 }
13999
14000
14001 /*
14002 * Function: sd_mark_rqs_busy
14003 *
14004 * Description: Indicate that the request sense bp for this instance is
14005 * in use.
14006 *
14007 * Context: May be called under interrupt context
14008 */
14009
14010 static void
14011 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
14012 {
14013 struct sd_xbuf *sense_xp;
14014
14015 ASSERT(un != NULL);
14016 ASSERT(bp != NULL);
14017 ASSERT(mutex_owned(SD_MUTEX(un)));
14018 ASSERT(un->un_sense_isbusy == 0);
14019
14020 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
14021 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
14022
14023 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
14024 ASSERT(sense_xp != NULL);
14025
14026 SD_INFO(SD_LOG_IO, un,
14027 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
14028
14029 ASSERT(sense_xp->xb_pktp != NULL);
14030 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
14031 == (FLAG_SENSING | FLAG_HEAD));
14032
14033 un->un_sense_isbusy = 1;
14034 un->un_rqs_bp->b_resid = 0;
14035 sense_xp->xb_pktp->pkt_resid = 0;
14036 sense_xp->xb_pktp->pkt_reason = 0;
14037
14038 /* So we can get back the bp at interrupt time! */
14039 sense_xp->xb_sense_bp = bp;
14040
14041 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
14042
14043 /*
14044 * Mark this buf as awaiting sense data. (This is already set in
14045 * the pkt_flags for the RQS packet.)
14046 */
14047 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
14048
14049 sense_xp->xb_retry_count = 0;
14050 sense_xp->xb_victim_retry_count = 0;
14051 sense_xp->xb_ua_retry_count = 0;
14052 sense_xp->xb_dma_resid = 0;
14053
14054 /* Clean up the fields for auto-request sense */
14055 sense_xp->xb_sense_status = 0;
14056 sense_xp->xb_sense_state = 0;
14057 sense_xp->xb_sense_resid = 0;
14058 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
14059
14060 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
14061 }
14062
14063
14064 /*
14065 * Function: sd_mark_rqs_idle
14066 *
14067 * Description: SD_MUTEX must be held continuously through this routine
14068 * to prevent reuse of the rqs struct before the caller can
14069 * complete its processing.
14070 *
14071 * Return Code: Pointer to the RQS buf
14072 *
14073 * Context: May be called under interrupt context
14074 */
14075
14076 static struct buf *
14077 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
14078 {
14079 struct buf *bp;
14080 ASSERT(un != NULL);
14081 ASSERT(sense_xp != NULL);
14082 ASSERT(mutex_owned(SD_MUTEX(un)));
14083 ASSERT(un->un_sense_isbusy != 0);
14084
14085 un->un_sense_isbusy = 0;
14086 bp = sense_xp->xb_sense_bp;
14087 sense_xp->xb_sense_bp = NULL;
14088
14089 /* This pkt is no longer interested in getting sense data */
14090 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
14091
14092 return (bp);
14093 }
14094
14095
14096
14097 /*
14098 * Function: sd_alloc_rqs
14099 *
14100 * Description: Set up the unit to receive auto request sense data
14101 *
14102 * Return Code: DDI_SUCCESS or DDI_FAILURE
14103 *
14104 * Context: Called under attach(9E) context
14105 */
14106
14107 static int
14108 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
14109 {
14110 struct sd_xbuf *xp;
14111
14112 ASSERT(un != NULL);
14113 ASSERT(!mutex_owned(SD_MUTEX(un)));
14114 ASSERT(un->un_rqs_bp == NULL);
14115 ASSERT(un->un_rqs_pktp == NULL);
14116
14117 /*
14118 * First allocate the required buf and scsi_pkt structs, then set up
14119 * the CDB in the scsi_pkt for a REQUEST SENSE command.
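 *
 * For reference, the six-byte CDB built by the scsi_setup_cdb()
 * call below amounts to:
 *
 *	{ SCMD_REQUEST_SENSE, 0, 0, 0, SENSE_LENGTH, 0 }
 *
 * that is, opcode 0x3 with the allocation length in byte 4; this
 * restates the call below rather than adding a step.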
14120 */
14121 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
14122 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
14123 if (un->un_rqs_bp == NULL) {
14124 return (DDI_FAILURE);
14125 }
14126
14127 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
14128 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
14129
14130 if (un->un_rqs_pktp == NULL) {
14131 sd_free_rqs(un);
14132 return (DDI_FAILURE);
14133 }
14134
14135 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
14136 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
14137 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
14138
14139 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
14140
14141 /* Set up the other needed members in the ARQ scsi_pkt. */
14142 un->un_rqs_pktp->pkt_comp = sdintr;
14143 un->un_rqs_pktp->pkt_time = sd_io_time;
14144 un->un_rqs_pktp->pkt_flags |=
14145 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */
14146
14147 /*
14148 * Allocate & init the sd_xbuf struct for the RQS command. Do not
14149 * provide any initpkt, destroypkt routines as we take care of
14150 * scsi_pkt allocation/freeing here and in sd_free_rqs().
14151 */
14152 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14153 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
14154 xp->xb_pktp = un->un_rqs_pktp;
14155 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14156 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
14157 un, xp, un->un_rqs_pktp, un->un_rqs_bp);
14158
14159 /*
14160 * Save the pointer to the request sense private bp so it can
14161 * be retrieved in sdintr.
14162 */
14163 un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
14164 ASSERT(un->un_rqs_bp->b_private == xp);
14165
14166 /*
14167 * See if the HBA supports auto-request sense for the specified
14168 * target/lun. If it does, then try to enable it (if not already
14169 * enabled).
14170 *
14171 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
14172 * failure, while for other HBAs (pln) scsi_ifsetcap will always
14173 * return success. However, in both of these cases ARQ is always
14174 * enabled and scsi_ifgetcap will always return true. The best approach
14175 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
14176 *
14177 * The 3rd case is an HBA (adp) that always returns enabled on
14178 * scsi_ifgetcap even when ARQ is not enabled; the best approach
14179 * there is to issue a scsi_ifsetcap then a scsi_ifgetcap.
14180 * Note: this case is to circumvent the Adaptec bug. (x86 only)
14181 */
14182
14183 if (un->un_f_is_fibre == TRUE) {
14184 un->un_f_arq_enabled = TRUE;
14185 } else {
14186 #if defined(__i386) || defined(__amd64)
14187 /*
14188 * Circumvent the Adaptec bug, remove this code when
14189 * the bug is fixed
14190 */
14191 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
14192 #endif
14193 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
14194 case 0:
14195 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14196 "sd_alloc_rqs: HBA supports ARQ\n");
14197 /*
14198 * ARQ is supported by this HBA but currently is not
14199 * enabled. Attempt to enable it and if successful then
14200 * mark this instance as ARQ enabled.
14201 */
14202 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
14203 == 1) {
14204 /* Successfully enabled ARQ in the HBA */
14205 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14206 "sd_alloc_rqs: ARQ enabled\n");
14207 un->un_f_arq_enabled = TRUE;
14208 } else {
14209 /* Could not enable ARQ in the HBA */
14210 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14211 "sd_alloc_rqs: failed ARQ enable\n");
14212 un->un_f_arq_enabled = FALSE;
14213 }
14214 break;
14215 case 1:
14216 /*
14217 * ARQ is supported by this HBA and is already enabled.
14218 * Just mark ARQ as enabled for this instance.
14219 */
14220 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14221 "sd_alloc_rqs: ARQ already enabled\n");
14222 un->un_f_arq_enabled = TRUE;
14223 break;
14224 default:
14225 /*
14226 * ARQ is not supported by this HBA; disable it for this
14227 * instance.
14228 */
14229 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14230 "sd_alloc_rqs: HBA does not support ARQ\n");
14231 un->un_f_arq_enabled = FALSE;
14232 break;
14233 }
14234 }
14235
14236 return (DDI_SUCCESS);
14237 }
14238
14239
14240 /*
14241 * Function: sd_free_rqs
14242 *
14243 * Description: Cleanup for the pre-instance RQS command.
14244 *
14245 * Context: Kernel thread context
14246 */
14247
14248 static void
14249 sd_free_rqs(struct sd_lun *un)
14250 {
14251 ASSERT(un != NULL);
14252
14253 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
14254
14255 /*
14256 * If consistent memory is bound to a scsi_pkt, the pkt
14257 * has to be destroyed *before* freeing the consistent memory.
14258 * Don't change the sequence of these operations.
14259 * scsi_destroy_pkt() might otherwise access memory after it
14260 * was freed in scsi_free_consistent_buf(), which is not allowed.
14261 */
14262 if (un->un_rqs_pktp != NULL) {
14263 scsi_destroy_pkt(un->un_rqs_pktp);
14264 un->un_rqs_pktp = NULL;
14265 }
14266
14267 if (un->un_rqs_bp != NULL) {
14268 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf));
14269 scsi_free_consistent_buf(un->un_rqs_bp);
14270 un->un_rqs_bp = NULL;
14271 }
14272 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
14273 }
14274
14275
14276
14277 /*
14278 * Function: sd_reduce_throttle
14279 *
14280 * Description: Reduces the maximum # of outstanding commands on a
14281 * target to the current number of outstanding commands.
14282 * Queues a timeout(9F) callback to restore the limit
14283 * after a specified interval has elapsed.
14284 * Typically used when we get a TRAN_BUSY return code
14285 * back from scsi_transport().
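 *
 *		For example (illustrative numbers only): with
 *		un_throttle at 16 and five commands outstanding, a
 *		TRAN_BUSY drops un_throttle to 5; the queued
 *		sd_restore_throttle() callback later raises it
 *		again.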
14286 *
14287 * Arguments: un - ptr to the sd_lun softstate struct
14288 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
14289 *
14290 * Context: May be called from interrupt context
14291 */
14292
14293 static void
14294 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
14295 {
14296 ASSERT(un != NULL);
14297 ASSERT(mutex_owned(SD_MUTEX(un)));
14298 ASSERT(un->un_ncmds_in_transport >= 0);
14299
14300 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
14301 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
14302 un, un->un_throttle, un->un_ncmds_in_transport);
14303
14304 if (un->un_throttle > 1) {
14305 if (un->un_f_use_adaptive_throttle == TRUE) {
14306 switch (throttle_type) {
14307 case SD_THROTTLE_TRAN_BUSY:
14308 if (un->un_busy_throttle == 0) {
14309 un->un_busy_throttle = un->un_throttle;
14310 }
14311 break;
14312 case SD_THROTTLE_QFULL:
14313 un->un_busy_throttle = 0;
14314 break;
14315 default:
14316 ASSERT(FALSE);
14317 }
14318
14319 if (un->un_ncmds_in_transport > 0) {
14320 un->un_throttle = un->un_ncmds_in_transport;
14321 }
14322
14323 } else {
14324 if (un->un_ncmds_in_transport == 0) {
14325 un->un_throttle = 1;
14326 } else {
14327 un->un_throttle = un->un_ncmds_in_transport;
14328 }
14329 }
14330 }
14331
14332 /* Reschedule the timeout if none is currently active */
14333 if (un->un_reset_throttle_timeid == NULL) {
14334 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
14335 un, SD_THROTTLE_RESET_INTERVAL);
14336 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14337 "sd_reduce_throttle: timeout scheduled!\n");
14338 }
14339
14340 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
14341 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
14342 }
14343
14344
14345
14346 /*
14347 * Function: sd_restore_throttle
14348 *
14349 * Description: Callback function for timeout(9F). Resets the current
14350 * value of un->un_throttle to its default.
14351 *
14352 * Arguments: arg - pointer to associated softstate for the device.
14353 *
14354 * Context: May be called from interrupt context
14355 */
14356
14357 static void
14358 sd_restore_throttle(void *arg)
14359 {
14360 struct sd_lun *un = arg;
14361
14362 ASSERT(un != NULL);
14363 ASSERT(!mutex_owned(SD_MUTEX(un)));
14364
14365 mutex_enter(SD_MUTEX(un));
14366
14367 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
14368 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
14369
14370 un->un_reset_throttle_timeid = NULL;
14371
14372 if (un->un_f_use_adaptive_throttle == TRUE) {
14373 /*
14374 * If un_busy_throttle is nonzero, then it contains the
14375 * value that un_throttle was when we got a TRAN_BUSY back
14376 * from scsi_transport(). We want to revert to this
14377 * value.
14378 *
14379 * In the QFULL case, the throttle limit will incrementally
14380 * increase until it reaches max throttle.
14381 */
14382 if (un->un_busy_throttle > 0) {
14383 un->un_throttle = un->un_busy_throttle;
14384 un->un_busy_throttle = 0;
14385 } else {
14386 /*
14387 * Increase the throttle by 10% to open the gate slowly;
14388 * schedule another restore if the saved throttle has
14389 * not been reached.
14390 */
14391 short throttle;
14392 if (sd_qfull_throttle_enable) {
14393 throttle = un->un_throttle +
14394 max((un->un_throttle / 10), 1);
14395 un->un_throttle =
14396 (throttle < un->un_saved_throttle) ?
14397 throttle : un->un_saved_throttle;
14398 if (un->un_throttle < un->un_saved_throttle) {
14399 un->un_reset_throttle_timeid =
14400 timeout(sd_restore_throttle,
14401 un, SD_QFULL_THROTTLE_RESET_INTERVAL);
14402 }
14403 }
14404 }
14405
14406 /*
14407 * If un_throttle has fallen below the low-water mark, we
14408 * restore the maximum value here (and allow it to ratchet
14409 * down again if necessary).
14410 */
14411 if (un->un_throttle < un->un_min_throttle) {
14412 un->un_throttle = un->un_saved_throttle;
14413 }
14414 } else {
14415 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
14416 "restoring limit from 0x%x to 0x%x\n",
14417 un->un_throttle, un->un_saved_throttle);
14418 un->un_throttle = un->un_saved_throttle;
14419 }
14420
14421 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14422 "sd_restore_throttle: calling sd_start_cmds!\n");
14423
14424 sd_start_cmds(un, NULL);
14425
14426 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14427 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
14428 un, un->un_throttle);
14429
14430 mutex_exit(SD_MUTEX(un));
14431
14432 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
14433 }
14434
14435 /*
14436 * Function: sdrunout
14437 *
14438 * Description: Callback routine for scsi_init_pkt when a resource allocation
14439 * fails.
14440 *
14441 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
14442 * soft state instance.
14443 *
14444 * Return Code: The scsi_init_pkt routine allows for the callback function to
14445 * return a 0 indicating the callback should be rescheduled or a 1
14446 * indicating not to reschedule. This routine always returns 1
14447 * because the driver always provides a callback function to
14448 * scsi_init_pkt. This results in a callback always being scheduled
14449 * (via the scsi_init_pkt callback implementation) if a resource
14450 * failure occurs.
14451 *
14452 * Context: This callback function may not block or call routines that block
14453 *
14454 * Note: Using the scsi_init_pkt callback facility can result in an I/O
14455 * request persisting at the head of the list which cannot be
14456 * satisfied even after multiple retries. In the future the driver
14457 * may implement some kind of maximum runout count before failing
14458 * an I/O.
14459 */
14460
14461 static int
14462 sdrunout(caddr_t arg)
14463 {
14464 struct sd_lun *un = (struct sd_lun *)arg;
14465
14466 ASSERT(un != NULL);
14467 ASSERT(!mutex_owned(SD_MUTEX(un)));
14468
14469 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
14470
14471 mutex_enter(SD_MUTEX(un));
14472 sd_start_cmds(un, NULL);
14473 mutex_exit(SD_MUTEX(un));
14474 /*
14475 * This callback routine always returns 1 (i.e. do not reschedule)
14476 * because we always specify sdrunout as the callback handler for
14477 * scsi_init_pkt inside the call to sd_start_cmds.
14478 */
14479 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
14480 return (1);
14481 }
14482
14483
14484 /*
14485 * Function: sdintr
14486 *
14487 * Description: Completion callback routine for scsi_pkt(9S) structs
14488 * sent to the HBA driver via scsi_transport(9F).
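 *
 * This routine is installed as the packet completion handler when
 * packets are built; the RQS packet case is visible above in
 * sd_alloc_rqs():
 *
 *	un->un_rqs_pktp->pkt_comp = sdintr;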

/*
 * Function: sdintr
 *
 * Description: Completion callback routine for scsi_pkt(9S) structs
 *		sent to the HBA driver via scsi_transport(9F).
 *
 * Context: Interrupt context
 */

static void
sdintr(struct scsi_pkt *pktp)
{
	struct buf *bp;
	struct sd_xbuf *xp;
	struct sd_lun *un;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp->xb_pktp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

#ifdef SD_FAULT_INJECTION

	SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
	/* SD FaultInjection */
	sd_faultinjection(pktp);

#endif /* SD_FAULT_INJECTION */

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
	    " xp:0x%p, un:0x%p\n", bp, xp, un);

	mutex_enter(SD_MUTEX(un));

	/* Decrement the count of commands currently in transport */
	un->un_ncmds_in_transport--;
	ASSERT(un->un_ncmds_in_transport >= 0);

	/* Increment counter to indicate that the callback routine is active */
	un->un_in_callback++;

	SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);

#ifdef SDDEBUG
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
		    "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
		    un, un->un_retry_bp, un->un_ncmds_in_transport);
	}
#endif

	/*
	 * If pkt_reason is CMD_DEV_GONE, just fail the command
	 */
	if (pktp->pkt_reason == CMD_DEV_GONE) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "Device is gone\n");
		sd_return_failed_command(un, bp, EIO);
		goto exit;
	}

	/*
	 * First see if the pkt has auto-request sense data with it....
	 * Look at the packet state first so we don't take a performance
	 * hit looking at the arq enabled flag unless absolutely necessary.
	 */
	if ((pktp->pkt_state & STATE_ARQ_DONE) &&
	    (un->un_f_arq_enabled == TRUE)) {
		/*
		 * The HBA did an auto request sense for this command so check
		 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
		 * driver command that should not be retried.
		 */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
			/*
			 * Save the relevant sense info into the xp for the
			 * original cmd.
			 */
			struct scsi_arq_status *asp;
			asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
			xp->xb_sense_status =
			    *((uchar_t *)(&(asp->sts_rqpkt_status)));
			xp->xb_sense_state = asp->sts_rqpkt_state;
			xp->xb_sense_resid = asp->sts_rqpkt_resid;
			bcopy(&asp->sts_sensedata, xp->xb_sense_data,
			    min(sizeof (struct scsi_extended_sense),
			    SENSE_LENGTH));

			/* fail the command */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: arq done and FLAG_DIAGNOSE set\n");
			sd_return_failed_command(un, bp, EIO);
			goto exit;
		}

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
		/*
		 * We want to either retry or fail this command, so free
		 * the DMA resources here. If we retry the command then
		 * the DMA resources will be reallocated in sd_start_cmds().
		 * Note that when PKT_DMA_PARTIAL is used, this reallocation
		 * causes the *entire* transfer to start over again from the
		 * beginning of the request, even for PARTIAL chunks that
		 * have already transferred successfully.
		 */
		if ((un->un_f_is_fibre == TRUE) &&
		    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
		    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
			scsi_dmafree(pktp);
			xp->xb_pkt_flags |= SD_XB_DMA_FREED;
		}
#endif

		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: arq done, sd_handle_auto_request_sense\n");

		sd_handle_auto_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/* Next see if this is the REQUEST SENSE pkt for the instance */
	if (pktp->pkt_flags & FLAG_SENSING) {
		/* This pktp is from the unit's REQUEST_SENSE command */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: sd_handle_request_sense\n");
		sd_handle_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/*
	 * Check to see if the command successfully completed as requested;
	 * this is the most common case (and also the hot performance path).
	 *
	 * Requirements for successful completion are:
	 * pkt_reason is CMD_CMPLT and packet status is status good.
	 * In addition:
	 * - A residual of zero indicates successful completion no matter what
	 *   the command is.
	 * - If the residual is not zero and the command is not a read or
	 *   write, then it's still defined as successful completion. In other
	 *   words, if the command is a read or write the residual must be
	 *   zero for successful completion.
	 * - If the residual is not zero and the command is a read or
	 *   write, and it's a USCSICMD, then it's still defined as
	 *   successful completion.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {

		/*
		 * Since this command is returned with a good status, we
		 * can reset the count for Sonoma failover.
		 */
		un->un_sonoma_failure_count = 0;

		/*
		 * Return all USCSI commands on good status
		 */
		if (pktp->pkt_resid == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid == 0\n");
		} else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
		    ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid != 0\n");
		} else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning uscsi command\n");
		} else {
			goto not_successful;
		}
		sd_return_command(un, bp);

		/*
		 * Decrement counter to indicate that the callback routine
		 * is done.
		 */
		un->un_in_callback--;
		ASSERT(un->un_in_callback >= 0);
		mutex_exit(SD_MUTEX(un));

		return;
	}

not_successful:

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
	/*
	 * The following is based upon knowledge of the underlying transport
	 * and its use of DMA resources. This code should be removed when
	 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
	 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
	 * and sd_start_cmds().
	 *
	 * Free any DMA resources associated with this command if there
	 * is a chance it could be retried or enqueued for later retry.
	 * If we keep the DMA binding then mpxio cannot reissue the
	 * command on another path whenever a path failure occurs.
	 *
	 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
	 * causes the *entire* transfer to start over again from the
	 * beginning of the request, even for PARTIAL chunks that
	 * have already transferred successfully.
	 *
	 * This is only done for non-uscsi commands (and also skipped for the
	 * driver's internal RQS command). Also just do this for Fibre Channel
	 * devices as these are the only ones that support mpxio.
	 */
	if ((un->un_f_is_fibre == TRUE) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
	    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
		scsi_dmafree(pktp);
		xp->xb_pkt_flags |= SD_XB_DMA_FREED;
	}
#endif

	/*
	 * The command did not successfully complete as requested so check
	 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
	 * driver command that should not be retried so just return. If
	 * FLAG_DIAGNOSE is not set the error will be processed below.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
		/*
		 * Issue a request sense if a check condition caused the error
		 * (we handle the auto request sense case above), otherwise
		 * just fail the command.
		 */
		if ((pktp->pkt_reason == CMD_CMPLT) &&
		    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
			sd_send_request_sense_command(un, bp, pktp);
		} else {
			sd_return_failed_command(un, bp, EIO);
		}
		goto exit;
	}

	/*
	 * The command did not successfully complete as requested so process
	 * the error, retry, and/or attempt recovery.
	 */
	switch (pktp->pkt_reason) {
	case CMD_CMPLT:
		switch (SD_GET_PKT_STATUS(pktp)) {
		case STATUS_GOOD:
			/*
			 * The command completed successfully with a non-zero
			 * residual
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_GOOD\n");
			sd_pkt_status_good(un, bp, xp, pktp);
			break;

		case STATUS_CHECK:
		case STATUS_TERMINATED:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
			sd_pkt_status_check_condition(un, bp, xp, pktp);
			break;

		case STATUS_BUSY:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_BUSY\n");
			sd_pkt_status_busy(un, bp, xp, pktp);
			break;

		case STATUS_RESERVATION_CONFLICT:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_RESERVATION_CONFLICT\n");
			sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
			break;

		case STATUS_QFULL:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_QFULL\n");
			sd_pkt_status_qfull(un, bp, xp, pktp);
			break;

		case STATUS_MET:
		case STATUS_INTERMEDIATE:
		case STATUS_SCSI2:
		case STATUS_INTERMEDIATE_MET:
		case STATUS_ACA_ACTIVE:
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Unexpected SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			sd_return_failed_command(un, bp, EIO);
			break;

		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Invalid SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			sd_return_failed_command(un, bp, EIO);
			break;

		}
		break;

	case CMD_INCOMPLETE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_INCOMPLETE\n");
		sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
		break;
	case CMD_TRAN_ERR:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TRAN_ERR\n");
		sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
		break;
	case CMD_RESET:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_RESET\n");
		sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
		break;
	case CMD_ABORTED:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_ABORTED\n");
		sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
		break;
	case CMD_TIMEOUT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TIMEOUT\n");
		sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
		break;
	case CMD_UNX_BUS_FREE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_UNX_BUS_FREE\n");
		sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
		break;
	case CMD_TAG_REJECT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TAG_REJECT\n");
		sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
		break;
	default:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: default\n");
		sd_pkt_reason_default(un, bp, xp, pktp);
		break;
	}

exit:
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");

	/* Decrement counter to indicate that the callback routine is done. */
	un->un_in_callback--;
	ASSERT(un->un_in_callback >= 0);

	/*
	 * At this point, the pkt has been dispatched, ie, it is either
	 * being re-tried or has been returned to its caller and should
	 * not be referenced.
	 */

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_print_incomplete_msg
 *
 * Description: Prints the error message for a CMD_INCOMPLETE error.
 *
 * Arguments: un - ptr to associated softstate for the device.
 *		bp - ptr to the buf(9S) for the command.
 *		arg - message string ptr
 *		code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
 *			or SD_NO_RETRY_ISSUED.
 *
 * Context: May be called under interrupt context
 */

static void
sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct scsi_pkt *pktp;
	char *msgp;
	char *cmdp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(arg != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	switch (code) {
	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
		msgp = "retrying";
		break;
	case SD_NO_RETRY_ISSUED:
	default:
		msgp = "giving up";
		break;
	}

	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "incomplete %s- %s\n", cmdp, msgp);
	}
}



/*
 * Function: sd_pkt_status_good
 *
 * Description: Processing for a STATUS_GOOD code in pkt_status.
 *
 * Context: May be called under interrupt context
 */

static void
sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	char *cmdp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_reason == CMD_CMPLT);
	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
	ASSERT(pktp->pkt_resid != 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
	case SCMD_READ:
		cmdp = "read";
		break;
	case SCMD_WRITE:
		cmdp = "write";
		break;
	default:
		SD_UPDATE_B_RESID(bp, pktp);
		sd_return_command(un, bp);
		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
		return;
	}

	/*
	 * See if we can retry the read/write, preferably immediately.
	 * If retries are exhausted, then sd_retry_command() will update
	 * the b_resid count.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
	    cmdp, EIO, (clock_t)0, NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
}
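
/*
 * Note for the opcode test in sd_pkt_status_good() above: masking the
 * CDB opcode with 0x1F strips the group code in bits 7..5, so SCMD_READ
 * (0x08) also matches the 10-byte READ (0x28), and likewise for writes.
 * A hedged sketch of the idea (illustrative only, not compiled into the
 * driver; the example function name is made up):
 */
#if 0
static int
sd_example_is_read_or_write(uchar_t opcode)
{
	uchar_t base = opcode & 0x1F;	/* strip CDB group code bits */

	return ((base == SCMD_READ) || (base == SCMD_WRITE));
}
#endif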

/*
 * Function: sd_handle_request_sense
 *
 * Description: Processing for non-auto Request Sense command.
 *
 * Arguments: un - ptr to associated softstate
 *		sense_bp - ptr to buf(9S) for the RQS command
 *		sense_xp - ptr to the sd_xbuf for the RQS command
 *		sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
	struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
{
	struct buf *cmd_bp;		/* buf for the original command */
	struct sd_xbuf *cmd_xp;		/* sd_xbuf for the original command */
	struct scsi_pkt *cmd_pktp;	/* pkt for the original command */

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(sense_bp != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(sense_pktp != NULL);

	/*
	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
	 * RQS command and not the original command.
	 */
	ASSERT(sense_pktp == un->un_rqs_pktp);
	ASSERT(sense_bp == un->un_rqs_bp);
	ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
	    (FLAG_SENSING | FLAG_HEAD));
	ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
	    FLAG_SENSING) == FLAG_SENSING);

	/* These are the bp, xp, and pktp for the original command */
	cmd_bp = sense_xp->xb_sense_bp;
	cmd_xp = SD_GET_XBUF(cmd_bp);
	cmd_pktp = SD_GET_PKTP(cmd_bp);

	if (sense_pktp->pkt_reason != CMD_CMPLT) {
		/*
		 * The REQUEST SENSE command failed. Release the REQUEST
		 * SENSE command for re-use, get back the bp for the original
		 * command, and attempt to re-try the original command if
		 * FLAG_DIAGNOSE is not set in the original packet.
		 */
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			cmd_bp = sd_mark_rqs_idle(un, sense_xp);
			sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
			    NULL, NULL, EIO, (clock_t)0, NULL);
			return;
		}
	}

	/*
	 * Save the relevant sense info into the xp for the original cmd.
	 *
	 * Note: if the request sense failed the state info will be zero
	 * as set in sd_mark_rqs_busy()
	 */
	cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
	cmd_xp->xb_sense_state = sense_pktp->pkt_state;
	cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
	bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH);

	/*
	 * Free up the RQS command....
	 * NOTE:
	 * Must do this BEFORE calling sd_validate_sense_data!
	 * sd_validate_sense_data may return the original command in
	 * which case the pkt will be freed and the flags can no
	 * longer be touched.
	 * SD_MUTEX is held through this process until the command
	 * is dispatched based upon the sense data, so there are
	 * no race conditions.
	 */
	(void) sd_mark_rqs_idle(un, sense_xp);

	/*
	 * For a retryable command see if we have valid sense data, if so then
	 * turn it over to sd_decode_sense() to figure out the right course of
	 * action. Just fail a non-retryable command.
	 */
	if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
		if (sd_validate_sense_data(un, cmd_bp, cmd_xp) ==
		    SD_SENSE_DATA_IS_VALID) {
			sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
		}
	} else {
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
		    (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
		    (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH,
		    SD_LOG_HEX);
		sd_return_failed_command(un, cmd_bp, EIO);
	}
}




/*
 * Function: sd_handle_auto_request_sense
 *
 * Description: Processing for auto-request sense information.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		xp - ptr to the sd_xbuf for the command
 *		pktp - ptr to the scsi_pkt(9S) for the command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct scsi_arq_status *asp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp != un->un_rqs_pktp);
	ASSERT(bp != un->un_rqs_bp);

	/*
	 * For auto-request sense, we get a scsi_arq_status back from
	 * the HBA, with the sense data in the sts_sensedata member.
	 * The pkt_scbp of the packet points to this scsi_arq_status.
	 */
	asp = (struct scsi_arq_status *)(pktp->pkt_scbp);

	if (asp->sts_rqpkt_reason != CMD_CMPLT) {
		/*
		 * The auto REQUEST SENSE failed; see if we can re-try
		 * the original command.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "auto request sense failed (reason=%s)\n",
		    scsi_rname(asp->sts_rqpkt_reason));

		sd_reset_target(un, pktp);

		sd_retry_command(un, bp, SD_RETRIES_STANDARD,
		    NULL, NULL, EIO, (clock_t)0, NULL);
		return;
	}

	/* Save the relevant sense info into the xp for the original cmd. */
	xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
	xp->xb_sense_state = asp->sts_rqpkt_state;
	xp->xb_sense_resid = asp->sts_rqpkt_resid;
	bcopy(&asp->sts_sensedata, xp->xb_sense_data,
	    min(sizeof (struct scsi_extended_sense), SENSE_LENGTH));

	/*
	 * See if we have valid sense data, if so then turn it over to
	 * sd_decode_sense() to figure out the right course of action.
	 */
	if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) {
		sd_decode_sense(un, bp, xp, pktp);
	}
}
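
/*
 * The auto request sense extraction used above follows a fixed pattern:
 * when STATE_ARQ_DONE is set, pkt_scbp points at a scsi_arq_status and
 * the sense bytes live in its sts_sensedata member. A condensed sketch
 * of just the copy step (illustrative only, not compiled into the
 * driver; the example function name is made up):
 */
#if 0
static void
sd_example_copy_arq_sense(struct scsi_pkt *pktp, uchar_t *sense_buf)
{
	struct scsi_arq_status *asp =
	    (struct scsi_arq_status *)(pktp->pkt_scbp);

	/* Copy at most SENSE_LENGTH bytes of the returned sense data. */
	bcopy(&asp->sts_sensedata, sense_buf,
	    min(sizeof (struct scsi_extended_sense), SENSE_LENGTH));
}
#endif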

/*
 * Function: sd_print_sense_failed_msg
 *
 * Description: Print log message when RQS has failed.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - generic message string ptr
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *			or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
	int code)
{
	char *msgp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
	}
}


/*
 * Function: sd_validate_sense_data
 *
 * Description: Check the given sense data for validity.
 *		If the sense data is not valid, the command will
 *		be either failed or retried!
 *
 * Return Code: SD_SENSE_DATA_IS_INVALID
 *		SD_SENSE_DATA_IS_VALID
 *
 * Context: May be called from interrupt context
 */

static int
sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp)
{
	struct scsi_extended_sense *esp;
	struct scsi_pkt *pktp;
	size_t actual_len;
	char *msgp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);

	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	/*
	 * Check the status of the RQS command (auto or manual).
	 */
	switch (xp->xb_sense_status & STATUS_MASK) {
	case STATUS_GOOD:
		break;

	case STATUS_RESERVATION_CONFLICT:
		sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_BUSY:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Busy Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
		    NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_QFULL:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "QFULL Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
		    NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_CHECK:
	case STATUS_TERMINATED:
		msgp = "Check Condition on REQUEST SENSE\n";
		goto sense_failed;

	default:
		msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
		goto sense_failed;
	}

	/*
	 * See if we got the minimum required amount of sense data.
	 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
	 * or less.
	 */
	actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid);
	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
	    (actual_len == 0)) {
		msgp = "Request Sense couldn't get sense data\n";
		goto sense_failed;
	}

	if (actual_len < SUN_MIN_SENSE_LENGTH) {
		msgp = "Not enough sense information\n";
		goto sense_failed;
	}

	/*
	 * We require the extended sense data
	 */
	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
	if (esp->es_class != CLASS_EXTENDED_SENSE) {
		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
			static char tmp[8];
			static char buf[148];
			char *p = (char *)(xp->xb_sense_data);
			int i;

			mutex_enter(&sd_sense_mutex);
			(void) strcpy(buf, "undecodable sense information:");
			for (i = 0; i < actual_len; i++) {
				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
				(void) strcpy(&buf[strlen(buf)], tmp);
			}
			i = strlen(buf);
			(void) strcpy(&buf[i], "-(assumed fatal)\n");
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf);
			mutex_exit(&sd_sense_mutex);
		}
		/* Note: Legacy behavior, fail the command with no retry */
		sd_return_failed_command(un, bp, EIO);
		return (SD_SENSE_DATA_IS_INVALID);
	}

	/*
	 * Check that es_code is valid (es_class concatenated with es_code
	 * makes up the "response code" field). es_class will always be 7,
	 * so make sure es_code is 0, 1, 2, 3 or 0xf. es_code indicates the
	 * sense data format.
	 */
	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
		goto sense_failed;
	}

	return (SD_SENSE_DATA_IS_VALID);

sense_failed:
	/*
	 * If the request sense failed (for whatever reason), attempt
	 * to retry the original command.
	 */
#if defined(__i386) || defined(__amd64)
	/*
	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in sddef.h
	 * for the SPARC platform, while x86 uses one binary for both SCSI
	 * and FC. The delay value used here must be adjusted whenever
	 * SD_RETRY_DELAY changes in sddef.h.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO,
	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
#else
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
#endif

	return (SD_SENSE_DATA_IS_INVALID);
}
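
/*
 * For reference, the response code checked above occupies byte 0 of the
 * sense data: bit 7 is the "valid" bit, bits 6..4 are es_class (always
 * CLASS_EXTENDED_SENSE, 0x7, for usable sense) and bits 3..0 are es_code.
 * A sketch of the equivalent test over the raw byte (illustrative only,
 * not compiled into the driver; the example function name is made up):
 */
#if 0
static int
sd_example_response_code_ok(uchar_t sense_byte0)
{
	uchar_t class = (sense_byte0 >> 4) & 0x7;
	uchar_t code = sense_byte0 & 0xF;

	/* Accept fixed/descriptor, current/deferred, or vendor-specific. */
	return ((class == CLASS_EXTENDED_SENSE) &&
	    ((code <= 0x3) || (code == 0xF)));
}
#endif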

/*
 * Function: sd_decode_sense
 *
 * Description: Take recovery action(s) when SCSI Sense Data is received.
 *
 * Context: Interrupt context.
 */

static void
sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	struct scsi_pkt *pktp)
{
	uint8_t sense_key;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	sense_key = scsi_sense_key(xp->xb_sense_data);

	switch (sense_key) {
	case KEY_NO_SENSE:
		sd_sense_key_no_sense(un, bp, xp, pktp);
		break;
	case KEY_RECOVERABLE_ERROR:
		sd_sense_key_recoverable_error(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_NOT_READY:
		sd_sense_key_not_ready(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_MEDIUM_ERROR:
	case KEY_HARDWARE_ERROR:
		sd_sense_key_medium_or_hardware_error(un,
		    xp->xb_sense_data, bp, xp, pktp);
		break;
	case KEY_ILLEGAL_REQUEST:
		sd_sense_key_illegal_request(un, bp, xp, pktp);
		break;
	case KEY_UNIT_ATTENTION:
		sd_sense_key_unit_attention(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_WRITE_PROTECT:
	case KEY_VOLUME_OVERFLOW:
	case KEY_MISCOMPARE:
		sd_sense_key_fail_command(un, bp, xp, pktp);
		break;
	case KEY_BLANK_CHECK:
		sd_sense_key_blank_check(un, bp, xp, pktp);
		break;
	case KEY_ABORTED_COMMAND:
		sd_sense_key_aborted_command(un, bp, xp, pktp);
		break;
	case KEY_VENDOR_UNIQUE:
	case KEY_COPY_ABORTED:
	case KEY_EQUAL:
	case KEY_RESERVED:
	default:
		sd_sense_key_default(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	}
}


/*
 * Function: sd_dump_memory
 *
 * Description: Debug logging routine to print the contents of a user provided
 *		buffer. The output of the buffer is broken up into 256 byte
 *		segments due to a size constraint of the scsi_log
 *		implementation.
 *
 * Arguments: un - ptr to softstate
 *		comp - component mask
 *		title - "title" string to precede data when printed
 *		data - ptr to data block to be printed
 *		len - size of data block to be printed
 *		fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
 *
 * Context: May be called from interrupt context
 */

#define	SD_DUMP_MEMORY_BUF_SIZE	256

static char *sd_dump_format_string[] = {
		" 0x%02x",
		" %c"
};

static void
sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
	int len, int fmt)
{
	int i, j;
	int avail_count;
	int start_offset;
	int end_offset;
	size_t entry_len;
	char *bufp;
	char *local_buf;
	char *format_string;

	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));

	/*
	 * In the debug version of the driver, this function is called from a
	 * number of places which are NOPs in the release driver.
	 * The debug driver therefore has additional methods of filtering
	 * debug output.
	 */
#ifdef SDDEBUG
	/*
	 * In the debug version of the driver we can reduce the amount of debug
	 * messages by setting sd_error_level to something other than
	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
	 * sd_component_mask.
	 */
	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
	if (((sd_component_mask & comp) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
#else
	if (sd_error_level != SCSI_ERR_ALL) {
		return;
	}
#endif

	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
	bufp = local_buf;
	/*
	 * Available length is the length of local_buf[], minus the
	 * length of the title string, minus one for the ":", minus
	 * one for the newline, minus one for the NULL terminator.
	 * This gives the #bytes available for holding the printed
	 * values from the given data buffer.
	 */
	if (fmt == SD_LOG_HEX) {
		format_string = sd_dump_format_string[0];
	} else /* SD_LOG_CHAR */ {
		format_string = sd_dump_format_string[1];
	}
	/*
	 * Available count is the number of elements from the given
	 * data buffer that we can fit into the available length.
	 * This is based upon the size of the format string used.
	 * Make one entry and find its size.
	 */
	(void) sprintf(bufp, format_string, data[0]);
	entry_len = strlen(bufp);
	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) /
	    entry_len;

	j = 0;
	while (j < len) {
		bufp = local_buf;
		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
		start_offset = j;

		end_offset = start_offset + avail_count;

		(void) sprintf(bufp, "%s:", title);
		bufp += strlen(bufp);
		for (i = start_offset; ((i < end_offset) && (j < len));
		    i++, j++) {
			(void) sprintf(bufp, format_string, data[i]);
			bufp += entry_len;
		}
		(void) sprintf(bufp, "\n");

		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
	}
	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
}
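
/*
 * Typical usage of sd_dump_memory(), as seen elsewhere in this file
 * (shown here for reference): dump a failed CDB and its sense bytes in
 * hex before deciding on a retry.
 *
 *	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
 *	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
 *	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
 *	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);
 */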

/*
 * Function: sd_print_sense_msg
 *
 * Description: Log a message based upon the given sense data.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - ptr to associated sd_sense_info struct
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *			or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	uint8_t *sensep;
	daddr_t request_blkno;
	diskaddr_t err_blkno;
	int severity;
	int pfa_flag;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	ASSERT(arg != NULL);

	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		severity = SCSI_ERR_RETRYABLE;
	}

	/* Use absolute block number for the request block number */
	request_blkno = xp->xb_blkno;

	/*
	 * Now try to get the error block number from the sense data
	 */
	sensep = xp->xb_sense_data;

	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
	    (uint64_t *)&err_blkno)) {
		/*
		 * We retrieved the error block number from the information
		 * portion of the sense data.
		 *
		 * For USCSI commands we are better off using the error
		 * block no. as the requested block no. (This is the best
		 * we can estimate.)
		 */
		if ((SD_IS_BUFIO(xp) == FALSE) &&
		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
			request_blkno = err_blkno;
		}
	} else {
		/*
		 * Without the es_valid bit set (for fixed format) or an
		 * information descriptor (for descriptor format) we cannot
		 * be certain of the error blkno, so just use the
		 * request_blkno.
		 */
		err_blkno = (diskaddr_t)request_blkno;
	}

	/*
	 * The following will log the buffer contents for the release driver
	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
	 * level is set to verbose.
	 */
	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

	if (pfa_flag == FALSE) {
		/* This is normally only set for USCSI */
		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
			return;
		}

		if ((SD_IS_BUFIO(xp) == TRUE) &&
		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
		    (severity < sd_error_level))) {
			return;
		}
	}

	/*
	 * Check for Sonoma Failover and keep a count of how many failed I/O's
	 */
	if ((SD_IS_LSI(un)) &&
	    (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
	    (scsi_sense_asc(sensep) == 0x94) &&
	    (scsi_sense_ascq(sensep) == 0x01)) {
		un->un_sonoma_failure_count++;
		if (un->un_sonoma_failure_count > 1) {
			return;
		}
	}

	scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
	    request_blkno, err_blkno, scsi_cmds,
	    (struct scsi_extended_sense *)sensep,
	    un->un_additional_codes, NULL);
}

/*
 * Function: sd_sense_key_no_sense
 *
 * Description: Recovery action when sense data was not received.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	SD_UPDATE_ERRSTATS(un, sd_softerrs);

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}


/*
 * Function: sd_sense_key_recoverable_error
 *
 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_recoverable_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
	 */
	if ((asc == 0x5D) && (sd_report_pfa != 0)) {
		SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
		si.ssi_severity = SCSI_ERR_INFO;
		si.ssi_pfa_flag = TRUE;
	} else {
		SD_UPDATE_ERRSTATS(un, sd_softerrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
		si.ssi_severity = SCSI_ERR_RECOVERED;
		si.ssi_pfa_flag = FALSE;
	}

	if (pktp->pkt_resid == 0) {
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_command(un, bp);
		return;
	}

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}
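
/*
 * The sense-key handlers below all start from the same three fields of
 * the sense data, extracted via scsi_sense_key()/scsi_sense_asc()/
 * scsi_sense_ascq(). For fixed-format sense these reduce to simple byte
 * offsets, sketched below (illustrative only, not compiled into the
 * driver; the example function name is made up):
 */
#if 0
static void
sd_example_fixed_sense_triple(uchar_t *sensep, uchar_t *key, uchar_t *asc,
    uchar_t *ascq)
{
	*key = sensep[2] & 0x0F;	/* e.g. KEY_NOT_READY */
	*asc = sensep[12];		/* additional sense code */
	*ascq = sensep[13];		/* additional sense code qualifier */
}
#endif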

/*
 * Function: sd_sense_key_not_ready
 *
 * Description: Recovery actions for a SCSI "Not Ready" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats after first NOT READY error. Disks may have
	 * been powered down and may need to be restarted. For CDROMs,
	 * report NOT READY errors only if media is present.
	 */
	if ((ISCD(un) && (asc == 0x3A)) ||
	    (xp->xb_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/*
	 * Just fail if the "not ready" retry limit has been reached.
	 */
	if (xp->xb_retry_count >= un->un_notready_retry_count) {
		/* Special check for error message printing for removables. */
		if (un->un_f_has_removable_media && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	/*
	 * Check the ASC and ASCQ in the sense data as needed, to determine
	 * what to do.
	 */
	switch (asc) {
	case 0x04:	/* LOGICAL UNIT NOT READY */
		/*
		 * Disk drives that don't spin up result in a very long delay
		 * in format without warning messages. We will log a message
		 * if the error level is set to verbose.
		 */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		/*
		 * There are different requirements for CDROMs and disks for
		 * the number of retries. If a CD-ROM is giving this, it is
		 * probably reading TOC and is in the process of getting
		 * ready, so we should keep on trying for a long time to make
		 * sure that all types of media are taken into account (for
		 * some media the drive takes a long time to read TOC). For
		 * disks we do not want to retry this too many times as this
		 * can cause a long hang in format when the drive refuses to
		 * spin up (a very common failure).
		 */
		switch (ascq) {
		case 0x00:	/* LUN NOT READY, CAUSE NOT REPORTABLE */
			/*
			 * Disk drives frequently refuse to spin up which
			 * results in a very long hang in format without
			 * warning messages.
			 *
			 * Note: This code preserves the legacy behavior of
			 * comparing xb_retry_count against zero for fibre
			 * channel targets instead of comparing against the
			 * un_reset_retry_count value. The reason for this
			 * discrepancy has been so utterly lost beneath the
			 * Sands of Time that even Indiana Jones could not
			 * find it.
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_retry_count > 0)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_retry_count >
				    un->un_reset_retry_count)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:	/* LUN IS IN PROCESS OF BECOMING READY */
			/*
			 * If the target is in the process of becoming
			 * ready, just proceed with the retry. This can
			 * happen with CD-ROMs that take a long time to
			 * read TOC after a power cycle or reset.
			 */
			goto do_retry;

		case 0x02:	/* LUN NOT READY, INITIALIZING CMD REQUIRED */
			break;

		case 0x03:	/* LUN NOT READY, MANUAL INTERVENTION REQ'D */
			/*
			 * Retries cannot help here so just fail right away.
			 */
			goto fail_command;

		case 0x88:
			/*
			 * Vendor-unique code for T3/T4: it indicates a
			 * path problem in a multipathed config, but as far as
			 * the target driver is concerned it equates to a fatal
			 * error, so we should just fail the command right away
			 * (without printing anything to the console). If this
			 * is not a T3/T4, fall thru to the default recovery
			 * action.
			 * T3/T4 is FC only, don't need to check is_fibre
			 */
			if (SD_IS_T3(un) || SD_IS_T4(un)) {
				sd_return_failed_command(un, bp, EIO);
				return;
			}
			/* FALLTHRU */

		case 0x04:	/* LUN NOT READY, FORMAT IN PROGRESS */
		case 0x05:	/* LUN NOT READY, REBUILD IN PROGRESS */
		case 0x06:	/* LUN NOT READY, RECALCULATION IN PROGRESS */
		case 0x07:	/* LUN NOT READY, OPERATION IN PROGRESS */
		case 0x08:	/* LUN NOT READY, LONG WRITE IN PROGRESS */
		default:	/* Possible future codes in SCSI spec? */
			/*
			 * For removable-media devices, do not retry if
			 * ASCQ > 2 as these result mostly from USCSI commands
			 * on MMC devices issued to check status of an
			 * operation initiated in immediate mode. Also for
			 * ASCQ >= 4 do not print console messages as these
			 * mainly represent a user-initiated operation
			 * instead of a system failure.
			 */
			if (un->un_f_has_removable_media) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/*
		 * As part of our recovery attempt for the NOT READY
		 * condition, we issue a START STOP UNIT command. However
		 * we want to wait for a short delay before attempting this
		 * as there may still be more commands coming back from the
		 * target with the check condition. To do this we use
		 * timeout(9F) to call sd_start_stop_unit_callback() after
		 * the delay interval expires. (sd_start_stop_unit_callback()
		 * dispatches sd_start_stop_unit_task(), which will issue
		 * the actual START STOP UNIT command.) The delay interval
		 * is one-half of the delay that we will use to retry the
		 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * unnecessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
			break;
		}

		/*
		 * Do not schedule the timeout if one is already pending.
		 */
		if (un->un_startstop_timeid != NULL) {
			SD_INFO(SD_LOG_ERROR, un,
			    "sd_sense_key_not_ready: restart already issued to"
			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			break;
		}

		/*
		 * Schedule the START STOP UNIT command, then queue the command
		 * for a retry.
		 *
		 * Note: A timeout is not scheduled for this retry because we
		 * want the retry to be serial with the START_STOP_UNIT. The
		 * retry will be started when the START_STOP_UNIT is completed
		 * in sd_start_stop_unit_task.
		 */
		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
		    un, SD_BSY_TIMEOUT / 2);
		xp->xb_retry_count++;
		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
		return;

	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "unit does not respond to selection\n");
		}
		break;

	case 0x3A:	/* MEDIUM NOT PRESENT */
		if (sd_error_level >= SCSI_ERR_FATAL) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Caddy not inserted in drive\n");
		}

		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		/* The state has changed, inform the media watch routines */
		cv_broadcast(&un->un_state_cv);
		/* Just fail if no media is present in the drive. */
		goto fail_command;

	default:
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
			    "Unit not Ready. Additional sense code 0x%x\n",
			    asc);
		}
		break;
	}

do_retry:

	/*
	 * Retry the command, as some targets may report NOT READY for
	 * several seconds after being reset.
	 */
	xp->xb_retry_count++;
	si.ssi_severity = SCSI_ERR_RETRYABLE;
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
	    &si, EIO, SD_BSY_TIMEOUT, NULL);

	return;

fail_command:
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}
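
/*
 * Quick-reference summary of the ASC 0x04 (LUN NOT READY) ASCQ handling
 * in sd_sense_key_not_ready() above:
 *
 *	ASCQ 0x00  cause not reportable    maybe reset target, then
 *					   START STOP UNIT + retry
 *	ASCQ 0x01  becoming ready	   plain retry
 *	ASCQ 0x02  initializing required   START STOP UNIT + retry
 *	ASCQ 0x03  manual intervention	   fail immediately
 *	ASCQ 0x88  T3/T4 path problem	   fail immediately on T3/T4
 *	others	   operation in progress   fail for removable media,
 *					   else START STOP UNIT + retry
 */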

/*
 * Function: sd_sense_key_medium_or_hardware_error
 *
 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
 *		sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t sense_key = scsi_sense_key(sense_datap);
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	if (sense_key == KEY_MEDIUM_ERROR) {
		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
	}

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	if ((un->un_reset_retry_count != 0) &&
	    (xp->xb_retry_count == un->un_reset_retry_count)) {
		mutex_exit(SD_MUTEX(un));
		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
		if (un->un_f_allow_bus_device_reset == TRUE) {

			boolean_t try_resetting_target = B_TRUE;

			/*
			 * We need to be able to handle specific ASC values
			 * when we are handling a KEY_HARDWARE_ERROR. In
			 * particular, taking the default action of resetting
			 * the target may not be the appropriate way to
			 * attempt recovery: resetting a target because of a
			 * single LUN failure victimizes all LUNs on that
			 * target.
			 *
			 * This is true for the LSI arrays: if an LSI array
			 * controller returns an ASC of 0x84 (LUN Dead), we
			 * should trust it.
			 */

			if (sense_key == KEY_HARDWARE_ERROR) {
				switch (asc) {
				case 0x84:
					if (SD_IS_LSI(un)) {
						try_resetting_target = B_FALSE;
					}
					break;
				default:
					break;
				}
			}

			if (try_resetting_target == B_TRUE) {
				int reset_retval = 0;
				if (un->un_f_lun_reset_enabled == TRUE) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_LUN\n");
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_TARGET\n");
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function: sd_sense_key_illegal_request
 *
 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_softerrs);
	SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/* Pointless to retry if the target thinks it's an illegal request */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}




/*
 * Function: sd_sense_key_unit_attention
 *
 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	/*
	 * For UNIT ATTENTION we allow retries for one minute. Devices
	 * like Sonoma can return UNIT ATTENTION close to a minute
	 * under certain conditions.
	 */
	int retry_check_flag = SD_RETRIES_UA;
	boolean_t kstat_updated = B_FALSE;
	struct sd_sense_info si;
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;


	switch (asc) {
	case 0x5D:	/* FAILURE PREDICTION THRESHOLD EXCEEDED */
		if (sd_report_pfa != 0) {
			SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
			si.ssi_pfa_flag = TRUE;
			retry_check_flag = SD_RETRIES_STANDARD;
			goto do_retry;
		}

		break;

	case 0x29:	/* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
		}
#ifdef _LP64
		if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
			if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
			    un, KM_NOSLEEP) == 0) {
				/*
				 * If we can't dispatch the task we'll just
				 * live without descriptor sense. We can
				 * try again on the next "unit attention"
				 */
				SD_ERROR(SD_LOG_ERROR, un,
				    "sd_sense_key_unit_attention: "
				    "Could not dispatch "
				    "sd_reenable_dsense_task\n");
			}
		}
#endif /* _LP64 */
		/* FALLTHRU */

	case 0x28:	/* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
		if (!un->un_f_has_removable_media) {
			break;
		}

		/*
		 * When we get a unit attention from a removable-media device,
		 * it may be in a state that will take a long time to recover
		 * (e.g., from a reset). Since we are executing in interrupt
		 * context here, we cannot wait around for the device to come
		 * back. So hand this command off to sd_media_change_task()
		 * for deferred processing under taskq thread context. (Note
		 * that the command still may be failed if a problem is
		 * encountered at a later time.)
		 */
		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
		    KM_NOSLEEP) == 0) {
			/*
			 * Cannot dispatch the request so fail the command.
			 */
			SD_UPDATE_ERRSTATS(un, sd_harderrs);
			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
			si.ssi_severity = SCSI_ERR_FATAL;
			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
			sd_return_failed_command(un, bp, EIO);
		}

		/*
		 * If the dispatch of sd_media_change_task() failed, kstat
		 * was already updated above. If the dispatch succeeded, the
		 * task will update kstat later if it encounters an error.
		 * Either way, mark kstat as updated here.
		 */
		kstat_updated = B_TRUE;

		/*
		 * Either the command has been successfully dispatched to a
		 * task Q for retrying, or the dispatch failed. In either case
		 * do NOT retry again by calling sd_retry_command. This sets up
		 * two retries of the same command and when one completes and
		 * frees the resources the other will access freed memory,
		 * a bad thing.
		 */
		return;

	default:
		break;
	}

	/*
	 * Update kstat if we haven't done that.
	 */
	if (!kstat_updated) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
	}

do_retry:
	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
	    EIO, SD_UA_RETRY_DELAY, NULL);
}



/*
 * Function: sd_sense_key_fail_command
 *
 * Description: Use to fail a command when we don't like the sense key that
 *		was returned.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function: sd_sense_key_blank_check
 *
 * Description: Recovery actions for a SCSI "Blank Check" sense key.
 *		Has no monetary connotation.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Blank check is not fatal for removable devices, therefore
	 * it does not require a console message.
	 */
	si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
	    SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}
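
/*
 * The handlers above and below all share the same shape: fill in a
 * stack sd_sense_info, then either fail the command or queue a retry
 * with sd_print_sense_msg() as the message callback. A condensed sketch
 * of the fail-path pattern (illustrative only, not compiled into the
 * driver; the example function name is made up):
 */
#if 0
static void
sd_example_fail_with_sense_msg(struct sd_lun *un, struct buf *bp)
{
	struct sd_sense_info si;

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/* Log (subject to FLAG_SILENT etc.), then fail with EIO. */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}
#endif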
16289 * 16290 * Context: May be called from interrupt context 16291 */ 16292 16293 static void 16294 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16295 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16296 { 16297 struct sd_sense_info si; 16298 16299 ASSERT(un != NULL); 16300 ASSERT(mutex_owned(SD_MUTEX(un))); 16301 ASSERT(bp != NULL); 16302 ASSERT(xp != NULL); 16303 ASSERT(pktp != NULL); 16304 16305 si.ssi_severity = SCSI_ERR_FATAL; 16306 si.ssi_pfa_flag = FALSE; 16307 16308 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16309 16310 /* 16311 * This really ought to be a fatal error, but we will retry anyway 16312 * as some drives report this as a spurious error. 16313 */ 16314 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16315 &si, EIO, (clock_t)0, NULL); 16316 } 16317 16318 16319 16320 /* 16321 * Function: sd_sense_key_default 16322 * 16323 * Description: Default recovery action for several SCSI sense keys (basically 16324 * attempts a retry). 16325 * 16326 * Context: May be called from interrupt context 16327 */ 16328 16329 static void 16330 sd_sense_key_default(struct sd_lun *un, 16331 uint8_t *sense_datap, 16332 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16333 { 16334 struct sd_sense_info si; 16335 uint8_t sense_key = scsi_sense_key(sense_datap); 16336 16337 ASSERT(un != NULL); 16338 ASSERT(mutex_owned(SD_MUTEX(un))); 16339 ASSERT(bp != NULL); 16340 ASSERT(xp != NULL); 16341 ASSERT(pktp != NULL); 16342 16343 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16344 16345 /* 16346 * Undecoded sense key. Attempt retries and hope that will fix 16347 * the problem. Otherwise, we're dead. 16348 */ 16349 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16350 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16351 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16352 } 16353 16354 si.ssi_severity = SCSI_ERR_FATAL; 16355 si.ssi_pfa_flag = FALSE; 16356 16357 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16358 &si, EIO, (clock_t)0, NULL); 16359 } 16360 16361 16362 16363 /* 16364 * Function: sd_print_retry_msg 16365 * 16366 * Description: Print a message indicating the retry action being taken. 16367 * 16368 * Arguments: un - ptr to associated softstate 16369 * bp - ptr to buf(9S) for the command 16370 * arg - not used. 16371 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16372 * or SD_NO_RETRY_ISSUED 16373 * 16374 * Context: May be called from interrupt context 16375 */ 16376 /* ARGSUSED */ 16377 static void 16378 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16379 { 16380 struct sd_xbuf *xp; 16381 struct scsi_pkt *pktp; 16382 char *reasonp; 16383 char *msgp; 16384 16385 ASSERT(un != NULL); 16386 ASSERT(mutex_owned(SD_MUTEX(un))); 16387 ASSERT(bp != NULL); 16388 pktp = SD_GET_PKTP(bp); 16389 ASSERT(pktp != NULL); 16390 xp = SD_GET_XBUF(bp); 16391 ASSERT(xp != NULL); 16392 16393 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16394 mutex_enter(&un->un_pm_mutex); 16395 if ((un->un_state == SD_STATE_SUSPENDED) || 16396 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16397 (pktp->pkt_flags & FLAG_SILENT)) { 16398 mutex_exit(&un->un_pm_mutex); 16399 goto update_pkt_reason; 16400 } 16401 mutex_exit(&un->un_pm_mutex); 16402 16403 /* 16404 * Suppress messages if they are all the same pkt_reason; with 16405 * TQ, many (up to 256) are returned with the same pkt_reason. 16406 * If we are in panic, then suppress the retry messages. 
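 *
 * (Concretely, the suppression is just the un_last_pkt_reason latch
 * maintained at update_pkt_reason below, e.g.:
 *
 *	if (pktp->pkt_reason == un->un_last_pkt_reason &&
 *	    sd_error_level != SCSI_ERR_ALL)
 *		return;		no message for the duplicate reason
 *
 * so only the first of a burst of identical failures is logged.)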
16407 */ 16408 switch (flag) { 16409 case SD_NO_RETRY_ISSUED: 16410 msgp = "giving up"; 16411 break; 16412 case SD_IMMEDIATE_RETRY_ISSUED: 16413 case SD_DELAYED_RETRY_ISSUED: 16414 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16415 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16416 (sd_error_level != SCSI_ERR_ALL))) { 16417 return; 16418 } 16419 msgp = "retrying command"; 16420 break; 16421 default: 16422 goto update_pkt_reason; 16423 } 16424 16425 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16426 scsi_rname(pktp->pkt_reason)); 16427 16428 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16429 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16430 16431 update_pkt_reason: 16432 /* 16433 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16434 * This is to prevent multiple console messages for the same failure 16435 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16436 * when the command is retried successfully because there still may be 16437 * more commands coming back with the same value of pktp->pkt_reason. 16438 */ 16439 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16440 un->un_last_pkt_reason = pktp->pkt_reason; 16441 } 16442 } 16443 16444 16445 /* 16446 * Function: sd_print_cmd_incomplete_msg 16447 * 16448 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16449 * 16450 * Arguments: un - ptr to associated softstate 16451 * bp - ptr to buf(9S) for the command 16452 * arg - passed to sd_print_retry_msg() 16453 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16454 * or SD_NO_RETRY_ISSUED 16455 * 16456 * Context: May be called from interrupt context 16457 */ 16458 16459 static void 16460 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16461 int code) 16462 { 16463 dev_info_t *dip; 16464 16465 ASSERT(un != NULL); 16466 ASSERT(mutex_owned(SD_MUTEX(un))); 16467 ASSERT(bp != NULL); 16468 16469 switch (code) { 16470 case SD_NO_RETRY_ISSUED: 16471 /* Command was failed. Someone turned off this target? */ 16472 if (un->un_state != SD_STATE_OFFLINE) { 16473 /* 16474 * Suppress message if we are detaching and 16475 * device has been disconnected 16476 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16477 * private interface and not part of the DDI 16478 */ 16479 dip = un->un_sd->sd_dev; 16480 if (!(DEVI_IS_DETACHING(dip) && 16481 DEVI_IS_DEVICE_REMOVED(dip))) { 16482 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16483 "disk not responding to selection\n"); 16484 } 16485 New_state(un, SD_STATE_OFFLINE); 16486 } 16487 break; 16488 16489 case SD_DELAYED_RETRY_ISSUED: 16490 case SD_IMMEDIATE_RETRY_ISSUED: 16491 default: 16492 /* Command was successfully queued for retry */ 16493 sd_print_retry_msg(un, bp, arg, code); 16494 break; 16495 } 16496 } 16497 16498 16499 /* 16500 * Function: sd_pkt_reason_cmd_incomplete 16501 * 16502 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
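 *
 *		(In pkt_state terms, per the code below: a command that
 *		stopped at exactly STATE_GOT_BUS never started selection,
 *		so the reset is skipped for it; and if STATE_GOT_TARGET
 *		is not set, the retry is also flagged SD_RETRIES_FAILFAST
 *		since the target is likely unreachable.)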
16503 * 16504 * Context: May be called from interrupt context 16505 */ 16506 16507 static void 16508 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16509 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16510 { 16511 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16512 16513 ASSERT(un != NULL); 16514 ASSERT(mutex_owned(SD_MUTEX(un))); 16515 ASSERT(bp != NULL); 16516 ASSERT(xp != NULL); 16517 ASSERT(pktp != NULL); 16518 16519 /* Do not do a reset if selection did not complete */ 16520 /* Note: Should this not just check the bit? */ 16521 if (pktp->pkt_state != STATE_GOT_BUS) { 16522 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16523 sd_reset_target(un, pktp); 16524 } 16525 16526 /* 16527 * If the target was not successfully selected, then set 16528 * SD_RETRIES_FAILFAST to indicate that we lost communication 16529 * with the target, and further retries and/or commands are 16530 * likely to take a long time. 16531 */ 16532 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16533 flag |= SD_RETRIES_FAILFAST; 16534 } 16535 16536 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16537 16538 sd_retry_command(un, bp, flag, 16539 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16540 } 16541 16542 16543 16544 /* 16545 * Function: sd_pkt_reason_cmd_tran_err 16546 * 16547 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16548 * 16549 * Context: May be called from interrupt context 16550 */ 16551 16552 static void 16553 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16554 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16555 { 16556 ASSERT(un != NULL); 16557 ASSERT(mutex_owned(SD_MUTEX(un))); 16558 ASSERT(bp != NULL); 16559 ASSERT(xp != NULL); 16560 ASSERT(pktp != NULL); 16561 16562 /* 16563 * Do not reset if we got a parity error, or if 16564 * selection did not complete. 16565 */ 16566 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16567 /* Note: Should this not just check the bit for pkt_state? */ 16568 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16569 (pktp->pkt_state != STATE_GOT_BUS)) { 16570 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16571 sd_reset_target(un, pktp); 16572 } 16573 16574 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16575 16576 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16577 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16578 } 16579 16580 16581 16582 /* 16583 * Function: sd_pkt_reason_cmd_reset 16584 * 16585 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16586 * 16587 * Context: May be called from interrupt context 16588 */ 16589 16590 static void 16591 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16592 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16593 { 16594 ASSERT(un != NULL); 16595 ASSERT(mutex_owned(SD_MUTEX(un))); 16596 ASSERT(bp != NULL); 16597 ASSERT(xp != NULL); 16598 ASSERT(pktp != NULL); 16599 16600 /* The target may still be running the command, so try to reset. */ 16601 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16602 sd_reset_target(un, pktp); 16603 16604 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16605 16606 /* 16607 * If pkt_reason is CMD_RESET chances are that this pkt got 16608 * reset because another target on this bus caused it. The target 16609 * that caused it should get CMD_TIMEOUT with pkt_statistics 16610 * of STAT_TIMEOUT/STAT_DEV_RESET. 
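 *
 * (This is also why the retry below uses SD_RETRIES_VICTIM instead
 * of SD_RETRIES_STANDARD: the command is presumed to be collateral
 * damage from another initiator's reset, and is therefore retried
 * against the victim retry limit, which is normally more generous
 * than the standard one.)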
16611 */ 16612 16613 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16614 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16615 } 16616 16617 16618 16619 16620 /* 16621 * Function: sd_pkt_reason_cmd_aborted 16622 * 16623 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 16624 * 16625 * Context: May be called from interrupt context 16626 */ 16627 16628 static void 16629 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 16630 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16631 { 16632 ASSERT(un != NULL); 16633 ASSERT(mutex_owned(SD_MUTEX(un))); 16634 ASSERT(bp != NULL); 16635 ASSERT(xp != NULL); 16636 ASSERT(pktp != NULL); 16637 16638 /* The target may still be running the command, so try to reset. */ 16639 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16640 sd_reset_target(un, pktp); 16641 16642 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16643 16644 /* 16645 * If pkt_reason is CMD_ABORTED chances are that this pkt got 16646 * aborted because another target on this bus caused it. The target 16647 * that caused it should get CMD_TIMEOUT with pkt_statistics 16648 * of STAT_TIMEOUT/STAT_DEV_RESET. 16649 */ 16650 16651 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16652 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16653 } 16654 16655 16656 16657 /* 16658 * Function: sd_pkt_reason_cmd_timeout 16659 * 16660 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 16661 * 16662 * Context: May be called from interrupt context 16663 */ 16664 16665 static void 16666 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 16667 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16668 { 16669 ASSERT(un != NULL); 16670 ASSERT(mutex_owned(SD_MUTEX(un))); 16671 ASSERT(bp != NULL); 16672 ASSERT(xp != NULL); 16673 ASSERT(pktp != NULL); 16674 16675 16676 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16677 sd_reset_target(un, pktp); 16678 16679 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16680 16681 /* 16682 * A command timeout indicates that we could not establish 16683 * communication with the target, so set SD_RETRIES_FAILFAST 16684 * as further retries/commands are likely to take a long time. 16685 */ 16686 sd_retry_command(un, bp, 16687 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 16688 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16689 } 16690 16691 16692 16693 /* 16694 * Function: sd_pkt_reason_cmd_unx_bus_free 16695 * 16696 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 16697 * 16698 * Context: May be called from interrupt context 16699 */ 16700 16701 static void 16702 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 16703 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16704 { 16705 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 16706 16707 ASSERT(un != NULL); 16708 ASSERT(mutex_owned(SD_MUTEX(un))); 16709 ASSERT(bp != NULL); 16710 ASSERT(xp != NULL); 16711 ASSERT(pktp != NULL); 16712 16713 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16714 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16715 16716 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 16717 sd_print_retry_msg : NULL; 16718 16719 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16720 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16721 } 16722 16723 16724 /* 16725 * Function: sd_pkt_reason_cmd_tag_reject 16726 * 16727 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
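 *
 *		(Recovery here amounts to giving up on tagged queueing:
 *		the handler below clears un_tagflags, drops un_throttle
 *		to at most 3 (1 if queueing is unsupported), disables the
 *		"tagged-qing" capability via scsi_ifsetcap(9F), and then
 *		retries the command untagged.)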
16728 * 16729 * Context: May be called from interrupt context 16730 */ 16731 16732 static void 16733 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 16734 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16735 { 16736 ASSERT(un != NULL); 16737 ASSERT(mutex_owned(SD_MUTEX(un))); 16738 ASSERT(bp != NULL); 16739 ASSERT(xp != NULL); 16740 ASSERT(pktp != NULL); 16741 16742 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16743 pktp->pkt_flags = 0; 16744 un->un_tagflags = 0; 16745 if (un->un_f_opt_queueing == TRUE) { 16746 un->un_throttle = min(un->un_throttle, 3); 16747 } else { 16748 un->un_throttle = 1; 16749 } 16750 mutex_exit(SD_MUTEX(un)); 16751 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 16752 mutex_enter(SD_MUTEX(un)); 16753 16754 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16755 16756 /* Legacy behavior not to check retry counts here. */ 16757 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 16758 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16759 } 16760 16761 16762 /* 16763 * Function: sd_pkt_reason_default 16764 * 16765 * Description: Default recovery actions for SCSA pkt_reason values that 16766 * do not have more explicit recovery actions. 16767 * 16768 * Context: May be called from interrupt context 16769 */ 16770 16771 static void 16772 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 16773 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16774 { 16775 ASSERT(un != NULL); 16776 ASSERT(mutex_owned(SD_MUTEX(un))); 16777 ASSERT(bp != NULL); 16778 ASSERT(xp != NULL); 16779 ASSERT(pktp != NULL); 16780 16781 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16782 sd_reset_target(un, pktp); 16783 16784 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16785 16786 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16787 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16788 } 16789 16790 16791 16792 /* 16793 * Function: sd_pkt_status_check_condition 16794 * 16795 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 16796 * 16797 * Context: May be called from interrupt context 16798 */ 16799 16800 static void 16801 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 16802 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16803 { 16804 ASSERT(un != NULL); 16805 ASSERT(mutex_owned(SD_MUTEX(un))); 16806 ASSERT(bp != NULL); 16807 ASSERT(xp != NULL); 16808 ASSERT(pktp != NULL); 16809 16810 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 16811 "entry: buf:0x%p xp:0x%p\n", bp, xp); 16812 16813 /* 16814 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 16815 * command will be retried after the request sense). Otherwise, retry 16816 * the command. Note: we are issuing the request sense even though the 16817 * retry limit may have been reached for the failed command. 
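 *
 * (Background sketch: with ARQ the HBA fetches the sense bytes
 * automatically and returns them with the failed packet, typically
 * as a struct scsi_arq_status at pktp->pkt_scbp, so only a retry is
 * needed; without ARQ the driver must issue its own REQUEST SENSE
 * (opcode 0x03), which sd_send_request_sense_command() sets up.)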
16818 */
16819 if (un->un_f_arq_enabled == FALSE) {
16820 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
16821 "no ARQ, sending request sense command\n");
16822 sd_send_request_sense_command(un, bp, pktp);
16823 } else {
16824 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
16825 "ARQ, retrying request sense command\n");
16826 #if defined(__i386) || defined(__amd64)
16827 /*
16828 * The SD_RETRY_DELAY value needs to be adjusted here
16829 * whenever SD_RETRY_DELAY changes in sddef.h
16830 */
16831 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
16832 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0,
16833 NULL);
16834 #else
16835 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
16836 EIO, SD_RETRY_DELAY, NULL);
16837 #endif
16838 }
16839
16840 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
16841 }
16842
16843
16844 /*
16845 * Function: sd_pkt_status_busy
16846 *
16847 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
16848 *
16849 * Context: May be called from interrupt context
16850 */
16851
16852 static void
16853 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
16854 struct scsi_pkt *pktp)
16855 {
16856 ASSERT(un != NULL);
16857 ASSERT(mutex_owned(SD_MUTEX(un)));
16858 ASSERT(bp != NULL);
16859 ASSERT(xp != NULL);
16860 ASSERT(pktp != NULL);
16861
16862 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16863 "sd_pkt_status_busy: entry\n");
16864
16865 /* If retries are exhausted, just fail the command. */
16866 if (xp->xb_retry_count >= un->un_busy_retry_count) {
16867 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16868 "device busy too long\n");
16869 sd_return_failed_command(un, bp, EIO);
16870 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16871 "sd_pkt_status_busy: exit\n");
16872 return;
16873 }
16874 xp->xb_retry_count++;
16875
16876 /*
16877 * Try to reset the target. However, we do not want to perform
16878 * more than one reset if the device continues to fail. The reset
16879 * will be performed when the retry count reaches the reset
16880 * threshold. This threshold should be set such that at least
16881 * one retry is issued before the reset is performed.
16882 */
16883 if (xp->xb_retry_count ==
16884 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
16885 int rval = 0;
16886 mutex_exit(SD_MUTEX(un));
16887 if (un->un_f_allow_bus_device_reset == TRUE) {
16888 /*
16889 * First try to reset the LUN; if we cannot then
16890 * try to reset the target.
16891 */
16892 if (un->un_f_lun_reset_enabled == TRUE) {
16893 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16894 "sd_pkt_status_busy: RESET_LUN\n");
16895 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
16896 }
16897 if (rval == 0) {
16898 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16899 "sd_pkt_status_busy: RESET_TARGET\n");
16900 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
16901 }
16902 }
16903 if (rval == 0) {
16904 /*
16905 * If the RESET_LUN and/or RESET_TARGET failed,
16906 * try RESET_ALL
16907 */
16908 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16909 "sd_pkt_status_busy: RESET_ALL\n");
16910 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
16911 }
16912 mutex_enter(SD_MUTEX(un));
16913 if (rval == 0) {
16914 /*
16915 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
16916 * At this point we give up & fail the command.
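 *
 * (Ignoring the enable flags, the ladder above is equivalent to the
 * following sketch; scsi_reset(9F) returns nonzero on success, so
 * the first level that works ends the escalation:
 *
 *	if (scsi_reset(SD_ADDRESS(un), RESET_LUN) == 0 &&
 *	    scsi_reset(SD_ADDRESS(un), RESET_TARGET) == 0 &&
 *	    scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)
 *		fail the command, as done here.)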
16917 */ 16918 sd_return_failed_command(un, bp, EIO); 16919 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16920 "sd_pkt_status_busy: exit (failed cmd)\n"); 16921 return; 16922 } 16923 } 16924 16925 /* 16926 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 16927 * we have already checked the retry counts above. 16928 */ 16929 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 16930 EIO, SD_BSY_TIMEOUT, NULL); 16931 16932 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16933 "sd_pkt_status_busy: exit\n"); 16934 } 16935 16936 16937 /* 16938 * Function: sd_pkt_status_reservation_conflict 16939 * 16940 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 16941 * command status. 16942 * 16943 * Context: May be called from interrupt context 16944 */ 16945 16946 static void 16947 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 16948 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16949 { 16950 ASSERT(un != NULL); 16951 ASSERT(mutex_owned(SD_MUTEX(un))); 16952 ASSERT(bp != NULL); 16953 ASSERT(xp != NULL); 16954 ASSERT(pktp != NULL); 16955 16956 /* 16957 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 16958 * conflict could be due to various reasons like incorrect keys, not 16959 * registered or not reserved etc. So, we return EACCES to the caller. 16960 */ 16961 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 16962 int cmd = SD_GET_PKT_OPCODE(pktp); 16963 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 16964 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 16965 sd_return_failed_command(un, bp, EACCES); 16966 return; 16967 } 16968 } 16969 16970 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 16971 16972 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 16973 if (sd_failfast_enable != 0) { 16974 /* By definition, we must panic here.... */ 16975 sd_panic_for_res_conflict(un); 16976 /*NOTREACHED*/ 16977 } 16978 SD_ERROR(SD_LOG_IO, un, 16979 "sd_handle_resv_conflict: Disk Reserved\n"); 16980 sd_return_failed_command(un, bp, EACCES); 16981 return; 16982 } 16983 16984 /* 16985 * 1147670: retry only if sd_retry_on_reservation_conflict 16986 * property is set (default is 1). Retries will not succeed 16987 * on a disk reserved by another initiator. HA systems 16988 * may reset this via sd.conf to avoid these retries. 16989 * 16990 * Note: The legacy return code for this failure is EIO, however EACCES 16991 * seems more appropriate for a reservation conflict. 16992 */ 16993 if (sd_retry_on_reservation_conflict == 0) { 16994 SD_ERROR(SD_LOG_IO, un, 16995 "sd_handle_resv_conflict: Device Reserved\n"); 16996 sd_return_failed_command(un, bp, EIO); 16997 return; 16998 } 16999 17000 /* 17001 * Retry the command if we can. 17002 * 17003 * Note: The legacy return code for this failure is EIO, however EACCES 17004 * seems more appropriate for a reservation conflict. 17005 */ 17006 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17007 (clock_t)2, NULL); 17008 } 17009 17010 17011 17012 /* 17013 * Function: sd_pkt_status_qfull 17014 * 17015 * Description: Handle a QUEUE FULL condition from the target. This can 17016 * occur if the HBA does not handle the queue full condition. 17017 * (Basically this means third-party HBAs as Sun HBAs will 17018 * handle the queue full condition.) Note that if there are 17019 * some commands already in the transport, then the queue full 17020 * has occurred because the queue for this nexus is actually 17021 * full. 
If there are no commands in the transport, then the 17022 * queue full is resulting from some other initiator or lun 17023 * consuming all the resources at the target. 17024 * 17025 * Context: May be called from interrupt context 17026 */ 17027 17028 static void 17029 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17030 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17031 { 17032 ASSERT(un != NULL); 17033 ASSERT(mutex_owned(SD_MUTEX(un))); 17034 ASSERT(bp != NULL); 17035 ASSERT(xp != NULL); 17036 ASSERT(pktp != NULL); 17037 17038 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17039 "sd_pkt_status_qfull: entry\n"); 17040 17041 /* 17042 * Just lower the QFULL throttle and retry the command. Note that 17043 * we do not limit the number of retries here. 17044 */ 17045 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17046 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17047 SD_RESTART_TIMEOUT, NULL); 17048 17049 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17050 "sd_pkt_status_qfull: exit\n"); 17051 } 17052 17053 17054 /* 17055 * Function: sd_reset_target 17056 * 17057 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17058 * RESET_TARGET, or RESET_ALL. 17059 * 17060 * Context: May be called under interrupt context. 17061 */ 17062 17063 static void 17064 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17065 { 17066 int rval = 0; 17067 17068 ASSERT(un != NULL); 17069 ASSERT(mutex_owned(SD_MUTEX(un))); 17070 ASSERT(pktp != NULL); 17071 17072 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17073 17074 /* 17075 * No need to reset if the transport layer has already done so. 17076 */ 17077 if ((pktp->pkt_statistics & 17078 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17079 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17080 "sd_reset_target: no reset\n"); 17081 return; 17082 } 17083 17084 mutex_exit(SD_MUTEX(un)); 17085 17086 if (un->un_f_allow_bus_device_reset == TRUE) { 17087 if (un->un_f_lun_reset_enabled == TRUE) { 17088 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17089 "sd_reset_target: RESET_LUN\n"); 17090 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17091 } 17092 if (rval == 0) { 17093 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17094 "sd_reset_target: RESET_TARGET\n"); 17095 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17096 } 17097 } 17098 17099 if (rval == 0) { 17100 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17101 "sd_reset_target: RESET_ALL\n"); 17102 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17103 } 17104 17105 mutex_enter(SD_MUTEX(un)); 17106 17107 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17108 } 17109 17110 17111 /* 17112 * Function: sd_media_change_task 17113 * 17114 * Description: Recovery action for CDROM to become available. 
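 *
 *		(Retry budget, derived from the loop below: each attempt
 *		is followed by a 0.5 sec delay, so the initial limit of
 *		SD_UNIT_ATTENTION_RETRY/10 attempts keeps the common case
 *		short, while an EAGAIN from sd_handle_mchange() raises
 *		the limit to the full SD_UNIT_ATTENTION_RETRY, i.e. about
 *		SD_UNIT_ATTENTION_RETRY/2 seconds of polling.)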
17115 *
17116 * Context: Executes in a taskq() thread context
17117 */
17118
17119 static void
17120 sd_media_change_task(void *arg)
17121 {
17122 struct scsi_pkt *pktp = arg;
17123 struct sd_lun *un;
17124 struct buf *bp;
17125 struct sd_xbuf *xp;
17126 int err = 0;
17127 int retry_count = 0;
17128 int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
17129 struct sd_sense_info si;
17130
17131 ASSERT(pktp != NULL);
17132 bp = (struct buf *)pktp->pkt_private;
17133 ASSERT(bp != NULL);
17134 xp = SD_GET_XBUF(bp);
17135 ASSERT(xp != NULL);
17136 un = SD_GET_UN(bp);
17137 ASSERT(un != NULL);
17138 ASSERT(!mutex_owned(SD_MUTEX(un)));
17139 ASSERT(un->un_f_monitor_media_state);
17140
17141 si.ssi_severity = SCSI_ERR_INFO;
17142 si.ssi_pfa_flag = FALSE;
17143
17144 /*
17145 * When a reset is issued on a CDROM, it takes a long time to
17146 * recover. The first few attempts to read the capacity and other
17147 * things related to handling the unit attention fail (with an ASC
17148 * of 0x4 and an ASCQ of 0x1). In that case we want to allow enough
17149 * retries, while still limiting the retries in other cases of
17150 * genuine failure, like no media in the drive.
17151 */
17152 while (retry_count++ < retry_limit) {
17153 if ((err = sd_handle_mchange(un)) == 0) {
17154 break;
17155 }
17156 if (err == EAGAIN) {
17157 retry_limit = SD_UNIT_ATTENTION_RETRY;
17158 }
17159 /* Sleep for 0.5 sec. & try again */
17160 delay(drv_usectohz(500000));
17161 }
17162
17163 /*
17164 * Dispatch (retry or fail) the original command here,
17165 * along with appropriate console messages....
17166 *
17167 * Must grab the mutex before calling sd_retry_command,
17168 * sd_print_sense_msg and sd_return_failed_command.
17169 */
17170 mutex_enter(SD_MUTEX(un));
17171 if (err != SD_CMD_SUCCESS) {
17172 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17173 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
17174 si.ssi_severity = SCSI_ERR_FATAL;
17175 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17176 sd_return_failed_command(un, bp, EIO);
17177 } else {
17178 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
17179 &si, EIO, (clock_t)0, NULL);
17180 }
17181 mutex_exit(SD_MUTEX(un));
17182 }
17183
17184
17185
17186 /*
17187 * Function: sd_handle_mchange
17188 *
17189 * Description: Perform geometry validation & other recovery when CDROM
17190 * has been removed from drive.
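 *
 *		(The recovery sequence below, in order: READ CAPACITY to
 *		refresh the block count and size, cmlb_invalidate() plus
 *		cmlb_validate() to rebuild the label, and finally a
 *		DOORLOCK to re-lock the door.)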
17191 * 17192 * Return Code: 0 for success 17193 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17194 * sd_send_scsi_READ_CAPACITY() 17195 * 17196 * Context: Executes in a taskq() thread context 17197 */ 17198 17199 static int 17200 sd_handle_mchange(struct sd_lun *un) 17201 { 17202 uint64_t capacity; 17203 uint32_t lbasize; 17204 int rval; 17205 17206 ASSERT(!mutex_owned(SD_MUTEX(un))); 17207 ASSERT(un->un_f_monitor_media_state); 17208 17209 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17210 SD_PATH_DIRECT_PRIORITY)) != 0) { 17211 return (rval); 17212 } 17213 17214 mutex_enter(SD_MUTEX(un)); 17215 sd_update_block_info(un, lbasize, capacity); 17216 17217 if (un->un_errstats != NULL) { 17218 struct sd_errstats *stp = 17219 (struct sd_errstats *)un->un_errstats->ks_data; 17220 stp->sd_capacity.value.ui64 = (uint64_t) 17221 ((uint64_t)un->un_blockcount * 17222 (uint64_t)un->un_tgt_blocksize); 17223 } 17224 17225 17226 /* 17227 * Check if the media in the device is writable or not 17228 */ 17229 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17230 17231 /* 17232 * Note: Maybe let the strategy/partitioning chain worry about getting 17233 * valid geometry. 17234 */ 17235 mutex_exit(SD_MUTEX(un)); 17236 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17237 17238 17239 if (cmlb_validate(un->un_cmlbhandle, 0, 17240 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17241 return (EIO); 17242 } else { 17243 if (un->un_f_pkstats_enabled) { 17244 sd_set_pstats(un); 17245 SD_TRACE(SD_LOG_IO_PARTITION, un, 17246 "sd_handle_mchange: un:0x%p pstats created and " 17247 "set\n", un); 17248 } 17249 } 17250 17251 17252 /* 17253 * Try to lock the door 17254 */ 17255 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17256 SD_PATH_DIRECT_PRIORITY)); 17257 } 17258 17259 17260 /* 17261 * Function: sd_send_scsi_DOORLOCK 17262 * 17263 * Description: Issue the scsi DOOR LOCK command 17264 * 17265 * Arguments: un - pointer to driver soft state (unit) structure for 17266 * this target. 17267 * flag - SD_REMOVAL_ALLOW 17268 * SD_REMOVAL_PREVENT 17269 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17270 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17271 * to use the USCSI "direct" chain and bypass the normal 17272 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17273 * command is issued as part of an error recovery action. 17274 * 17275 * Return Code: 0 - Success 17276 * errno return code from sd_send_scsi_cmd() 17277 * 17278 * Context: Can sleep. 17279 */ 17280 17281 static int 17282 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17283 { 17284 union scsi_cdb cdb; 17285 struct uscsi_cmd ucmd_buf; 17286 struct scsi_extended_sense sense_buf; 17287 int status; 17288 17289 ASSERT(un != NULL); 17290 ASSERT(!mutex_owned(SD_MUTEX(un))); 17291 17292 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17293 17294 /* already determined doorlock is not supported, fake success */ 17295 if (un->un_f_doorlock_supported == FALSE) { 17296 return (0); 17297 } 17298 17299 /* 17300 * If we are ejecting and see an SD_REMOVAL_PREVENT 17301 * ignore the command so we can complete the eject 17302 * operation. 
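 *
 * (For reference: the PREVENT ALLOW MEDIUM REMOVAL CDB assembled
 * below is six bytes, opcode 0x1E with byte 4 carrying the
 * prevent/allow flag, so the flag argument is copied directly into
 * cdb_opaque[4].)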
17303 */
17304 if (flag == SD_REMOVAL_PREVENT) {
17305 mutex_enter(SD_MUTEX(un));
17306 if (un->un_f_ejecting == TRUE) {
17307 mutex_exit(SD_MUTEX(un));
17308 return (EAGAIN);
17309 }
17310 mutex_exit(SD_MUTEX(un));
17311 }
17312
17313 bzero(&cdb, sizeof (cdb));
17314 bzero(&ucmd_buf, sizeof (ucmd_buf));
17315
17316 cdb.scc_cmd = SCMD_DOORLOCK;
17317 cdb.cdb_opaque[4] = (uchar_t)flag;
17318
17319 ucmd_buf.uscsi_cdb = (char *)&cdb;
17320 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
17321 ucmd_buf.uscsi_bufaddr = NULL;
17322 ucmd_buf.uscsi_buflen = 0;
17323 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
17324 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
17325 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
17326 ucmd_buf.uscsi_timeout = 15;
17327
17328 SD_TRACE(SD_LOG_IO, un,
17329 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n");
17330
17331 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
17332 UIO_SYSSPACE, path_flag);
17333
17334 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
17335 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
17336 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
17337 /* fake success and skip subsequent doorlock commands */
17338 un->un_f_doorlock_supported = FALSE;
17339 return (0);
17340 }
17341
17342 return (status);
17343 }
17344
17345 /*
17346 * Function: sd_send_scsi_READ_CAPACITY
17347 *
17348 * Description: This routine uses the scsi READ CAPACITY command to determine
17349 * the device capacity in number of blocks and the device native
17350 * block size. If this function returns a failure, then the
17351 * values in *capp and *lbap are undefined. If the capacity
17352 * returned is 0xffffffff then the lun is too large for a
17353 * normal READ CAPACITY command and the results of a
17354 * READ CAPACITY 16 will be used instead.
17355 *
17356 * Arguments: un - ptr to soft state struct for the target
17357 * capp - ptr to unsigned 64-bit variable to receive the
17358 * capacity value from the command.
17359 * lbap - ptr to unsigned 32-bit variable to receive the
17360 * block size value from the command
17361 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17362 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17363 * to use the USCSI "direct" chain and bypass the normal
17364 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
17365 * command is issued as part of an error recovery action.
17366 *
17367 * Return Code: 0 - Success
17368 * EIO - IO error
17369 * EACCES - Reservation conflict detected
17370 * EAGAIN - Device is becoming ready
17371 * errno return code from sd_send_scsi_cmd()
17372 *
17373 * Context: Can sleep. Blocks until command completes.
17374 */
17375
17376 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
17377
17378 static int
17379 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap,
17380 int path_flag)
17381 {
17382 struct scsi_extended_sense sense_buf;
17383 struct uscsi_cmd ucmd_buf;
17384 union scsi_cdb cdb;
17385 uint32_t *capacity_buf;
17386 uint64_t capacity;
17387 uint32_t lbasize;
17388 int status;
17389
17390 ASSERT(un != NULL);
17391 ASSERT(!mutex_owned(SD_MUTEX(un)));
17392 ASSERT(capp != NULL);
17393 ASSERT(lbap != NULL);
17394
17395 SD_TRACE(SD_LOG_IO, un,
17396 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
17397
17398 /*
17399 * First send a READ_CAPACITY command to the target.
17400 * (This command is mandatory under SCSI-2.)
17401 *
17402 * Set up the CDB for the READ_CAPACITY command.
The Partial 17403 * Medium Indicator bit is cleared. The address field must be 17404 * zero if the PMI bit is zero. 17405 */ 17406 bzero(&cdb, sizeof (cdb)); 17407 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17408 17409 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17410 17411 cdb.scc_cmd = SCMD_READ_CAPACITY; 17412 17413 ucmd_buf.uscsi_cdb = (char *)&cdb; 17414 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17415 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17416 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17417 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17418 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17419 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17420 ucmd_buf.uscsi_timeout = 60; 17421 17422 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17423 UIO_SYSSPACE, path_flag); 17424 17425 switch (status) { 17426 case 0: 17427 /* Return failure if we did not get valid capacity data. */ 17428 if (ucmd_buf.uscsi_resid != 0) { 17429 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17430 return (EIO); 17431 } 17432 17433 /* 17434 * Read capacity and block size from the READ CAPACITY 10 data. 17435 * This data may be adjusted later due to device specific 17436 * issues. 17437 * 17438 * According to the SCSI spec, the READ CAPACITY 10 17439 * command returns the following: 17440 * 17441 * bytes 0-3: Maximum logical block address available. 17442 * (MSB in byte:0 & LSB in byte:3) 17443 * 17444 * bytes 4-7: Block length in bytes 17445 * (MSB in byte:4 & LSB in byte:7) 17446 * 17447 */ 17448 capacity = BE_32(capacity_buf[0]); 17449 lbasize = BE_32(capacity_buf[1]); 17450 17451 /* 17452 * Done with capacity_buf 17453 */ 17454 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17455 17456 /* 17457 * if the reported capacity is set to all 0xf's, then 17458 * this disk is too large and requires SBC-2 commands. 17459 * Reissue the request using READ CAPACITY 16. 17460 */ 17461 if (capacity == 0xffffffff) { 17462 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17463 &lbasize, path_flag); 17464 if (status != 0) { 17465 return (status); 17466 } 17467 } 17468 break; /* Success! */ 17469 case EIO: 17470 switch (ucmd_buf.uscsi_status) { 17471 case STATUS_RESERVATION_CONFLICT: 17472 status = EACCES; 17473 break; 17474 case STATUS_CHECK: 17475 /* 17476 * Check condition; look for ASC/ASCQ of 0x04/0x01 17477 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17478 */ 17479 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17480 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17481 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17482 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17483 return (EAGAIN); 17484 } 17485 break; 17486 default: 17487 break; 17488 } 17489 /* FALLTHRU */ 17490 default: 17491 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17492 return (status); 17493 } 17494 17495 /* 17496 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17497 * (2352 and 0 are common) so for these devices always force the value 17498 * to 2048 as required by the ATAPI specs. 17499 */ 17500 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17501 lbasize = 2048; 17502 } 17503 17504 /* 17505 * Get the maximum LBA value from the READ CAPACITY data. 17506 * Here we assume that the Partial Medium Indicator (PMI) bit 17507 * was cleared when issuing the command. This means that the LBA 17508 * returned from the device is the LBA of the last logical block 17509 * on the logical unit. The actual logical block count will be 17510 * this value plus one. 
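 *
 * (Worked example, illustrative numbers only: a returned maximum LBA
 * of 0x0001FFFF means the unit holds 0x00020000 logical blocks.)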
17511 *
17512 * Currently the capacity is saved in terms of un->un_sys_blocksize,
17513 * so scale the capacity value to reflect this.
17514 */
17515 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
17516
17517 /*
17518 * Copy the values from the READ CAPACITY command into the space
17519 * provided by the caller.
17520 */
17521 *capp = capacity;
17522 *lbap = lbasize;
17523
17524 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
17525 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
17526
17527 /*
17528 * Both the lbasize and capacity from the device must be nonzero,
17529 * otherwise we assume that the values are not valid and return
17530 * failure to the caller. (4203735)
17531 */
17532 if ((capacity == 0) || (lbasize == 0)) {
17533 return (EIO);
17534 }
17535
17536 return (0);
17537 }
17538
17539 /*
17540 * Function: sd_send_scsi_READ_CAPACITY_16
17541 *
17542 * Description: This routine uses the scsi READ CAPACITY 16 command to
17543 * determine the device capacity in number of blocks and the
17544 * device native block size. If this function returns a failure,
17545 * then the values in *capp and *lbap are undefined.
17546 * This routine should always be called by
17547 * sd_send_scsi_READ_CAPACITY, which will apply any device-
17548 * specific adjustments to capacity and lbasize.
17549 *
17550 * Arguments: un - ptr to soft state struct for the target
17551 * capp - ptr to unsigned 64-bit variable to receive the
17552 * capacity value from the command.
17553 * lbap - ptr to unsigned 32-bit variable to receive the
17554 * block size value from the command
17555 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17556 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17557 * to use the USCSI "direct" chain and bypass the normal
17558 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
17559 * this command is issued as part of an error recovery
17560 * action.
17561 *
17562 * Return Code: 0 - Success
17563 * EIO - IO error
17564 * EACCES - Reservation conflict detected
17565 * EAGAIN - Device is becoming ready
17566 * errno return code from sd_send_scsi_cmd()
17567 *
17568 * Context: Can sleep. Blocks until command completes.
17569 */
17570
17571 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
17572
17573 static int
17574 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
17575 uint32_t *lbap, int path_flag)
17576 {
17577 struct scsi_extended_sense sense_buf;
17578 struct uscsi_cmd ucmd_buf;
17579 union scsi_cdb cdb;
17580 uint64_t *capacity16_buf;
17581 uint64_t capacity;
17582 uint32_t lbasize;
17583 int status;
17584
17585 ASSERT(un != NULL);
17586 ASSERT(!mutex_owned(SD_MUTEX(un)));
17587 ASSERT(capp != NULL);
17588 ASSERT(lbap != NULL);
17589
17590 SD_TRACE(SD_LOG_IO, un,
17591 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
17592
17593 /*
17594 * First send a READ_CAPACITY_16 command to the target.
17595 *
17596 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
17597 * Medium Indicator bit is cleared. The address field must be
17598 * zero if the PMI bit is zero.
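 *
 * (Sketch of the resulting 16-byte CDB, assumed layout per SBC-2;
 * the code below fills it in via scc_cmd, cdb_opaque[1] and
 * FORMG4COUNT():
 *
 *	byte  0:      0x9E (SERVICE ACTION IN(16))
 *	byte  1:      0x10 (READ CAPACITY(16) service action)
 *	bytes 2-9:    logical block address, zero since PMI is zero
 *	bytes 10-13:  allocation length
 *	byte  14:     PMI bit (zero)
 *	byte  15:     control)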
17599 */
17600 bzero(&cdb, sizeof (cdb));
17601 bzero(&ucmd_buf, sizeof (ucmd_buf));
17602
17603 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
17604
17605 ucmd_buf.uscsi_cdb = (char *)&cdb;
17606 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
17607 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
17608 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
17609 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
17610 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
17611 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
17612 ucmd_buf.uscsi_timeout = 60;
17613
17614 /*
17615 * Read Capacity (16) is a Service Action In command. One
17616 * command byte (0x9E) is overloaded for multiple operations,
17617 * with the second CDB byte specifying the desired operation.
17618 */
17619 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
17620 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
17621
17622 /*
17623 * Fill in allocation length field
17624 */
17625 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
17626
17627 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
17628 UIO_SYSSPACE, path_flag);
17629
17630 switch (status) {
17631 case 0:
17632 /* Return failure if we did not get valid capacity data. */
17633 if (ucmd_buf.uscsi_resid > 20) {
17634 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17635 return (EIO);
17636 }
17637
17638 /*
17639 * Read capacity and block size from the READ CAPACITY 16 data.
17640 * This data may be adjusted later due to device specific
17641 * issues.
17642 *
17643 * According to the SCSI spec, the READ CAPACITY 16
17644 * command returns the following:
17645 *
17646 * bytes 0-7: Maximum logical block address available.
17647 * (MSB in byte:0 & LSB in byte:7)
17648 *
17649 * bytes 8-11: Block length in bytes
17650 * (MSB in byte:8 & LSB in byte:11)
17651 *
17652 */
17653 capacity = BE_64(capacity16_buf[0]);
17654 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
17655
17656 /*
17657 * Done with capacity16_buf
17658 */
17659 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17660
17661 /*
17662 * if the reported capacity is set to all 0xf's, then
17663 * this disk is too large. This could only happen with
17664 * a device that supports LBAs larger than 64 bits which
17665 * are not defined by any current T10 standards.
17666 */
17667 if (capacity == 0xffffffffffffffff) {
17668 return (EIO);
17669 }
17670 break; /* Success! */
17671 case EIO:
17672 switch (ucmd_buf.uscsi_status) {
17673 case STATUS_RESERVATION_CONFLICT:
17674 status = EACCES;
17675 break;
17676 case STATUS_CHECK:
17677 /*
17678 * Check condition; look for ASC/ASCQ of 0x04/0x01
17679 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
17680 */
17681 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
17682 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
17683 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
17684 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17685 return (EAGAIN);
17686 }
17687 break;
17688 default:
17689 break;
17690 }
17691 /* FALLTHRU */
17692 default:
17693 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17694 return (status);
17695 }
17696
17697 *capp = capacity;
17698 *lbap = lbasize;
17699
17700 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
17701 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
17702
17703 return (0);
17704 }
17705
17706
17707 /*
17708 * Function: sd_send_scsi_START_STOP_UNIT
17709 *
17710 * Description: Issue a scsi START STOP UNIT command to the target.
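 *
 *		(For reference: the START STOP UNIT CDB is opcode 0x1B,
 *		with byte 4 bit 0 = START and bit 1 = LOEJ; the
 *		SD_TARGET_* values are defined to match that encoding and
 *		are copied straight into cdb_opaque[4] below.)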
17711 * 17712 * Arguments: un - pointer to driver soft state (unit) structure for 17713 * this target. 17714 * flag - SD_TARGET_START 17715 * SD_TARGET_STOP 17716 * SD_TARGET_EJECT 17717 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17718 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17719 * to use the USCSI "direct" chain and bypass the normal 17720 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17721 * command is issued as part of an error recovery action. 17722 * 17723 * Return Code: 0 - Success 17724 * EIO - IO error 17725 * EACCES - Reservation conflict detected 17726 * ENXIO - Not Ready, medium not present 17727 * errno return code from sd_send_scsi_cmd() 17728 * 17729 * Context: Can sleep. 17730 */ 17731 17732 static int 17733 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 17734 { 17735 struct scsi_extended_sense sense_buf; 17736 union scsi_cdb cdb; 17737 struct uscsi_cmd ucmd_buf; 17738 int status; 17739 17740 ASSERT(un != NULL); 17741 ASSERT(!mutex_owned(SD_MUTEX(un))); 17742 17743 SD_TRACE(SD_LOG_IO, un, 17744 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 17745 17746 if (un->un_f_check_start_stop && 17747 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 17748 (un->un_f_start_stop_supported != TRUE)) { 17749 return (0); 17750 } 17751 17752 /* 17753 * If we are performing an eject operation and 17754 * we receive any command other than SD_TARGET_EJECT 17755 * we should immediately return. 17756 */ 17757 if (flag != SD_TARGET_EJECT) { 17758 mutex_enter(SD_MUTEX(un)); 17759 if (un->un_f_ejecting == TRUE) { 17760 mutex_exit(SD_MUTEX(un)); 17761 return (EAGAIN); 17762 } 17763 mutex_exit(SD_MUTEX(un)); 17764 } 17765 17766 bzero(&cdb, sizeof (cdb)); 17767 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17768 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 17769 17770 cdb.scc_cmd = SCMD_START_STOP; 17771 cdb.cdb_opaque[4] = (uchar_t)flag; 17772 17773 ucmd_buf.uscsi_cdb = (char *)&cdb; 17774 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17775 ucmd_buf.uscsi_bufaddr = NULL; 17776 ucmd_buf.uscsi_buflen = 0; 17777 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17778 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 17779 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17780 ucmd_buf.uscsi_timeout = 200; 17781 17782 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17783 UIO_SYSSPACE, path_flag); 17784 17785 switch (status) { 17786 case 0: 17787 break; /* Success! */ 17788 case EIO: 17789 switch (ucmd_buf.uscsi_status) { 17790 case STATUS_RESERVATION_CONFLICT: 17791 status = EACCES; 17792 break; 17793 case STATUS_CHECK: 17794 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 17795 switch (scsi_sense_key( 17796 (uint8_t *)&sense_buf)) { 17797 case KEY_ILLEGAL_REQUEST: 17798 status = ENOTSUP; 17799 break; 17800 case KEY_NOT_READY: 17801 if (scsi_sense_asc( 17802 (uint8_t *)&sense_buf) 17803 == 0x3A) { 17804 status = ENXIO; 17805 } 17806 break; 17807 default: 17808 break; 17809 } 17810 } 17811 break; 17812 default: 17813 break; 17814 } 17815 break; 17816 default: 17817 break; 17818 } 17819 17820 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 17821 17822 return (status); 17823 } 17824 17825 17826 /* 17827 * Function: sd_start_stop_unit_callback 17828 * 17829 * Description: timeout(9F) callback to begin recovery process for a 17830 * device that has spun down. 17831 * 17832 * Arguments: arg - pointer to associated softstate struct. 
17833 * 17834 * Context: Executes in a timeout(9F) thread context 17835 */ 17836 17837 static void 17838 sd_start_stop_unit_callback(void *arg) 17839 { 17840 struct sd_lun *un = arg; 17841 ASSERT(un != NULL); 17842 ASSERT(!mutex_owned(SD_MUTEX(un))); 17843 17844 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 17845 17846 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 17847 } 17848 17849 17850 /* 17851 * Function: sd_start_stop_unit_task 17852 * 17853 * Description: Recovery procedure when a drive is spun down. 17854 * 17855 * Arguments: arg - pointer to associated softstate struct. 17856 * 17857 * Context: Executes in a taskq() thread context 17858 */ 17859 17860 static void 17861 sd_start_stop_unit_task(void *arg) 17862 { 17863 struct sd_lun *un = arg; 17864 17865 ASSERT(un != NULL); 17866 ASSERT(!mutex_owned(SD_MUTEX(un))); 17867 17868 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 17869 17870 /* 17871 * Some unformatted drives report not ready error, no need to 17872 * restart if format has been initiated. 17873 */ 17874 mutex_enter(SD_MUTEX(un)); 17875 if (un->un_f_format_in_progress == TRUE) { 17876 mutex_exit(SD_MUTEX(un)); 17877 return; 17878 } 17879 mutex_exit(SD_MUTEX(un)); 17880 17881 /* 17882 * When a START STOP command is issued from here, it is part of a 17883 * failure recovery operation and must be issued before any other 17884 * commands, including any pending retries. Thus it must be sent 17885 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 17886 * succeeds or not, we will start I/O after the attempt. 17887 */ 17888 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 17889 SD_PATH_DIRECT_PRIORITY); 17890 17891 /* 17892 * The above call blocks until the START_STOP_UNIT command completes. 17893 * Now that it has completed, we must re-try the original IO that 17894 * received the NOT READY condition in the first place. There are 17895 * three possible conditions here: 17896 * 17897 * (1) The original IO is on un_retry_bp. 17898 * (2) The original IO is on the regular wait queue, and un_retry_bp 17899 * is NULL. 17900 * (3) The original IO is on the regular wait queue, and un_retry_bp 17901 * points to some other, unrelated bp. 17902 * 17903 * For each case, we must call sd_start_cmds() with un_retry_bp 17904 * as the argument. If un_retry_bp is NULL, this will initiate 17905 * processing of the regular wait queue. If un_retry_bp is not NULL, 17906 * then this will process the bp on un_retry_bp. That may or may not 17907 * be the original IO, but that does not matter: the important thing 17908 * is to keep the IO processing going at this point. 17909 * 17910 * Note: This is a very specific error recovery sequence associated 17911 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 17912 * serialize the I/O with completion of the spin-up. 17913 */ 17914 mutex_enter(SD_MUTEX(un)); 17915 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17916 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 17917 un, un->un_retry_bp); 17918 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 17919 sd_start_cmds(un, un->un_retry_bp); 17920 mutex_exit(SD_MUTEX(un)); 17921 17922 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 17923 } 17924 17925 17926 /* 17927 * Function: sd_send_scsi_INQUIRY 17928 * 17929 * Description: Issue the scsi INQUIRY command. 
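 *
 *		(Sketch of the 6-byte CDB built below: opcode 0x12 with
 *		byte 1 bit 0 = EVPD, byte 2 = page code (meaningful only
 *		when EVPD is set), and byte 4 = allocation length as
 *		filled in by FORMG0COUNT().)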
17930 *
17931 * Arguments: un
17932 * bufaddr
17933 * buflen
17934 * evpd
17935 * page_code
17936 * residp
17937 *
17938 * Return Code: 0 - Success
17939 * errno return code from sd_send_scsi_cmd()
17940 *
17941 * Context: Can sleep. Does not return until command is completed.
17942 */
17943
17944 static int
17945 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
17946 uchar_t evpd, uchar_t page_code, size_t *residp)
17947 {
17948 union scsi_cdb cdb;
17949 struct uscsi_cmd ucmd_buf;
17950 int status;
17951
17952 ASSERT(un != NULL);
17953 ASSERT(!mutex_owned(SD_MUTEX(un)));
17954 ASSERT(bufaddr != NULL);
17955
17956 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
17957
17958 bzero(&cdb, sizeof (cdb));
17959 bzero(&ucmd_buf, sizeof (ucmd_buf));
17960 bzero(bufaddr, buflen);
17961
17962 cdb.scc_cmd = SCMD_INQUIRY;
17963 cdb.cdb_opaque[1] = evpd;
17964 cdb.cdb_opaque[2] = page_code;
17965 FORMG0COUNT(&cdb, buflen);
17966
17967 ucmd_buf.uscsi_cdb = (char *)&cdb;
17968 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
17969 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
17970 ucmd_buf.uscsi_buflen = buflen;
17971 ucmd_buf.uscsi_rqbuf = NULL;
17972 ucmd_buf.uscsi_rqlen = 0;
17973 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
17974 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
17975
17976 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
17977 UIO_SYSSPACE, SD_PATH_DIRECT);
17978
17979 if ((status == 0) && (residp != NULL)) {
17980 *residp = ucmd_buf.uscsi_resid;
17981 }
17982
17983 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
17984
17985 return (status);
17986 }
17987
17988
17989 /*
17990 * Function: sd_send_scsi_TEST_UNIT_READY
17991 *
17992 * Description: Issue the scsi TEST UNIT READY command.
17993 * This routine can be told to set the flag USCSI_DIAGNOSE to
17994 * prevent retrying failed commands. Use this when the intent
17995 * is either to check for device readiness, to clear a Unit
17996 * Attention, or to clear any outstanding sense data.
17997 * However under specific conditions the expected behavior
17998 * is for retries to bring a device ready, so use the flag
17999 * with caution.
18000 *
18001 * Arguments: un
18002 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
18003 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
18004 * 0: don't check for media present, do retries on cmd.
18005 *
18006 * Return Code: 0 - Success
18007 * EIO - IO error
18008 * EACCES - Reservation conflict detected
18009 * ENXIO - Not Ready, medium not present
18010 * errno return code from sd_send_scsi_cmd()
18011 *
18012 * Context: Can sleep. Does not return until command is completed.
18013 */
18014
18015 static int
18016 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
18017 {
18018 struct scsi_extended_sense sense_buf;
18019 union scsi_cdb cdb;
18020 struct uscsi_cmd ucmd_buf;
18021 int status;
18022
18023 ASSERT(un != NULL);
18024 ASSERT(!mutex_owned(SD_MUTEX(un)));
18025
18026 SD_TRACE(SD_LOG_IO, un,
18027 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
18028
18029 /*
18030 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
18031 * timeouts when they receive a TUR and the queue is not empty. Check
18032 * the configuration flag set during attach (indicating the drive has
18033 * this firmware bug) and un_ncmds_in_transport before issuing the
18034 * TUR. If there are
18035 * pending commands, return success; this is somewhat arbitrary but
18036 * acceptable for non-removables (i.e.
the eliteI disks) and non-clustering 18037 * configurations. 18038 */ 18039 if (un->un_f_cfg_tur_check == TRUE) { 18040 mutex_enter(SD_MUTEX(un)); 18041 if (un->un_ncmds_in_transport != 0) { 18042 mutex_exit(SD_MUTEX(un)); 18043 return (0); 18044 } 18045 mutex_exit(SD_MUTEX(un)); 18046 } 18047 18048 bzero(&cdb, sizeof (cdb)); 18049 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18050 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18051 18052 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18053 18054 ucmd_buf.uscsi_cdb = (char *)&cdb; 18055 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18056 ucmd_buf.uscsi_bufaddr = NULL; 18057 ucmd_buf.uscsi_buflen = 0; 18058 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18059 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18060 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18061 18062 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18063 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18064 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18065 } 18066 ucmd_buf.uscsi_timeout = 60; 18067 18068 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18069 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18070 SD_PATH_STANDARD)); 18071 18072 switch (status) { 18073 case 0: 18074 break; /* Success! */ 18075 case EIO: 18076 switch (ucmd_buf.uscsi_status) { 18077 case STATUS_RESERVATION_CONFLICT: 18078 status = EACCES; 18079 break; 18080 case STATUS_CHECK: 18081 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18082 break; 18083 } 18084 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18085 (scsi_sense_key((uint8_t *)&sense_buf) == 18086 KEY_NOT_READY) && 18087 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18088 status = ENXIO; 18089 } 18090 break; 18091 default: 18092 break; 18093 } 18094 break; 18095 default: 18096 break; 18097 } 18098 18099 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18100 18101 return (status); 18102 } 18103 18104 18105 /* 18106 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18107 * 18108 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18109 * 18110 * Arguments: un 18111 * 18112 * Return Code: 0 - Success 18113 * EACCES 18114 * ENOTSUP 18115 * errno return code from sd_send_scsi_cmd() 18116 * 18117 * Context: Can sleep. Does not return until command is completed. 
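 *
 *		(Usage sketch, with an assumed caller-owned buffer the
 *		same minimal size the routine would allocate by default:
 *
 *	uchar_t buf[MHIOC_RESV_KEY_SIZE];
 *	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS,
 *	    sizeof (buf), buf);
 *
 *		Passing a NULL data_bufp with data_len of zero is also
 *		legal; the routine then allocates for itself, as below.)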

/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
 *
 * Description: Issue the scsi PERSISTENT RESERVE IN command.
 *
 * Arguments: un - pointer to soft state struct for the target.
 *		usr_cmd - SD_READ_KEYS or SD_READ_RESV, selecting the PRIN
 *			service action.
 *		data_len - length of the data buffer (0 if data_bufp is NULL).
 *		data_bufp - buffer to receive the parameter data; may be NULL,
 *			in which case a default buffer is allocated.
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd,
	uint16_t data_len, uchar_t *data_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			no_caller_buf = FALSE;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	if (data_bufp == NULL) {
		/* Allocate a default buf if the caller did not give one */
		ASSERT(data_len == 0);
		data_len  = MHIOC_RESV_KEY_SIZE;
		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
		no_caller_buf = TRUE;
	}

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)data_bufp;
	ucmd_buf.uscsi_buflen	= data_len;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");

	if (no_caller_buf == TRUE) {
		kmem_free(data_bufp, data_len);
	}

	return (status);
}
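
/*
 * Illustrative sketch (not compiled): decoding the READ KEYS parameter data
 * returned by the routine above. Per SPC-3, the header carries a 4-byte PR
 * generation and a 4-byte additional length, followed by 8-byte registered
 * keys. The helper name is hypothetical.
 */
#if 0
static void
sd_example_decode_read_keys(uchar_t *buf, uint16_t buflen)
{
	uint32_t	generation;
	uint32_t	key_bytes;
	uint32_t	i;

	/* Both header fields are big-endian on the wire. */
	generation = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
	key_bytes  = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];

	cmn_err(CE_CONT, "PR generation %u, %u key(s)\n",
	    generation, key_bytes / MHIOC_RESV_KEY_SIZE);

	/* Each key is MHIOC_RESV_KEY_SIZE (8) bytes, starting at offset 8. */
	for (i = 0; (i < key_bytes) && (8 + i + MHIOC_RESV_KEY_SIZE <= buflen);
	    i += MHIOC_RESV_KEY_SIZE) {
		uchar_t *key = &buf[8 + i];
		cmn_err(CE_CONT, "key[%u]: %02x%02x%02x%02x%02x%02x%02x%02x\n",
		    i / MHIOC_RESV_KEY_SIZE, key[0], key[1], key[2], key[3],
		    key[4], key[5], key[6], key[7]);
	}
}
#endif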

/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine is the driver entry point for handling
 *		multi-host persistent reservation requests (MHIOCGRP_REGISTER,
 *		MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT,
 *		MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT
 *		commands to the device.
 *
 * Arguments: un - Pointer to soft state struct for the target.
 *		usr_cmd - SCSI-3 reservation facility command (one of
 *			SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *			SD_SCSI3_PREEMPTANDABORT,
 *			SD_SCSI3_REGISTERANDIGNOREKEY)
 *		usr_bufp - user provided pointer to a register, reserve
 *			descriptor or preempt and abort structure
 *			(mhioc_register_t, mhioc_resv_desc_t,
 *			mhioc_preemptandabort_t)
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd,
	uchar_t	*usr_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	uchar_t			data_len = sizeof (sd_prout_t);
	sd_prout_t		*prp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(data_len == 24);	/* required by scsi spec */

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);

	if (usr_bufp == NULL) {
		return (EINVAL);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	prp = kmem_zalloc(data_len, KM_SLEEP);

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)prp;
	ucmd_buf.uscsi_buflen	= data_len;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	switch (usr_cmd) {
	case SD_SCSI3_REGISTER: {
		mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;

		bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->newkey.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	case SD_SCSI3_RESERVE:
	case SD_SCSI3_RELEASE: {
		mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;

		bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->type;
		break;
	}
	case SD_SCSI3_PREEMPTANDABORT: {
		mhioc_preemptandabort_t *ptr =
		    (mhioc_preemptandabort_t *)usr_bufp;

		bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->victim_key.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->resvdesc.type;
		ucmd_buf.uscsi_flags |= USCSI_HEAD;
		break;
	}
	case SD_SCSI3_REGISTERANDIGNOREKEY:
	{
		mhioc_registerandignorekey_t *ptr;
		ptr = (mhioc_registerandignorekey_t *)usr_bufp;
		bcopy(ptr->newkey.key,
		    prp->service_key, MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	default:
		ASSERT(FALSE);
		break;
	}

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	kmem_free(prp, data_len);
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
	return (status);
}
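
/*
 * Illustrative userland sketch (not compiled into the driver): how a
 * clustering agent might drive the PROUT path above through the mhd ioctl
 * interface. The function name, device path, and key value are made up for
 * the example.
 */
#if 0
#include <sys/mhd.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
example_register_key(const char *devpath)
{
	mhioc_register_t	reg;
	int			fd;
	int			rv;

	fd = open(devpath, O_RDWR | O_NDELAY);
	if (fd < 0)
		return (-1);

	(void) memset(&reg, 0, sizeof (reg));
	/* Old key all zeroes: we are not replacing an existing key. */
	(void) memcpy(reg.newkey.key, "EXAMPLE1", MHIOC_RESV_KEY_SIZE);
	reg.aptpl = B_FALSE;	/* Do not persist across power loss */

	/* Reaches sd_send_scsi_PERSISTENT_RESERVE_OUT(SD_SCSI3_REGISTER) */
	rv = ioctl(fd, MHIOCGRP_REGISTER, &reg);
	(void) close(fd);
	return (rv);
}
#endif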

/*
 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
 *
 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
 *
 * Arguments: un - pointer to the target's soft state struct
 *		dkc - pointer to the callback structure for an asynchronous
 *			request, or NULL for a synchronous request.
 *
 * Return Code: 0 - success
 *		errno-type error code
 *
 * Context: kernel thread context only.
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	union scsi_cdb		*cdb;
	struct buf		*bp;
	int			rval = 0;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
	cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;

	/*
	 * First get some memory for the uscsi_cmd struct and cdb
	 * and initialize for SYNCHRONIZE_CACHE cmd.
	 */
	uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
	uscmd->uscsi_cdblen = CDB_GROUP1;
	uscmd->uscsi_cdb = (caddr_t)cdb;
	uscmd->uscsi_bufaddr = NULL;
	uscmd->uscsi_buflen = 0;
	uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	uscmd->uscsi_rqlen = SENSE_LENGTH;
	uscmd->uscsi_rqresid = SENSE_LENGTH;
	uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	uscmd->uscsi_timeout = sd_io_time;

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy().
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = SD_PATH_DIRECT;
	uip->ui_cmdp  = uscmd;

	bp = getrbuf(KM_SLEEP);
	bp->b_private = uip;

	/*
	 * Setup buffer to carry uscsi request.
	 */
	bp->b_flags  = B_BUSY;
	bp->b_bcount = 0;
	bp->b_blkno  = 0;

	if (dkc != NULL) {
		bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
		uip->ui_dkc = *dkc;
	}

	bp->b_edev = SD_GET_DEV(un);
	bp->b_dev = cmpdev(bp->b_edev);	/* maybe unnecessary? */

	(void) sd_uscsi_strategy(bp);

	/*
	 * If synchronous request, wait for completion.
	 * If async, just return and let the b_iodone callback
	 * clean up.
	 * NOTE: On return, un_ncmds_in_driver will be decremented,
	 * but it was also incremented in sd_uscsi_strategy(), so
	 * we should be ok.
	 */
	if (dkc == NULL) {
		(void) biowait(bp);
		rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
	}

	return (rval);
}


static int
sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	uint8_t			*sense_buf;
	struct sd_lun		*un;
	int			status;

	uip = (struct sd_uscsi_info *)(bp->b_private);
	ASSERT(uip != NULL);

	uscmd = uip->ui_cmdp;
	ASSERT(uscmd != NULL);

	sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
	ASSERT(sense_buf != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	ASSERT(un != NULL);

	status = geterror(bp);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (uscmd->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Ignore reservation conflict */
			status = 0;
			goto done;

		case STATUS_CHECK:
			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key(sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				/* Ignore Illegal Request error */
				mutex_enter(SD_MUTEX(un));
				un->un_f_sync_cache_supported = FALSE;
				mutex_exit(SD_MUTEX(un));
				status = ENOTSUP;
				goto done;
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		/*
		 * Don't log an error message if this device
		 * has removable media.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
		}
		break;
	}

done:
	if (uip->ui_dkc.dkc_callback != NULL) {
		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
	}

	ASSERT((bp->b_flags & B_REMAPPED) == 0);
	freerbuf(bp);
	kmem_free(uip, sizeof (struct sd_uscsi_info));
	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
	kmem_free(uscmd, sizeof (struct uscsi_cmd));

	return (status);
}
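
/*
 * Illustrative sketch (not compiled): issuing an asynchronous cache flush
 * through the routine above. The callback and cookie names are made up for
 * the example; a real caller supplies its own completion handling.
 */
#if 0
static void
example_flush_done(void *cookie, int error)
{
	cmn_err(CE_CONT, "flush for %p completed: %d\n", cookie, error);
}

static int
example_async_flush(struct sd_lun *un)
{
	struct dk_callback dkc;

	bzero(&dkc, sizeof (dkc));
	dkc.dkc_callback = example_flush_done;
	dkc.dkc_cookie = un;

	/* Returns immediately; example_flush_done() runs at biodone time. */
	return (sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc));
}
#endif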

/*
 * Function: sd_send_scsi_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device.
 *		Called from sd_check_for_writable_cd & sd_get_media_info;
 *		the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
 *
 * Arguments: un
 *		ucmdbuf
 *		rqbuf
 *		rqbuflen
 *		bufaddr
 *		buflen
 *		path_flag
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 *
 */

static int
sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
	int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;  /* Requested Type */
	cdb[8] = SD_PROFILE_HEADER_LEN;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: exit\n");

	return (status);
}
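
/*
 * Illustrative sketch (not compiled): pulling the current profile out of the
 * GET CONFIGURATION feature header fetched by the routine above. Offsets
 * follow MMC: bytes 0-3 carry the data length, bytes 6-7 the current profile.
 */
#if 0
static uint16_t
example_current_profile(uchar_t *hdr)
{
	/* Big-endian current profile, e.g. 0x0008 CD-ROM, 0x0010 DVD-ROM */
	return ((hdr[6] << 8) | hdr[7]);
}
#endif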

/*
 * Function: sd_send_scsi_feature_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device to
 *		retrieve a specific feature. Called from
 *		sd_check_for_writable_cd & sd_set_mmc_caps.
 *
 * Arguments: un
 *		ucmdbuf
 *		rqbuf
 *		rqbuflen
 *		bufaddr
 *		buflen
 *		feature
 *		path_flag
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 *
 */
static int
sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature, int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;  /* Requested Type */
	cdb[3] = feature;
	cdb[8] = buflen;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_feature_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");

	return (status);
}

/*
 * Function: sd_send_scsi_MODE_SENSE
 *
 * Description: Utility function for issuing a scsi MODE SENSE command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 * Arguments: un - pointer to the softstate struct for the target.
 *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or
 *			CDB_GROUP[1|2] (10 byte)).
 *		bufaddr - buffer for page data retrieved from the target.
 *		buflen - size of page to be retrieved.
 *		page_code - page code of data to be retrieved from the target.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t page_code, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			headlen;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	bzero(bufaddr, buflen);

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SENSE;
		cdb.cdb_opaque[2] = page_code;
		FORMG0COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH;
	} else {
		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
		cdb.cdb_opaque[2] = page_code;
		FORMG1COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH_GRP2;
	}

	ASSERT(headlen <= buflen);
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * sr_check_wp() uses 0x3f page code and checks the header of
		 * the mode page to determine if the target device is
		 * write-protected. But some USB devices return 0 bytes for
		 * the 0x3f page code. For this case, make sure that at least
		 * the mode page header is returned.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen)
			status = EIO;
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}
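
/*
 * Illustrative sketch (not compiled): locating a returned mode page in the
 * buffer filled by the routine above. The page data follows the mode
 * parameter header and the (possibly empty) block descriptor.
 */
#if 0
static uchar_t *
example_mode_page_start(uchar_t *bufaddr, int cdbsize)
{
	struct mode_header	*mhp;
	int			bdlen;

	if (cdbsize == CDB_GROUP0) {
		mhp = (struct mode_header *)bufaddr;
		bdlen = mhp->bdesc_length;
		return (bufaddr + MODE_HEADER_LENGTH + bdlen);
	}
	/* Group 1/2: 8-byte header with a 2-byte block descriptor length */
	bdlen = (bufaddr[6] << 8) | bufaddr[7];
	return (bufaddr + MODE_HEADER_LENGTH_GRP2 + bdlen);
}
#endif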

/*
 * Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 * Arguments: un - pointer to the softstate struct for the target.
 *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or
 *			CDB_GROUP[1|2] (10 byte)).
 *		bufaddr - buffer holding the page data to be sent to
 *			the target.
 *		buflen - size of the page to be set.
 *		save_page - boolean to determine if SP bit should be set.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t save_page, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the savepage(SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}
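
/*
 * Illustrative sketch (not compiled): the usual read-modify-write pattern
 * for a mode page, built on the two routines above. Error handling is
 * elided. The mode data length byte is cleared before the select because
 * that field is reserved for MODE SELECT.
 */
#if 0
static int
example_mode_page_rmw(struct sd_lun *un, uchar_t page_code,
	uchar_t *buf, size_t buflen)
{
	int status;

	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, buf, buflen,
	    page_code, SD_PATH_DIRECT);
	if (status != 0)
		return (status);

	/* ... modify the page bytes here ... */

	((struct mode_header *)buf)->length = 0;
	return (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, buf, buflen,
	    SD_SAVE_PAGE, SD_PATH_DIRECT));
}
#endif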

/*
 * Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments: un:	Pointer to the sd_lun struct for the target.
 *		cmd:	SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer to receive the RDWR data
 *		buflen:  Length of caller's buffer to receive the RDWR data.
 *		start_block: Block number for the start of the RDWR operation.
 *			 (Assumes target-native block size.)
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	uint32_t		block_count;
	int			status;
	int			cdbsize;
	uchar_t			flag;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;

	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
	    "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
	    bufaddr, buflen, start_block, block_count);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Compute CDB size to use */
	if (start_block > 0xffffffff)
		cdbsize = CDB_GROUP4;
	else if ((start_block & 0xFFE00000) ||
	    (un->un_f_cfg_is_atapi == TRUE))
		cdbsize = CDB_GROUP1;
	else
		cdbsize = CDB_GROUP0;

	switch (cdbsize) {
	case CDB_GROUP0:	/* 6-byte CDBs */
		cdb.scc_cmd = cmd;
		FORMG0ADDR(&cdb, start_block);
		FORMG0COUNT(&cdb, block_count);
		break;
	case CDB_GROUP1:	/* 10-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP1;
		FORMG1ADDR(&cdb, start_block);
		FORMG1COUNT(&cdb, block_count);
		break;
	case CDB_GROUP4:	/* 16-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP4;
		FORMG4LONGADDR(&cdb, (uint64_t)start_block);
		FORMG4COUNT(&cdb, block_count);
		break;
	case CDB_GROUP5:	/* 12-byte CDBs (currently unsupported) */
	default:
		/* All others reserved */
		return (EINVAL);
	}

	/* Set LUN bit(s) in CDB if this is a SCSI-1 device */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= flag | USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;
	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");

	return (status);
}
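
/*
 * Illustrative sketch (not compiled): why the routine above picks the CDB
 * group the way it does. Group 0 CDBs carry only a 21-bit LBA (hence the
 * 0xFFE00000 test) and Group 1 a 32-bit LBA; anything larger needs the
 * 16-byte Group 4 form.
 */
#if 0
static int
example_pick_rdwr_cdbsize(uint64_t start_block, boolean_t is_atapi)
{
	if (start_block > 0xffffffff)
		return (CDB_GROUP4);	/* READ(16)/WRITE(16) */
	if ((start_block & 0xFFE00000) || is_atapi)
		return (CDB_GROUP1);	/* READ(10)/WRITE(10) */
	return (CDB_GROUP0);		/* READ(6)/WRITE(6) */
}
#endif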

/*
 * Function: sd_send_scsi_LOG_SENSE
 *
 * Description: Issue a scsi LOG_SENSE command with the given parameters.
 *
 * Arguments: un - Pointer to the sd_lun struct for the target.
 *		bufaddr - buffer for the log page data retrieved from
 *			the target.
 *		buflen - size of the page to be retrieved.
 *		page_code - page code of the log data to be retrieved.
 *		page_control - page control (PC) field for the CDB.
 *		param_ptr - parameter pointer field for the CDB.
 *		path_flag - SD_PATH_DIRECT or SD_PATH_STANDARD, as for the
 *			other routines above.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen,
	uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
	int path_flag)

{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
				/*
				 * ASC 0x24: INVALID FIELD IN CDB
				 */
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * The start stop cycle counter is
					 * implemented as page 0x31 in earlier
					 * generation disks. In new generation
					 * disks the start stop cycle counter
					 * is implemented as page 0xE. To
					 * properly handle this case, if an
					 * attempt for log page 0xE is made
					 * and fails, we will try again using
					 * page 0x31.
					 *
					 * Network storage BU committed to
					 * maintain the page 0x31 for this
					 * purpose and will not have any other
					 * page implemented with page code 0x31
					 * until all disks transition to the
					 * standard page.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_start_stop_cycle_page =
					    START_STOP_CYCLE_VU_PAGE;
					cdb.cdb_opaque[2] =
					    (char)(page_control << 6) |
					    un->un_start_stop_cycle_page;
					mutex_exit(SD_MUTEX(un));
					status = sd_send_scsi_cmd(
					    SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
					    UIO_SYSSPACE, path_flag);

					break;
				case TEMPERATURE_PAGE:
					status = ENOTTY;
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");

	return (status);
}
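
/*
 * Illustrative sketch (not compiled): reading the drive temperature via the
 * routine above. The offset follows the SPC temperature log page layout
 * (4-byte page header, then a parameter whose value byte is at offset 9);
 * the helper name and buffer size are made up for the example.
 */
#if 0
static int
example_get_temperature(struct sd_lun *un, int *temp_celsius)
{
	uchar_t	logpage[16];
	int	status;

	status = sd_send_scsi_LOG_SENSE(un, logpage, sizeof (logpage),
	    TEMPERATURE_PAGE, 1, 0, SD_PATH_DIRECT);
	if (status == 0)
		*temp_celsius = logpage[9];	/* current temperature */
	return (status);
}
#endif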

/*
 * Function: sdioctl
 *
 * Description: Driver's ioctl(9e) entry point function.
 *
 * Arguments: dev     - device number
 *		cmd     - ioctl operation to be performed
 *		arg     - user argument, contains data to be set or
 *			  reference parameter for get
 *		flag    - bit flag, indicating open settings, 32/64 bit type
 *		cred_p  - user credential pointer
 *		rval_p  - calling process return value (OPT)
 *
 * Return Code: EINVAL
 *		ENOTTY
 *		ENXIO
 *		EIO
 *		EFAULT
 *		ENOTSUP
 *		EPERM
 *
 * Context: Called from the device switch at normal priority.
 */

static int
sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
{
	struct sd_lun	*un = NULL;
	int		err = 0;
	int		i = 0;
	cred_t		*cr;
	int		tmprval = EINVAL;
	int		is_valid;

	/*
	 * All device accesses go through sdstrategy, where we check on
	 * suspend status.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	is_valid = SD_IS_VALID_LABEL(un);

	/*
	 * Moved this wait from sd_uscsi_strategy to here for
	 * reasons of deadlock prevention. Internal driver commands,
	 * specifically those to change a device's power level, result
	 * in a call to sd_uscsi_strategy.
	 */
	mutex_enter(SD_MUTEX(un));
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}
	/*
	 * Twiddling the counter here protects commands from now
	 * through to the top of sd_uscsi_strategy. Without the
	 * counter inc. a power down, for example, could get in
	 * after the above check for state is made and before
	 * execution gets to the top of sd_uscsi_strategy.
	 * That would cause problems.
	 */
	un->un_ncmds_in_driver++;

	if (!is_valid &&
	    (flag & (FNDELAY | FNONBLOCK))) {
		switch (cmd) {
		case DKIOCGGEOM:	/* SD_PATH_DIRECT */
		case DKIOCGVTOC:
		case DKIOCGAPART:
		case DKIOCPARTINFO:
		case DKIOCSGEOM:
		case DKIOCSAPART:
		case DKIOCGETEFI:
		case DKIOCPARTITION:
		case DKIOCSVTOC:
		case DKIOCSETEFI:
		case DKIOCGMBOOT:
		case DKIOCSMBOOT:
		case DKIOCG_PHYGEOM:
		case DKIOCG_VIRTGEOM:
			/* let cmlb handle it */
			goto skip_ready_valid;

		case CDROMPAUSE:
		case CDROMRESUME:
		case CDROMPLAYMSF:
		case CDROMPLAYTRKIND:
		case CDROMREADTOCHDR:
		case CDROMREADTOCENTRY:
		case CDROMSTOP:
		case CDROMSTART:
		case CDROMVOLCTRL:
		case CDROMSUBCHNL:
		case CDROMREADMODE2:
		case CDROMREADMODE1:
		case CDROMREADOFFSET:
		case CDROMSBLKMODE:
		case CDROMGBLKMODE:
		case CDROMGDRVSPEED:
		case CDROMSDRVSPEED:
		case CDROMCDDA:
		case CDROMCDXA:
		case CDROMSUBCODE:
			if (!ISCD(un)) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				return (ENOTTY);
			}
			break;
		case FDEJECT:
		case DKIOCEJECT:
		case CDROMEJECT:
			if (!un->un_f_eject_media_supported) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				return (ENOTTY);
			}
			break;
		case DKIOCFLUSHWRITECACHE:
			mutex_exit(SD_MUTEX(un));
			err = sd_send_scsi_TEST_UNIT_READY(un, 0);
			if (err != 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_enter(SD_MUTEX(un));
			/* FALLTHROUGH */
		case DKIOCREMOVABLE:
		case DKIOCHOTPLUGGABLE:
		case DKIOCINFO:
		case DKIOCGMEDIAINFO:
		case MHIOCENFAILFAST:
		case MHIOCSTATUS:
		case MHIOCTKOWN:
		case MHIOCRELEASE:
		case MHIOCGRP_INKEYS:
		case MHIOCGRP_INRESV:
		case MHIOCGRP_REGISTER:
		case MHIOCGRP_RESERVE:
		case MHIOCGRP_PREEMPTANDABORT:
		case MHIOCGRP_REGISTERANDIGNOREKEY:
		case CDROMCLOSETRAY:
		case USCSICMD:
			goto skip_ready_valid;
		default:
			break;
		}

		mutex_exit(SD_MUTEX(un));
		err = sd_ready_and_valid(un);
		mutex_enter(SD_MUTEX(un));

		if (err != SD_READY_VALID) {
			switch (cmd) {
			case DKIOCSTATE:
			case CDROMGDRVSPEED:
			case CDROMSDRVSPEED:
			case FDEJECT:	/* for eject command */
			case DKIOCEJECT:
			case CDROMEJECT:
			case DKIOCREMOVABLE:
			case DKIOCHOTPLUGGABLE:
				break;
			default:
				if (un->un_f_has_removable_media) {
					err = ENXIO;
				} else {
					/*
					 * Do not map SD_RESERVED_BY_OTHERS
					 * to EIO.
					 */
					if (err == SD_RESERVED_BY_OTHERS) {
						err = EACCES;
					} else {
						err = EIO;
					}
				}
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
		}
	}
skip_ready_valid:
	mutex_exit(SD_MUTEX(un));

	switch (cmd) {
	case DKIOCINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
		err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGMEDIAINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
		err = sd_get_media_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGGEOM:
	case DKIOCGVTOC:
	case DKIOCGAPART:
	case DKIOCPARTINFO:
	case DKIOCSGEOM:
	case DKIOCSAPART:
	case DKIOCGETEFI:
	case DKIOCPARTITION:
	case DKIOCSVTOC:
	case DKIOCSETEFI:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);

		/* TUR should spin up */

		if (un->un_f_has_removable_media)
			err = sd_send_scsi_TEST_UNIT_READY(un,
			    SD_CHECK_FOR_MEDIA);
		else
			err = sd_send_scsi_TEST_UNIT_READY(un, 0);

		if (err != 0)
			break;

		err = cmlb_ioctl(un->un_cmlbhandle, dev,
		    cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);

		if ((err == 0) &&
		    ((cmd == DKIOCSETEFI) ||
		    ((un->un_f_pkstats_enabled) &&
		    (cmd == DKIOCSAPART || cmd == DKIOCSVTOC)))) {

			tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
			    (void *)SD_PATH_DIRECT);
			if ((tmprval == 0) && un->un_f_pkstats_enabled) {
				sd_set_pstats(un);
				SD_TRACE(SD_LOG_IO_PARTITION, un,
				    "sd_ioctl: un:0x%p pstats created and "
				    "set\n", un);
			}
		}

		if ((cmd == DKIOCSVTOC) ||
		    ((cmd == DKIOCSETEFI) && (tmprval == 0))) {

			mutex_enter(SD_MUTEX(un));
			if (un->un_f_devid_supported &&
			    (un->un_f_opt_fab_devid == TRUE)) {
				if (un->un_devid == NULL) {
					sd_register_devid(un, SD_DEVINFO(un),
					    SD_TARGET_IS_UNRESERVED);
				} else {
					/*
					 * The device id for this disk
					 * has been fabricated. The
					 * device id must be preserved
					 * by writing it back out to
					 * disk.
					 */
					if (sd_write_deviceid(un) != 0) {
						ddi_devid_free(un->un_devid);
						un->un_devid = NULL;
					}
				}
			}
			mutex_exit(SD_MUTEX(un));
		}

		break;

	case DKIOCLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
		err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
		    SD_PATH_STANDARD);
		break;

	case DKIOCUNLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
		err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW,
		    SD_PATH_STANDARD);
		break;

	case DKIOCSTATE: {
		enum dkio_state		state;
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = sd_check_media(dev, state);
			if (err == 0) {
				if (ddi_copyout(&un->un_mediastate, (void *)arg,
				    sizeof (int), flag) != 0)
					err = EFAULT;
			}
		}
		break;
	}

	case DKIOCREMOVABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
		i = un->un_f_has_removable_media ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;
	case DKIOCHOTPLUGGABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
		i = un->un_f_is_hotpluggable ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCGTEMPERATURE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
		err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
		break;

	case MHIOCENFAILFAST:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCTKOWN:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCRELEASE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_release(dev);
		}
		break;

	case MHIOCSTATUS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
		if ((err = drv_priv(cred_p)) == 0) {
			switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) {
			case 0:
				err = 0;
				break;
			case EACCES:
				*rval_p = 1;
				err = 0;
				break;
			default:
				err = EIO;
				break;
			}
		}
		break;

	case MHIOCQRESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_reserve_release(dev, SD_RESERVE);
		}
		break;

	case MHIOCREREGISTERDEVID:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
		if (drv_priv(cred_p) == EPERM) {
			err = EPERM;
		} else if (!un->un_f_devid_supported) {
			err = ENOTTY;
		} else {
			err = sd_mhdioc_register_devid(dev);
		}
		break;

	case MHIOCGRP_INKEYS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
				    flag);
			}
		}
		break;

	case MHIOCGRP_INRESV:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
			}
		}
		break;

	case MHIOCGRP_REGISTER:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_register_t reg;
				if (ddi_copyin((void *)arg, &reg,
				    sizeof (mhioc_register_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    un, SD_SCSI3_REGISTER,
					    (uchar_t *)&reg);
				}
			}
		}
		break;

	case MHIOCGRP_RESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_resv_desc_t resv_desc;
				if (ddi_copyin((void *)arg, &resv_desc,
				    sizeof (mhioc_resv_desc_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    un, SD_SCSI3_RESERVE,
					    (uchar_t *)&resv_desc);
				}
			}
		}
		break;
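
	/*
	 * The remaining MHIOCGRP_* requests below follow the same shape as
	 * MHIOCGRP_REGISTER and MHIOCGRP_RESERVE above: check privilege,
	 * reject SCSI-2 reservation mode with ENOTSUP, copy in the caller's
	 * descriptor, and hand it to sd_send_scsi_PERSISTENT_RESERVE_OUT(),
	 * which maps reservation conflicts to EACCES and illegal requests
	 * to ENOTSUP.
	 */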
	case MHIOCGRP_PREEMPTANDABORT:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_preemptandabort_t preempt_abort;
				if (ddi_copyin((void *)arg, &preempt_abort,
				    sizeof (mhioc_preemptandabort_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    un, SD_SCSI3_PREEMPTANDABORT,
					    (uchar_t *)&preempt_abort);
				}
			}
		}
		break;

	case MHIOCGRP_REGISTERANDIGNOREKEY:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_registerandignorekey_t r_and_i;
				if (ddi_copyin((void *)arg, (void *)&r_and_i,
				    sizeof (mhioc_registerandignorekey_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    un, SD_SCSI3_REGISTERANDIGNOREKEY,
					    (uchar_t *)&r_and_i);
				}
			}
		}
		break;

	case USCSICMD:
		SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
		cr = ddi_get_cred();
		if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
			err = EPERM;
		} else {
			enum uio_seg	uioseg;
			uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
			    UIO_USERSPACE;
			if (un->un_f_format_in_progress == TRUE) {
				err = EAGAIN;
				break;
			}
			err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg,
			    flag, uioseg, SD_PATH_STANDARD);
		}
		break;

	case CDROMPAUSE:
	case CDROMRESUME:
		SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_pause_resume(dev, cmd);
		}
		break;

	case CDROMPLAYMSF:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_play_msf(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMPLAYTRKIND:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
#else
		if (!ISCD(un)) {
#endif
			err = ENOTTY;
		} else {
			err = sr_play_trkind(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCHDR:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tochdr(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCENTRY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tocentry(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSTOP:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP,
			    SD_PATH_STANDARD);
		}
		break;

	case CDROMSTART:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
			    SD_PATH_STANDARD);
		}
		break;
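
	/*
	 * The tray-close and eject requests below are built on the same
	 * START STOP UNIT machinery as CDROMSTOP/CDROMSTART above:
	 * SD_TARGET_CLOSE loads the media, while sr_eject() (shared by
	 * FDEJECT, DKIOCEJECT and CDROMEJECT) first allows media removal
	 * and then issues the stop-and-eject sequence.
	 */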
	case CDROMCLOSETRAY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE,
			    SD_PATH_STANDARD);
		}
		break;

	case FDEJECT:	/* for eject command */
	case DKIOCEJECT:
	case CDROMEJECT:
		SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
		if (!un->un_f_eject_media_supported) {
			err = ENOTTY;
		} else {
			err = sr_eject(dev);
		}
		break;

	case CDROMVOLCTRL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCHNL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_subchannel(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE2:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			/*
			 * If the drive supports READ CD, use that instead of
			 * switching the LBA size via a MODE SELECT
			 * Block Descriptor
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * There is no means of changing block size in case of atapi
		 * drives, thus return ENOTTY if drive type is atapi
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {

			/*
			 * MMC Devices do not support changing the
			 * logical block size
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive so return target block
			 * size for ATAPI drives since we cannot change the
			 * blocksize on ATAPI drives. Used primarily to detect
			 * if an ATAPI cdrom is present.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;
	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * Note: In the future the driver implementation for
			 * getting and setting cd speed should entail:
			 * 1) If non-mmc try the Toshiba mode page
			 *    (sr_change_speed)
			 * 2) If mmc but no support for Real Time Streaming try
			 *    the SET CD SPEED (0xBB) command
			 *    (sr_atapi_change_speed)
			 * 3) If mmc and support for Real Time Streaming
			 *    try the GET PERFORMANCE and SET STREAMING
			 *    commands (not yet implemented, 4380808)
			 */
			/*
			 * As per recent MMC spec, CD-ROM speed is variable
			 * and changes with LBA. Since there is no such
			 * thing as drive speed now, fail this ioctl.
			 *
			 * Note: EINVAL is returned for consistency with the
			 * original implementation, which included support for
			 * getting the drive speed of mmc devices but not
			 * setting the drive speed. Thus EINVAL would be
			 * returned if a set request was made for an mmc
			 * device. We no longer support get or set speed for
			 * mmc but need to remain consistent with regard
			 * to the error code returned.
			 */
			err = EINVAL;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			err = sr_atapi_change_speed(dev, cmd, arg, flag);
		} else {
			err = sr_change_speed(dev, cmd, arg, flag);
		}
		break;

	case CDROMCDDA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdda(dev, (void *)arg, flag);
		}
		break;

	case CDROMCDXA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdxa(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
		}
		break;


#ifdef SDDEBUG
	/* RESET/ABORTS testing ioctls */
	case DKIOCRESET: {
		int	reset_level;

		if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
			err = EFAULT;
		} else {
			SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
			    "reset_level = 0x%x\n", reset_level);
			if (scsi_reset(SD_ADDRESS(un), reset_level)) {
				err = 0;
			} else {
				err = EIO;
			}
		}
		break;
	}

	case DKIOCABORT:
		SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
		if (scsi_abort(SD_ADDRESS(un), NULL)) {
			err = 0;
		} else {
			err = EIO;
		}
		break;
#endif

#ifdef SD_FAULT_INJECTION
	/* SDIOC FaultInjection testing ioctls */
	case SDIOCSTART:
	case SDIOCSTOP:
	case SDIOCINSERTPKT:
	case SDIOCINSERTXB:
	case SDIOCINSERTUN:
	case SDIOCINSERTARQ:
	case SDIOCPUSH:
	case SDIOCRETRIEVE:
	case SDIOCRUN:
		SD_INFO(SD_LOG_SDTEST, un, "sdioctl: "
		    "SDIOC detected cmd:0x%X:\n", cmd);
		/* call error generator */
		sd_faultinjection_ioctl(cmd, arg, un);
		err = 0;
		break;

#endif /* SD_FAULT_INJECTION */
	case DKIOCFLUSHWRITECACHE:
		{
			struct dk_callback *dkc = (struct dk_callback *)arg;

			mutex_enter(SD_MUTEX(un));
			if (!un->un_f_sync_cache_supported ||
			    !un->un_f_write_cache_enabled) {
				err = un->un_f_sync_cache_supported ?
				    0 : ENOTSUP;
				mutex_exit(SD_MUTEX(un));
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				break;
			}
			mutex_exit(SD_MUTEX(un));

			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				/* async SYNC CACHE request */
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
			} else {
				/* synchronous SYNC CACHE request */
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
			}
		}
		break;

	case DKIOCGETWCE: {

		int wce;

		if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) {
			break;
		}

		if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
			err = EFAULT;
		}
		break;
	}

	case DKIOCSETWCE: {

		int wce, sync_supported;

		if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
			err = EFAULT;
			break;
		}

		/*
		 * Synchronize multiple threads trying to enable
		 * or disable the cache via the un_f_wcc_cv
		 * condition variable.
		 */
		mutex_enter(SD_MUTEX(un));

		/*
		 * Don't allow the cache to be enabled if the
		 * config file has it disabled.
		 */
		if (un->un_f_opt_disable_cache && wce) {
			mutex_exit(SD_MUTEX(un));
			err = EINVAL;
			break;
		}

		/*
		 * Wait for write cache change in progress
		 * bit to be clear before proceeding.
		 */
		while (un->un_f_wcc_inprog)
			cv_wait(&un->un_wcc_cv, SD_MUTEX(un));

		un->un_f_wcc_inprog = 1;

		if (un->un_f_write_cache_enabled && wce == 0) {
			/*
			 * Disable the write cache. Don't clear
			 * un_f_write_cache_enabled until after
			 * the mode select and flush are complete.
			 */
			sync_supported = un->un_f_sync_cache_supported;
			mutex_exit(SD_MUTEX(un));
			if ((err = sd_cache_control(un, SD_CACHE_NOCHANGE,
			    SD_CACHE_DISABLE)) == 0 && sync_supported) {
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
			}

			mutex_enter(SD_MUTEX(un));
			if (err == 0) {
				un->un_f_write_cache_enabled = 0;
			}

		} else if (!un->un_f_write_cache_enabled && wce != 0) {
			/*
			 * Set un_f_write_cache_enabled first, so there is
			 * no window where the cache is enabled, but the
			 * bit says it isn't.
			 */
			un->un_f_write_cache_enabled = 1;
			mutex_exit(SD_MUTEX(un));

			err = sd_cache_control(un, SD_CACHE_NOCHANGE,
			    SD_CACHE_ENABLE);

			mutex_enter(SD_MUTEX(un));

			if (err) {
				un->un_f_write_cache_enabled = 0;
			}
		}

		un->un_f_wcc_inprog = 0;
		cv_broadcast(&un->un_wcc_cv);
		mutex_exit(SD_MUTEX(un));
		break;
	}

	default:
		err = ENOTTY;
		break;
	}
	mutex_enter(SD_MUTEX(un));
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
	return (err);
}
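
/*
 * Illustrative userland sketch (not compiled into the driver): a synchronous
 * cache flush through the ioctl path above. From user space the
 * callback-bearing form is unavailable (it requires FKIOCTL), so the request
 * completes before ioctl(2) returns. The function name is made up.
 */
#if 0
#include <sys/dkio.h>
#include <fcntl.h>
#include <unistd.h>

static int
example_flush_disk(const char *devpath)
{
	int	fd, rv;

	fd = open(devpath, O_RDWR);
	if (fd < 0)
		return (-1);
	/* NULL arg: reaches sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL) */
	rv = ioctl(fd, DKIOCFLUSHWRITECACHE, NULL);
	(void) close(fd);
	return (rv);
}
#endif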
20105 */ 20106 un->un_f_write_cache_enabled = 1; 20107 mutex_exit(SD_MUTEX(un)); 20108 20109 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20110 SD_CACHE_ENABLE); 20111 20112 mutex_enter(SD_MUTEX(un)); 20113 20114 if (err) { 20115 un->un_f_write_cache_enabled = 0; 20116 } 20117 } 20118 20119 un->un_f_wcc_inprog = 0; 20120 cv_broadcast(&un->un_wcc_cv); 20121 mutex_exit(SD_MUTEX(un)); 20122 break; 20123 } 20124 20125 default: 20126 err = ENOTTY; 20127 break; 20128 } 20129 mutex_enter(SD_MUTEX(un)); 20130 un->un_ncmds_in_driver--; 20131 ASSERT(un->un_ncmds_in_driver >= 0); 20132 mutex_exit(SD_MUTEX(un)); 20133 20134 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20135 return (err); 20136 } 20137 20138 20139 /* 20140 * Function: sd_dkio_ctrl_info 20141 * 20142 * Description: This routine is the driver entry point for handling controller 20143 * information ioctl requests (DKIOCINFO). 20144 * 20145 * Arguments: dev - the device number 20146 * arg - pointer to user provided dk_cinfo structure 20147 * specifying the controller type and attributes. 20148 * flag - this argument is a pass through to ddi_copyxxx() 20149 * directly from the mode argument of ioctl(). 20150 * 20151 * Return Code: 0 20152 * EFAULT 20153 * ENXIO 20154 */ 20155 20156 static int 20157 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20158 { 20159 struct sd_lun *un = NULL; 20160 struct dk_cinfo *info; 20161 dev_info_t *pdip; 20162 int lun, tgt; 20163 20164 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20165 return (ENXIO); 20166 } 20167 20168 info = (struct dk_cinfo *) 20169 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20170 20171 switch (un->un_ctype) { 20172 case CTYPE_CDROM: 20173 info->dki_ctype = DKC_CDROM; 20174 break; 20175 default: 20176 info->dki_ctype = DKC_SCSI_CCS; 20177 break; 20178 } 20179 pdip = ddi_get_parent(SD_DEVINFO(un)); 20180 info->dki_cnum = ddi_get_instance(pdip); 20181 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20182 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20183 } else { 20184 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20185 DK_DEVLEN - 1); 20186 } 20187 20188 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20189 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20190 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20191 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20192 20193 /* Unit Information */ 20194 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20195 info->dki_slave = ((tgt << 3) | lun); 20196 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20197 DK_DEVLEN - 1); 20198 info->dki_flags = DKI_FMTVOL; 20199 info->dki_partition = SDPART(dev); 20200 20201 /* Max Transfer size of this device in blocks */ 20202 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20203 info->dki_addr = 0; 20204 info->dki_space = 0; 20205 info->dki_prio = 0; 20206 info->dki_vec = 0; 20207 20208 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20209 kmem_free(info, sizeof (struct dk_cinfo)); 20210 return (EFAULT); 20211 } else { 20212 kmem_free(info, sizeof (struct dk_cinfo)); 20213 return (0); 20214 } 20215 } 20216 20217 20218 /* 20219 * Function: sd_get_media_info 20220 * 20221 * Description: This routine is the driver entry point for handling ioctl 20222 * requests for the media type or command set profile used by the 20223 * drive to operate on the media (DKIOCGMEDIAINFO). 
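 *
 * Editorial illustration, not part of the original source: a
 * user-level caller of DKIOCGMEDIAINFO, using the dk_minfo fields
 * filled in below (the device path is invented):
 *
 *	struct dk_minfo mi;
 *	int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY | O_NDELAY);
 *	if (fd >= 0 && ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0) {
 *		(void) printf("type=%u lbsize=%u capacity=%llu\n",
 *		    mi.dki_media_type, mi.dki_lbsize,
 *		    (unsigned long long)mi.dki_capacity);
 *	}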
20224 * 20225 * Arguments: dev - the device number 20226 * arg - pointer to user provided dk_minfo structure 20227 * specifying the media type, logical block size and 20228 * drive capacity. 20229 * flag - this argument is a pass through to ddi_copyxxx() 20230 * directly from the mode argument of ioctl(). 20231 * 20232 * Return Code: 0 20233 * EACCES 20234 * EFAULT 20235 * ENXIO 20236 * EIO 20237 */ 20238 20239 static int 20240 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20241 { 20242 struct sd_lun *un = NULL; 20243 struct uscsi_cmd com; 20244 struct scsi_inquiry *sinq; 20245 struct dk_minfo media_info; 20246 u_longlong_t media_capacity; 20247 uint64_t capacity; 20248 uint_t lbasize; 20249 uchar_t *out_data; 20250 uchar_t *rqbuf; 20251 int rval = 0; 20252 int rtn; 20253 20254 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20255 (un->un_state == SD_STATE_OFFLINE)) { 20256 return (ENXIO); 20257 } 20258 20259 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20260 20261 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20262 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20263 20264 /* Issue a TUR to determine if the drive is ready with media present */ 20265 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20266 if (rval == ENXIO) { 20267 goto done; 20268 } 20269 20270 /* Now get configuration data */ 20271 if (ISCD(un)) { 20272 media_info.dki_media_type = DK_CDROM; 20273 20274 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20275 if (un->un_f_mmc_cap == TRUE) { 20276 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20277 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20278 SD_PATH_STANDARD); 20279 20280 if (rtn) { 20281 /* 20282 * Failed for other than an illegal request 20283 * or command not supported 20284 */ 20285 if ((com.uscsi_status == STATUS_CHECK) && 20286 (com.uscsi_rqstatus == STATUS_GOOD)) { 20287 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20288 (rqbuf[12] != 0x20)) { 20289 rval = EIO; 20290 goto done; 20291 } 20292 } 20293 } else { 20294 /* 20295 * The GET CONFIGURATION command succeeded 20296 * so set the media type according to the 20297 * returned data 20298 */ 20299 media_info.dki_media_type = out_data[6]; 20300 media_info.dki_media_type <<= 8; 20301 media_info.dki_media_type |= out_data[7]; 20302 } 20303 } 20304 } else { 20305 /* 20306 * The profile list is not available, so we attempt to identify 20307 * the media type based on the inquiry data 20308 */ 20309 sinq = un->un_sd->sd_inq; 20310 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20311 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20312 /* This is a direct access device or optical disk */ 20313 media_info.dki_media_type = DK_FIXED_DISK; 20314 20315 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20316 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20317 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20318 media_info.dki_media_type = DK_ZIP; 20319 } else if ( 20320 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20321 media_info.dki_media_type = DK_JAZ; 20322 } 20323 } 20324 } else { 20325 /* 20326 * Not a CD, direct access or optical disk so return 20327 * unknown media 20328 */ 20329 media_info.dki_media_type = DK_UNKNOWN; 20330 } 20331 } 20332 20333 /* Now read the capacity so we can provide the lbasize and capacity */ 20334 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 20335 SD_PATH_DIRECT)) { 20336 case 0: 20337 break; 20338 case EACCES: 20339 rval = EACCES; 20340 goto done; 20341 default: 20342 rval = EIO; 20343 goto done; 20344 } 20345 20346
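	/*
	 * Editorial note, not in the original source: a worked example of
	 * the conversion performed just below. With un_sys_blocksize = 512
	 * and a CD reporting lbasize = 2048, a READ CAPACITY result of
	 * 1000000 (in 512-byte units) becomes
	 * 1000000 * 512 / 2048 = 250000 blocks of 2048 bytes, which is the
	 * value reported back in dki_capacity.
	 */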
media_info.dki_lbsize = lbasize; 20347 media_capacity = capacity; 20348 20349 /* 20350 * sd_send_scsi_READ_CAPACITY() reports capacity in 20351 * un->un_sys_blocksize chunks. So we need to convert it into 20352 * lbasize chunks. 20353 */ 20354 media_capacity *= un->un_sys_blocksize; 20355 media_capacity /= lbasize; 20356 media_info.dki_capacity = media_capacity; 20357 20358 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 20359 rval = EFAULT; 20360 /* Use a goto so any code added below in the future still runs the cleanup */ 20361 goto done; 20362 } 20363 done: 20364 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 20365 kmem_free(rqbuf, SENSE_LENGTH); 20366 return (rval); 20367 } 20368 20369 20370 /* 20371 * Function: sd_check_media 20372 * 20373 * Description: This utility routine implements the functionality for the 20374 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 20375 * drive state changes from that specified by the user 20376 * (inserted or ejected). For example, if the user specifies 20377 * DKIO_EJECTED and the current media state is inserted this 20378 * routine will immediately return DKIO_INSERTED. However, if the 20379 * current media state is not inserted, the user thread will be 20380 * blocked until the drive state changes. If DKIO_NONE is specified 20381 * the user thread will block until a drive state change occurs. 20382 * 20383 * Arguments: dev - the device number 20384 * state - user pointer to a dkio_state, updated with the current 20385 * drive state at return. 20386 * 20387 * Return Code: ENXIO 20388 * EIO 20389 * EAGAIN 20390 * EINTR 20391 */ 20392 20393 static int 20394 sd_check_media(dev_t dev, enum dkio_state state) 20395 { 20396 struct sd_lun *un = NULL; 20397 enum dkio_state prev_state; 20398 opaque_t token = NULL; 20399 int rval = 0; 20400 20401 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20402 return (ENXIO); 20403 } 20404 20405 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 20406 20407 mutex_enter(SD_MUTEX(un)); 20408 20409 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 20410 "state=%x, mediastate=%x\n", state, un->un_mediastate); 20411 20412 prev_state = un->un_mediastate; 20413 20414 /* is there anything to do? */ 20415 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 20416 /* 20417 * submit the request to the scsi_watch service; 20418 * scsi_media_watch_cb() does the real work 20419 */ 20420 mutex_exit(SD_MUTEX(un)); 20421 20422 /* 20423 * This change handles the case where a scsi watch request is 20424 * added to a device that is powered down. To accomplish this 20425 * we power up the device before adding the scsi watch request, 20426 * since the scsi watch sends a TUR directly to the device 20427 * which the device cannot handle if it is powered down. 20428 */ 20429 if (sd_pm_entry(un) != DDI_SUCCESS) { 20430 mutex_enter(SD_MUTEX(un)); 20431 goto done; 20432 } 20433 20434 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 20435 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 20436 (caddr_t)dev); 20437 20438 sd_pm_exit(un); 20439 20440 mutex_enter(SD_MUTEX(un)); 20441 if (token == NULL) { 20442 rval = EAGAIN; 20443 goto done; 20444 } 20445 20446 /* 20447 * This is a special case IOCTL that doesn't return 20448 * until the media state changes. Routine sdpower 20449 * knows about and handles this, so don't count it 20450 * as an active cmd in the driver, which would 20451 * keep the device busy to the pm framework.
20452 * If the count isn't decremented the device can't 20453 * be powered down. 20454 */ 20455 un->un_ncmds_in_driver--; 20456 ASSERT(un->un_ncmds_in_driver >= 0); 20457 20458 /* 20459 * if a prior request had been made, this will be the same 20460 * token, as scsi_watch was designed that way. 20461 */ 20462 un->un_swr_token = token; 20463 un->un_specified_mediastate = state; 20464 20465 /* 20466 * now wait for media change 20467 * we will not be signalled unless mediastate == state but it is 20468 * still better to test for this condition, since there is a 20469 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 20470 */ 20471 SD_TRACE(SD_LOG_COMMON, un, 20472 "sd_check_media: waiting for media state change\n"); 20473 while (un->un_mediastate == state) { 20474 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20475 SD_TRACE(SD_LOG_COMMON, un, 20476 "sd_check_media: waiting for media state " 20477 "was interrupted\n"); 20478 un->un_ncmds_in_driver++; 20479 rval = EINTR; 20480 goto done; 20481 } 20482 SD_TRACE(SD_LOG_COMMON, un, 20483 "sd_check_media: received signal, state=%x\n", 20484 un->un_mediastate); 20485 } 20486 /* 20487 * Inc the counter to indicate the device once again 20488 * has an active outstanding cmd. 20489 */ 20490 un->un_ncmds_in_driver++; 20491 } 20492 20493 /* invalidate geometry */ 20494 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20495 sr_ejected(un); 20496 } 20497 20498 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20499 uint64_t capacity; 20500 uint_t lbasize; 20501 20502 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 20503 mutex_exit(SD_MUTEX(un)); 20504 /* 20505 * Since the following routines use SD_PATH_DIRECT, we must 20506 * call PM directly before the upcoming disk accesses. This 20507 * may cause the disk to be power/spin up. 20508 */ 20509 20510 if (sd_pm_entry(un) == DDI_SUCCESS) { 20511 rval = sd_send_scsi_READ_CAPACITY(un, 20512 &capacity, 20513 &lbasize, SD_PATH_DIRECT); 20514 if (rval != 0) { 20515 sd_pm_exit(un); 20516 mutex_enter(SD_MUTEX(un)); 20517 goto done; 20518 } 20519 } else { 20520 rval = EIO; 20521 mutex_enter(SD_MUTEX(un)); 20522 goto done; 20523 } 20524 mutex_enter(SD_MUTEX(un)); 20525 20526 sd_update_block_info(un, lbasize, capacity); 20527 20528 /* 20529 * Check if the media in the device is writable or not 20530 */ 20531 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 20532 20533 mutex_exit(SD_MUTEX(un)); 20534 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 20535 if ((cmlb_validate(un->un_cmlbhandle, 0, 20536 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 20537 sd_set_pstats(un); 20538 SD_TRACE(SD_LOG_IO_PARTITION, un, 20539 "sd_check_media: un:0x%p pstats created and " 20540 "set\n", un); 20541 } 20542 20543 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20544 SD_PATH_DIRECT); 20545 sd_pm_exit(un); 20546 20547 mutex_enter(SD_MUTEX(un)); 20548 } 20549 done: 20550 un->un_f_watcht_stopped = FALSE; 20551 if (un->un_swr_token) { 20552 /* 20553 * Use of this local token and the mutex ensures that we avoid 20554 * some race conditions associated with terminating the 20555 * scsi watch. 
20556 */ 20557 token = un->un_swr_token; 20558 un->un_swr_token = (opaque_t)NULL; 20559 mutex_exit(SD_MUTEX(un)); 20560 (void) scsi_watch_request_terminate(token, 20561 SCSI_WATCH_TERMINATE_WAIT); 20562 mutex_enter(SD_MUTEX(un)); 20563 } 20564 20565 /* 20566 * Update the capacity kstat value, if no media previously 20567 * (capacity kstat is 0) and a media has been inserted 20568 * (un_f_blockcount_is_valid == TRUE) 20569 */ 20570 if (un->un_errstats) { 20571 struct sd_errstats *stp = NULL; 20572 20573 stp = (struct sd_errstats *)un->un_errstats->ks_data; 20574 if ((stp->sd_capacity.value.ui64 == 0) && 20575 (un->un_f_blockcount_is_valid == TRUE)) { 20576 stp->sd_capacity.value.ui64 = 20577 (uint64_t)((uint64_t)un->un_blockcount * 20578 un->un_sys_blocksize); 20579 } 20580 } 20581 mutex_exit(SD_MUTEX(un)); 20582 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 20583 return (rval); 20584 } 20585 20586 20587 /* 20588 * Function: sd_delayed_cv_broadcast 20589 * 20590 * Description: Delayed cv_broadcast to allow for target to recover from media 20591 * insertion. 20592 * 20593 * Arguments: arg - driver soft state (unit) structure 20594 */ 20595 20596 static void 20597 sd_delayed_cv_broadcast(void *arg) 20598 { 20599 struct sd_lun *un = arg; 20600 20601 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 20602 20603 mutex_enter(SD_MUTEX(un)); 20604 un->un_dcvb_timeid = NULL; 20605 cv_broadcast(&un->un_state_cv); 20606 mutex_exit(SD_MUTEX(un)); 20607 } 20608 20609 20610 /* 20611 * Function: sd_media_watch_cb 20612 * 20613 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 20614 * routine processes the TUR sense data and updates the driver 20615 * state if a transition has occurred. The user thread 20616 * (sd_check_media) is then signalled. 
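 *
 * Editorial illustration, not part of the original source: the
 * DKIOCSTATE ioctl that this callback ultimately serves is used from
 * user level as a blocking media-change wait. Each call returns only
 * when the state differs from the value passed in, per
 * sd_check_media() above. Device path and handler names are invented:
 *
 *	enum dkio_state st = DKIO_NONE;
 *	int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY | O_NDELAY);
 *	while (fd >= 0 && ioctl(fd, DKIOCSTATE, &st) == 0) {
 *		if (st == DKIO_INSERTED)
 *			handle_insertion();
 *		else if (st == DKIO_EJECTED)
 *			handle_ejection();
 *	}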
20617 * 20618 * Arguments: arg - the device 'dev_t' is used for context to discriminate 20619 * among multiple watches that share this callback function 20620 * resultp - scsi watch facility result packet containing scsi 20621 * packet, status byte and sense data 20622 * 20623 * Return Code: 0 for success, -1 for failure 20624 */ 20625 20626 static int 20627 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 20628 { 20629 struct sd_lun *un; 20630 struct scsi_status *statusp = resultp->statusp; 20631 uint8_t *sensep = (uint8_t *)resultp->sensep; 20632 enum dkio_state state = DKIO_NONE; 20633 dev_t dev = (dev_t)arg; 20634 uchar_t actual_sense_length; 20635 uint8_t skey, asc, ascq; 20636 20637 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20638 return (-1); 20639 } 20640 actual_sense_length = resultp->actual_sense_length; 20641 20642 mutex_enter(SD_MUTEX(un)); 20643 SD_TRACE(SD_LOG_COMMON, un, 20644 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 20645 *((char *)statusp), (void *)sensep, actual_sense_length); 20646 20647 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 20648 un->un_mediastate = DKIO_DEV_GONE; 20649 cv_broadcast(&un->un_state_cv); 20650 mutex_exit(SD_MUTEX(un)); 20651 20652 return (0); 20653 } 20654 20655 /* 20656 * If there was a check condition, then sensep points to valid sense 20657 * data. If the status was not a check condition but a reservation or 20658 * busy status, then the new state is DKIO_NONE. 20659 */ 20660 if (sensep != NULL) { 20661 skey = scsi_sense_key(sensep); 20662 asc = scsi_sense_asc(sensep); 20663 ascq = scsi_sense_ascq(sensep); 20664 20665 SD_INFO(SD_LOG_COMMON, un, 20666 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 20667 skey, asc, ascq); 20668 /* This routine only uses up to 13 bytes of sense data. */ 20669 if (actual_sense_length >= 13) { 20670 if (skey == KEY_UNIT_ATTENTION) { 20671 if (asc == 0x28) { 20672 state = DKIO_INSERTED; 20673 } 20674 } else { 20675 /* 20676 * Sense data 02/04/02 means that the host 20677 * should send a start command. Explicitly 20678 * leave the media state as is 20679 * (inserted), since the media is present 20680 * and the host has stopped the device for PM 20681 * reasons. The next real read/write 20682 * to this media will bring the 20683 * device back to the proper state for 20684 * media access. 20685 */ 20686 if ((skey == KEY_NOT_READY) && 20687 (asc == 0x3a)) { 20688 state = DKIO_EJECTED; 20689 } 20690 20691 /* 20692 * If the drive is busy with an operation 20693 * or long write, keep the media in an 20694 * inserted state.
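 *
 * Editorial summary of the mapping implemented in this routine (a
 * condensed restatement of the code, no new behavior):
 *
 *	KEY_UNIT_ATTENTION, ASC 0x28              -> DKIO_INSERTED
 *	KEY_NOT_READY, ASC 0x3a                   -> DKIO_EJECTED
 *	KEY_NOT_READY, ASC 0x04, ASCQ 02/07/08    -> DKIO_INSERTED
 *	good status, CMD_CMPLT, no sense data     -> DKIO_INSERTED
 *	anything else                             -> DKIO_NONE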
20695 */ 20696 20697 if ((skey == KEY_NOT_READY) && 20698 (asc == 0x04) && 20699 ((ascq == 0x02) || 20700 (ascq == 0x07) || 20701 (ascq == 0x08))) { 20702 state = DKIO_INSERTED; 20703 } 20704 } 20705 } 20706 } else if ((*((char *)statusp) == STATUS_GOOD) && 20707 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 20708 state = DKIO_INSERTED; 20709 } 20710 20711 SD_TRACE(SD_LOG_COMMON, un, 20712 "sd_media_watch_cb: state=%x, specified=%x\n", 20713 state, un->un_specified_mediastate); 20714 20715 /* 20716 * now signal the waiting thread if this is *not* the specified state; 20717 * delay the signal if the state is DKIO_INSERTED to allow the target 20718 * to recover 20719 */ 20720 if (state != un->un_specified_mediastate) { 20721 un->un_mediastate = state; 20722 if (state == DKIO_INSERTED) { 20723 /* 20724 * delay the signal to give the drive a chance 20725 * to do what it apparently needs to do 20726 */ 20727 SD_TRACE(SD_LOG_COMMON, un, 20728 "sd_media_watch_cb: delayed cv_broadcast\n"); 20729 if (un->un_dcvb_timeid == NULL) { 20730 un->un_dcvb_timeid = 20731 timeout(sd_delayed_cv_broadcast, un, 20732 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 20733 } 20734 } else { 20735 SD_TRACE(SD_LOG_COMMON, un, 20736 "sd_media_watch_cb: immediate cv_broadcast\n"); 20737 cv_broadcast(&un->un_state_cv); 20738 } 20739 } 20740 mutex_exit(SD_MUTEX(un)); 20741 return (0); 20742 } 20743 20744 20745 /* 20746 * Function: sd_dkio_get_temp 20747 * 20748 * Description: This routine is the driver entry point for handling ioctl 20749 * requests to get the disk temperature. 20750 * 20751 * Arguments: dev - the device number 20752 * arg - pointer to user provided dk_temperature structure. 20753 * flag - this argument is a pass through to ddi_copyxxx() 20754 * directly from the mode argument of ioctl(). 20755 * 20756 * Return Code: 0 20757 * EFAULT 20758 * ENXIO 20759 * EAGAIN 20760 */ 20761 20762 static int 20763 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 20764 { 20765 struct sd_lun *un = NULL; 20766 struct dk_temperature *dktemp = NULL; 20767 uchar_t *temperature_page; 20768 int rval = 0; 20769 int path_flag = SD_PATH_STANDARD; 20770 20771 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20772 return (ENXIO); 20773 } 20774 20775 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 20776 20777 /* copyin the disk temp argument to get the user flags */ 20778 if (ddi_copyin((void *)arg, dktemp, 20779 sizeof (struct dk_temperature), flag) != 0) { 20780 rval = EFAULT; 20781 goto done; 20782 } 20783 20784 /* Initialize the temperature to invalid. */ 20785 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 20786 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 20787 20788 /* 20789 * Note: Investigate removing the "bypass pm" semantic. 20790 * Can we just bypass PM always? 20791 */ 20792 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 20793 path_flag = SD_PATH_DIRECT; 20794 ASSERT(!mutex_owned(&un->un_pm_mutex)); 20795 mutex_enter(&un->un_pm_mutex); 20796 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 20797 /* 20798 * If DKT_BYPASS_PM is set, and the drive happens to be 20799 * in low power mode, we can not wake it up, Need to 20800 * return EAGAIN. 20801 */ 20802 mutex_exit(&un->un_pm_mutex); 20803 rval = EAGAIN; 20804 goto done; 20805 } else { 20806 /* 20807 * Indicate to PM the device is busy. This is required 20808 * to avoid a race - i.e. the ioctl is issuing a 20809 * command and the pm framework brings down the device 20810 * to low power mode (possible power cut-off on some 20811 * platforms). 
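 *
 * Editorial illustration, not part of the original source: assuming
 * the DKIOCGTEMPERATURE ioctl and struct dk_temperature definitions
 * from <sys/dkio.h>, a user-level query that declines to wake a
 * powered-down drive (the EAGAIN case above) might look like:
 *
 *	struct dk_temperature dkt;
 *	bzero(&dkt, sizeof (dkt));
 *	dkt.dkt_flags = DKT_BYPASS_PM;
 *	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
 *	    dkt.dkt_cur_temp != DKT_INVALID_TEMP)
 *		(void) printf("current temp: %d\n", dkt.dkt_cur_temp);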
20812 */ 20813 mutex_exit(&un->un_pm_mutex); 20814 if (sd_pm_entry(un) != DDI_SUCCESS) { 20815 rval = EAGAIN; 20816 goto done; 20817 } 20818 } 20819 } 20820 20821 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 20822 20823 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 20824 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 20825 goto done2; 20826 } 20827 20828 /* 20829 * For the current temperature verify that the parameter length is 0x02 20830 * and the parameter code is 0x00 20831 */ 20832 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 20833 (temperature_page[5] == 0x00)) { 20834 if (temperature_page[9] == 0xFF) { 20835 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 20836 } else { 20837 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 20838 } 20839 } 20840 20841 /* 20842 * For the reference temperature verify that the parameter 20843 * length is 0x02 and the parameter code is 0x01 20844 */ 20845 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 20846 (temperature_page[11] == 0x01)) { 20847 if (temperature_page[15] == 0xFF) { 20848 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 20849 } else { 20850 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 20851 } 20852 } 20853 20854 /* Do the copyout regardless of the temperature commands status. */ 20855 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 20856 flag) != 0) { 20857 rval = EFAULT; 20858 } 20859 20860 done2: 20861 if (path_flag == SD_PATH_DIRECT) { 20862 sd_pm_exit(un); 20863 } 20864 20865 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 20866 done: 20867 if (dktemp != NULL) { 20868 kmem_free(dktemp, sizeof (struct dk_temperature)); 20869 } 20870 20871 return (rval); 20872 } 20873 20874 20875 /* 20876 * Function: sd_log_page_supported 20877 * 20878 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 20879 * supported log pages. 20880 * 20881 * Arguments: un - 20882 * log_page - 20883 * 20884 * Return Code: -1 - on error (log sense is optional and may not be supported). 20885 * 0 - log page not found. 20886 * 1 - log page found. 20887 */ 20888 20889 static int 20890 sd_log_page_supported(struct sd_lun *un, int log_page) 20891 { 20892 uchar_t *log_page_data; 20893 int i; 20894 int match = 0; 20895 int log_size; 20896 20897 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 20898 20899 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 20900 SD_PATH_DIRECT) != 0) { 20901 SD_ERROR(SD_LOG_COMMON, un, 20902 "sd_log_page_supported: failed log page retrieval\n"); 20903 kmem_free(log_page_data, 0xFF); 20904 return (-1); 20905 } 20906 log_size = log_page_data[3]; 20907 20908 /* 20909 * The list of supported log pages start from the fourth byte. Check 20910 * until we run out of log pages or a match is found. 20911 */ 20912 for (i = 4; (i < (log_size + 4)) && !match; i++) { 20913 if (log_page_data[i] == log_page) { 20914 match++; 20915 } 20916 } 20917 kmem_free(log_page_data, 0xFF); 20918 return (match); 20919 } 20920 20921 20922 /* 20923 * Function: sd_mhdioc_failfast 20924 * 20925 * Description: This routine is the driver entry point for handling ioctl 20926 * requests to enable/disable the multihost failfast option. 20927 * (MHIOCENFAILFAST) 20928 * 20929 * Arguments: dev - the device number 20930 * arg - user specified probing interval. 20931 * flag - this argument is a pass through to ddi_copyxxx() 20932 * directly from the mode argument of ioctl(). 
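 *
 * Editorial illustration, not part of the original source: the
 * interval appears to be in milliseconds (sd_check_mhd() below
 * converts msecs to usecs), INT_MAX arms failfast without the watch
 * thread for the SCSI-3 PGR case, and 0 disarms. A hypothetical
 * cluster agent probing once a second:
 *
 *	int mh_time = 1000;
 *	(void) ioctl(fd, MHIOCENFAILFAST, &mh_time);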
20933 * 20934 * Return Code: 0 20935 * EFAULT 20936 * ENXIO 20937 */ 20938 20939 static int 20940 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 20941 { 20942 struct sd_lun *un = NULL; 20943 int mh_time; 20944 int rval = 0; 20945 20946 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20947 return (ENXIO); 20948 } 20949 20950 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 20951 return (EFAULT); 20952 20953 if (mh_time) { 20954 mutex_enter(SD_MUTEX(un)); 20955 un->un_resvd_status |= SD_FAILFAST; 20956 mutex_exit(SD_MUTEX(un)); 20957 /* 20958 * If mh_time is INT_MAX, then this ioctl is being used for 20959 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 20960 */ 20961 if (mh_time != INT_MAX) { 20962 rval = sd_check_mhd(dev, mh_time); 20963 } 20964 } else { 20965 (void) sd_check_mhd(dev, 0); 20966 mutex_enter(SD_MUTEX(un)); 20967 un->un_resvd_status &= ~SD_FAILFAST; 20968 mutex_exit(SD_MUTEX(un)); 20969 } 20970 return (rval); 20971 } 20972 20973 20974 /* 20975 * Function: sd_mhdioc_takeown 20976 * 20977 * Description: This routine is the driver entry point for handling ioctl 20978 * requests to forcefully acquire exclusive access rights to the 20979 * multihost disk (MHIOCTKOWN). 20980 * 20981 * Arguments: dev - the device number 20982 * arg - user provided structure specifying the delay 20983 * parameters in milliseconds 20984 * flag - this argument is a pass through to ddi_copyxxx() 20985 * directly from the mode argument of ioctl(). 20986 * 20987 * Return Code: 0 20988 * EFAULT 20989 * ENXIO 20990 */ 20991 20992 static int 20993 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 20994 { 20995 struct sd_lun *un = NULL; 20996 struct mhioctkown *tkown = NULL; 20997 int rval = 0; 20998 20999 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21000 return (ENXIO); 21001 } 21002 21003 if (arg != NULL) { 21004 tkown = (struct mhioctkown *) 21005 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21006 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21007 if (rval != 0) { 21008 rval = EFAULT; 21009 goto error; 21010 } 21011 } 21012 21013 rval = sd_take_ownership(dev, tkown); 21014 mutex_enter(SD_MUTEX(un)); 21015 if (rval == 0) { 21016 un->un_resvd_status |= SD_RESERVE; 21017 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21018 sd_reinstate_resv_delay = 21019 tkown->reinstate_resv_delay * 1000; 21020 } else { 21021 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21022 } 21023 /* 21024 * Give the scsi_watch routine interval set by 21025 * the MHIOCENFAILFAST ioctl precedence here. 21026 */ 21027 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21028 mutex_exit(SD_MUTEX(un)); 21029 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21030 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21031 "sd_mhdioc_takeown : %d\n", 21032 sd_reinstate_resv_delay); 21033 } else { 21034 mutex_exit(SD_MUTEX(un)); 21035 } 21036 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21037 sd_mhd_reset_notify_cb, (caddr_t)un); 21038 } else { 21039 un->un_resvd_status &= ~SD_RESERVE; 21040 mutex_exit(SD_MUTEX(un)); 21041 } 21042 21043 error: 21044 if (tkown != NULL) { 21045 kmem_free(tkown, sizeof (struct mhioctkown)); 21046 } 21047 return (rval); 21048 } 21049 21050 21051 /* 21052 * Function: sd_mhdioc_release 21053 * 21054 * Description: This routine is the driver entry point for handling ioctl 21055 * requests to release exclusive access rights to the multihost 21056 * disk (MHIOCRELEASE). 
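 *
 * Editorial illustration, not part of the original source: the usual
 * pairing with MHIOCTKOWN, assuming the struct mhioctkown layout from
 * <sys/mhd.h> (the reinstate_resv_delay field, in milliseconds, is
 * the one consumed by sd_mhdioc_takeown() above):
 *
 *	struct mhioctkown tk;
 *	bzero(&tk, sizeof (tk));
 *	tk.reinstate_resv_delay = 6000;
 *	if (ioctl(fd, MHIOCTKOWN, &tk) == 0) {
 *		... exclusive use of the disk ...
 *		(void) ioctl(fd, MHIOCRELEASE, 0);
 *	}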
21057 * 21058 * Arguments: dev - the device number 21059 * 21060 * Return Code: 0 21061 * ENXIO 21062 */ 21063 21064 static int 21065 sd_mhdioc_release(dev_t dev) 21066 { 21067 struct sd_lun *un = NULL; 21068 timeout_id_t resvd_timeid_save; 21069 int resvd_status_save; 21070 int rval = 0; 21071 21072 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21073 return (ENXIO); 21074 } 21075 21076 mutex_enter(SD_MUTEX(un)); 21077 resvd_status_save = un->un_resvd_status; 21078 un->un_resvd_status &= 21079 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21080 if (un->un_resvd_timeid) { 21081 resvd_timeid_save = un->un_resvd_timeid; 21082 un->un_resvd_timeid = NULL; 21083 mutex_exit(SD_MUTEX(un)); 21084 (void) untimeout(resvd_timeid_save); 21085 } else { 21086 mutex_exit(SD_MUTEX(un)); 21087 } 21088 21089 /* 21090 * destroy any pending timeout thread that may be attempting to 21091 * reinstate reservation on this device. 21092 */ 21093 sd_rmv_resv_reclaim_req(dev); 21094 21095 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21096 mutex_enter(SD_MUTEX(un)); 21097 if ((un->un_mhd_token) && 21098 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21099 mutex_exit(SD_MUTEX(un)); 21100 (void) sd_check_mhd(dev, 0); 21101 } else { 21102 mutex_exit(SD_MUTEX(un)); 21103 } 21104 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21105 sd_mhd_reset_notify_cb, (caddr_t)un); 21106 } else { 21107 /* 21108 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21109 */ 21110 mutex_enter(SD_MUTEX(un)); 21111 un->un_resvd_status = resvd_status_save; 21112 mutex_exit(SD_MUTEX(un)); 21113 } 21114 return (rval); 21115 } 21116 21117 21118 /* 21119 * Function: sd_mhdioc_register_devid 21120 * 21121 * Description: This routine is the driver entry point for handling ioctl 21122 * requests to register the device id (MHIOCREREGISTERDEVID). 21123 * 21124 * Note: The implementation for this ioctl has been updated to 21125 * be consistent with the original PSARC case (1999/357) 21126 * (4375899, 4241671, 4220005) 21127 * 21128 * Arguments: dev - the device number 21129 * 21130 * Return Code: 0 21131 * ENXIO 21132 */ 21133 21134 static int 21135 sd_mhdioc_register_devid(dev_t dev) 21136 { 21137 struct sd_lun *un = NULL; 21138 int rval = 0; 21139 21140 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21141 return (ENXIO); 21142 } 21143 21144 ASSERT(!mutex_owned(SD_MUTEX(un))); 21145 21146 mutex_enter(SD_MUTEX(un)); 21147 21148 /* If a devid already exists, de-register it */ 21149 if (un->un_devid != NULL) { 21150 ddi_devid_unregister(SD_DEVINFO(un)); 21151 /* 21152 * After unregister devid, needs to free devid memory 21153 */ 21154 ddi_devid_free(un->un_devid); 21155 un->un_devid = NULL; 21156 } 21157 21158 /* Check for reservation conflict */ 21159 mutex_exit(SD_MUTEX(un)); 21160 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21161 mutex_enter(SD_MUTEX(un)); 21162 21163 switch (rval) { 21164 case 0: 21165 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21166 break; 21167 case EACCES: 21168 break; 21169 default: 21170 rval = EIO; 21171 } 21172 21173 mutex_exit(SD_MUTEX(un)); 21174 return (rval); 21175 } 21176 21177 21178 /* 21179 * Function: sd_mhdioc_inkeys 21180 * 21181 * Description: This routine is the driver entry point for handling ioctl 21182 * requests to issue the SCSI-3 Persistent In Read Keys command 21183 * to the device (MHIOCGRP_INKEYS). 
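 *
 * Editorial illustration, not part of the original source, and the
 * key-list field names are recalled from <sys/mhd.h> rather than
 * confirmed here: a user-level read of the registered PGR keys might
 * be sketched as
 *
 *	mhioc_resv_key_t keys[16];
 *	mhioc_key_list_t kl;
 *	mhioc_inkeys_t ik;
 *	kl.listsize = 16;
 *	kl.list = keys;
 *	ik.li = &kl;
 *	if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0)
 *		... kl.listlen keys available, generation in ik.generation ...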
21184 * 21185 * Arguments: dev - the device number 21186 * arg - user provided in_keys structure 21187 * flag - this argument is a pass through to ddi_copyxxx() 21188 * directly from the mode argument of ioctl(). 21189 * 21190 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21191 * ENXIO 21192 * EFAULT 21193 */ 21194 21195 static int 21196 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21197 { 21198 struct sd_lun *un; 21199 mhioc_inkeys_t inkeys; 21200 int rval = 0; 21201 21202 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21203 return (ENXIO); 21204 } 21205 21206 #ifdef _MULTI_DATAMODEL 21207 switch (ddi_model_convert_from(flag & FMODELS)) { 21208 case DDI_MODEL_ILP32: { 21209 struct mhioc_inkeys32 inkeys32; 21210 21211 if (ddi_copyin(arg, &inkeys32, 21212 sizeof (struct mhioc_inkeys32), flag) != 0) { 21213 return (EFAULT); 21214 } 21215 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21216 if ((rval = sd_persistent_reservation_in_read_keys(un, 21217 &inkeys, flag)) != 0) { 21218 return (rval); 21219 } 21220 inkeys32.generation = inkeys.generation; 21221 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21222 flag) != 0) { 21223 return (EFAULT); 21224 } 21225 break; 21226 } 21227 case DDI_MODEL_NONE: 21228 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21229 flag) != 0) { 21230 return (EFAULT); 21231 } 21232 if ((rval = sd_persistent_reservation_in_read_keys(un, 21233 &inkeys, flag)) != 0) { 21234 return (rval); 21235 } 21236 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21237 flag) != 0) { 21238 return (EFAULT); 21239 } 21240 break; 21241 } 21242 21243 #else /* ! _MULTI_DATAMODEL */ 21244 21245 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21246 return (EFAULT); 21247 } 21248 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21249 if (rval != 0) { 21250 return (rval); 21251 } 21252 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21253 return (EFAULT); 21254 } 21255 21256 #endif /* _MULTI_DATAMODEL */ 21257 21258 return (rval); 21259 } 21260 21261 21262 /* 21263 * Function: sd_mhdioc_inresv 21264 * 21265 * Description: This routine is the driver entry point for handling ioctl 21266 * requests to issue the SCSI-3 Persistent In Read Reservations 21267 * command to the device (MHIOCGRP_INRESV). 21268 * 21269 * Arguments: dev - the device number 21270 * arg - user provided in_resv structure 21271 * flag - this argument is a pass through to ddi_copyxxx() 21272 * directly from the mode argument of ioctl().
21273 * 21274 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21275 * ENXIO 21276 * EFAULT 21277 */ 21278 21279 static int 21280 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21281 { 21282 struct sd_lun *un; 21283 mhioc_inresvs_t inresvs; 21284 int rval = 0; 21285 21286 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21287 return (ENXIO); 21288 } 21289 21290 #ifdef _MULTI_DATAMODEL 21291 21292 switch (ddi_model_convert_from(flag & FMODELS)) { 21293 case DDI_MODEL_ILP32: { 21294 struct mhioc_inresvs32 inresvs32; 21295 21296 if (ddi_copyin(arg, &inresvs32, 21297 sizeof (struct mhioc_inresvs32), flag) != 0) { 21298 return (EFAULT); 21299 } 21300 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21301 if ((rval = sd_persistent_reservation_in_read_resv(un, 21302 &inresvs, flag)) != 0) { 21303 return (rval); 21304 } 21305 inresvs32.generation = inresvs.generation; 21306 if (ddi_copyout(&inresvs32, arg, 21307 sizeof (struct mhioc_inresvs32), flag) != 0) { 21308 return (EFAULT); 21309 } 21310 break; 21311 } 21312 case DDI_MODEL_NONE: 21313 if (ddi_copyin(arg, &inresvs, 21314 sizeof (mhioc_inresvs_t), flag) != 0) { 21315 return (EFAULT); 21316 } 21317 if ((rval = sd_persistent_reservation_in_read_resv(un, 21318 &inresvs, flag)) != 0) { 21319 return (rval); 21320 } 21321 if (ddi_copyout(&inresvs, arg, 21322 sizeof (mhioc_inresvs_t), flag) != 0) { 21323 return (EFAULT); 21324 } 21325 break; 21326 } 21327 21328 #else /* ! _MULTI_DATAMODEL */ 21329 21330 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21331 return (EFAULT); 21332 } 21333 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21334 if (rval != 0) { 21335 return (rval); 21336 } 21337 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 21338 return (EFAULT); 21339 } 21340 21341 #endif /* ! _MULTI_DATAMODEL */ 21342 21343 return (rval); 21344 } 21345 21346 21347 /* 21348 * The following routines support the clustering functionality described below 21349 * and implement lost reservation reclaim functionality. 21350 * 21351 * Clustering 21352 * ---------- 21353 * The clustering code uses two different, independent forms of SCSI 21354 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 21355 * Persistent Group Reservations. For any particular disk, it will use either 21356 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21357 * 21358 * SCSI-2 21359 * The cluster software takes ownership of a multi-hosted disk by issuing the 21360 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21361 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 21362 * cluster host, just after taking ownership of the disk with the MHIOCTKOWN 21363 * ioctl, then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" 21364 * in the driver. The meaning of failfast is that if the driver (on this host) 21365 * ever encounters the scsi error return code RESERVATION_CONFLICT from the 21366 * device, it should immediately panic the host. The motivation for this ioctl 21367 * is that if this host does encounter reservation conflict, the underlying 21368 * cause is that some other host of the cluster has decided that this host is 21369 * no longer in the cluster and has seized control of the disks for itself. 21370 * Since this host is no longer in the cluster, it ought to panic itself. The MHIOCENFAILFAST ioctl 21371 * does two things: 21372 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 21373 * error to panic the host 21374 * (b) it sets up a periodic timer to test whether this host still has 21375 * "access" (in that no other host has reserved the device): if the 21376 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 21377 * purpose of that periodic timer is to handle scenarios where the host is 21378 * otherwise temporarily quiescent, temporarily doing no real i/o. 21379 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 21380 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 21381 * the device itself. 21382 * 21383 * SCSI-3 PGR 21384 * A direct semantic implementation of the SCSI-3 Persistent Reservation 21385 * facility is supported through the shared multihost disk ioctls 21386 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 21387 * MHIOCGRP_PREEMPTANDABORT) 21388 * 21389 * Reservation Reclaim: 21390 * -------------------- 21391 * To support the lost reservation reclaim operations this driver creates a 21392 * single thread to handle reinstating reservations on all devices that have 21393 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 21394 * have LOST RESERVATIONS when the scsi watch facility calls back 21395 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 21396 * requests to regain the lost reservations. 21397 */ 21398 21399 /* 21400 * Function: sd_check_mhd() 21401 * 21402 * Description: This function sets up and submits a scsi watch request or 21403 * terminates an existing watch request. This routine is used in 21404 * support of reservation reclaim. 21405 * 21406 * Arguments: dev - the device 'dev_t' is used for context to discriminate 21407 * among multiple watches that share the callback function 21408 * interval - the number of milliseconds specifying the watch 21409 * interval for issuing TEST UNIT READY commands. If 21410 * set to 0 the watch should be terminated. If the 21411 * interval is set to 0 and if the device is required 21412 * to hold reservation while disabling failfast, the 21413 * watch is restarted with an interval of 21414 * reinstate_resv_delay. 21415 * 21416 * Return Code: 0 - Successful submit/terminate of scsi watch request 21417 * ENXIO - Indicates an invalid device was specified 21418 * EAGAIN - Unable to submit the scsi watch request 21419 */ 21420 21421 static int 21422 sd_check_mhd(dev_t dev, int interval) 21423 { 21424 struct sd_lun *un; 21425 opaque_t token; 21426 21427 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21428 return (ENXIO); 21429 } 21430 21431 /* is this a watch termination request? */ 21432 if (interval == 0) { 21433 mutex_enter(SD_MUTEX(un)); 21434 /* if there is an existing watch task then terminate it */ 21435 if (un->un_mhd_token) { 21436 token = un->un_mhd_token; 21437 un->un_mhd_token = NULL; 21438 mutex_exit(SD_MUTEX(un)); 21439 (void) scsi_watch_request_terminate(token, 21440 SCSI_WATCH_TERMINATE_WAIT); 21441 mutex_enter(SD_MUTEX(un)); 21442 } else { 21443 mutex_exit(SD_MUTEX(un)); 21444 /* 21445 * Note: If we return here we don't check for the 21446 * failfast case. This is the original legacy 21447 * implementation but perhaps we should be checking 21448 * the failfast case.
21449 */ 21450 return (0); 21451 } 21452 /* 21453 * If the device is required to hold reservation while 21454 * disabling failfast, we need to restart the scsi_watch 21455 * routine with an interval of reinstate_resv_delay. 21456 */ 21457 if (un->un_resvd_status & SD_RESERVE) { 21458 interval = sd_reinstate_resv_delay/1000; 21459 } else { 21460 /* no failfast so bail */ 21461 mutex_exit(SD_MUTEX(un)); 21462 return (0); 21463 } 21464 mutex_exit(SD_MUTEX(un)); 21465 } 21466 21467 /* 21468 * adjust minimum time interval to 1 second, 21469 * and convert from msecs to usecs 21470 */ 21471 if (interval > 0 && interval < 1000) { 21472 interval = 1000; 21473 } 21474 interval *= 1000; 21475 21476 /* 21477 * submit the request to the scsi_watch service 21478 */ 21479 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21480 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21481 if (token == NULL) { 21482 return (EAGAIN); 21483 } 21484 21485 /* 21486 * save token for termination later on 21487 */ 21488 mutex_enter(SD_MUTEX(un)); 21489 un->un_mhd_token = token; 21490 mutex_exit(SD_MUTEX(un)); 21491 return (0); 21492 } 21493 21494 21495 /* 21496 * Function: sd_mhd_watch_cb() 21497 * 21498 * Description: This function is the call back function used by the scsi watch 21499 * facility. The scsi watch facility sends the "Test Unit Ready" 21500 * and processes the status. If applicable (i.e. a "Unit Attention" 21501 * status and automatic "Request Sense" not used) the scsi watch 21502 * facility will send a "Request Sense" and retrieve the sense data 21503 * to be passed to this callback function. In either case the 21504 * automatic "Request Sense" or the facility submitting one, this 21505 * callback is passed the status and sense data. 21506 * 21507 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21508 * among multiple watches that share this callback function 21509 * resultp - scsi watch facility result packet containing scsi 21510 * packet, status byte and sense data 21511 * 21512 * Return Code: 0 - continue the watch task 21513 * non-zero - terminate the watch task 21514 */ 21515 21516 static int 21517 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21518 { 21519 struct sd_lun *un; 21520 struct scsi_status *statusp; 21521 uint8_t *sensep; 21522 struct scsi_pkt *pkt; 21523 uchar_t actual_sense_length; 21524 dev_t dev = (dev_t)arg; 21525 21526 ASSERT(resultp != NULL); 21527 statusp = resultp->statusp; 21528 sensep = (uint8_t *)resultp->sensep; 21529 pkt = resultp->pkt; 21530 actual_sense_length = resultp->actual_sense_length; 21531 21532 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21533 return (ENXIO); 21534 } 21535 21536 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21537 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 21538 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 21539 21540 /* Begin processing of the status and/or sense data */ 21541 if (pkt->pkt_reason != CMD_CMPLT) { 21542 /* Handle the incomplete packet */ 21543 sd_mhd_watch_incomplete(un, pkt); 21544 return (0); 21545 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 21546 if (*((unsigned char *)statusp) 21547 == STATUS_RESERVATION_CONFLICT) { 21548 /* 21549 * Handle a reservation conflict by panicking if 21550 * configured for failfast or by logging the conflict 21551 * and updating the reservation status 21552 */ 21553 mutex_enter(SD_MUTEX(un)); 21554 if ((un->un_resvd_status & SD_FAILFAST) && 21555 (sd_failfast_enable)) { 21556 
sd_panic_for_res_conflict(un); 21557 /*NOTREACHED*/ 21558 } 21559 SD_INFO(SD_LOG_IOCTL_MHD, un, 21560 "sd_mhd_watch_cb: Reservation Conflict\n"); 21561 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 21562 mutex_exit(SD_MUTEX(un)); 21563 } 21564 } 21565 21566 if (sensep != NULL) { 21567 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 21568 mutex_enter(SD_MUTEX(un)); 21569 if ((scsi_sense_asc(sensep) == 21570 SD_SCSI_RESET_SENSE_CODE) && 21571 (un->un_resvd_status & SD_RESERVE)) { 21572 /* 21573 * The additional sense code indicates a power 21574 * on or bus device reset has occurred; update 21575 * the reservation status. 21576 */ 21577 un->un_resvd_status |= 21578 (SD_LOST_RESERVE | SD_WANT_RESERVE); 21579 SD_INFO(SD_LOG_IOCTL_MHD, un, 21580 "sd_mhd_watch_cb: Lost Reservation\n"); 21581 } 21582 } else { 21583 return (0); 21584 } 21585 } else { 21586 mutex_enter(SD_MUTEX(un)); 21587 } 21588 21589 if ((un->un_resvd_status & SD_RESERVE) && 21590 (un->un_resvd_status & SD_LOST_RESERVE)) { 21591 if (un->un_resvd_status & SD_WANT_RESERVE) { 21592 /* 21593 * A reset occurred in between the last probe and this 21594 * one so if a timeout is pending cancel it. 21595 */ 21596 if (un->un_resvd_timeid) { 21597 timeout_id_t temp_id = un->un_resvd_timeid; 21598 un->un_resvd_timeid = NULL; 21599 mutex_exit(SD_MUTEX(un)); 21600 (void) untimeout(temp_id); 21601 mutex_enter(SD_MUTEX(un)); 21602 } 21603 un->un_resvd_status &= ~SD_WANT_RESERVE; 21604 } 21605 if (un->un_resvd_timeid == 0) { 21606 /* Schedule a timeout to handle the lost reservation */ 21607 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 21608 (void *)dev, 21609 drv_usectohz(sd_reinstate_resv_delay)); 21610 } 21611 } 21612 mutex_exit(SD_MUTEX(un)); 21613 return (0); 21614 } 21615 21616 21617 /* 21618 * Function: sd_mhd_watch_incomplete() 21619 * 21620 * Description: This function is used to find out why a scsi pkt sent by the 21621 * scsi watch facility was not completed. Under some scenarios this 21622 * routine will return. Otherwise it will send a bus reset to see 21623 * if the drive is still online. 21624 * 21625 * Arguments: un - driver soft state (unit) structure 21626 * pkt - incomplete scsi pkt 21627 */ 21628 21629 static void 21630 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 21631 { 21632 int be_chatty; 21633 int perr; 21634 21635 ASSERT(pkt != NULL); 21636 ASSERT(un != NULL); 21637 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 21638 perr = (pkt->pkt_statistics & STAT_PERR); 21639 21640 mutex_enter(SD_MUTEX(un)); 21641 if (un->un_state == SD_STATE_DUMPING) { 21642 mutex_exit(SD_MUTEX(un)); 21643 return; 21644 } 21645 21646 switch (pkt->pkt_reason) { 21647 case CMD_UNX_BUS_FREE: 21648 /* 21649 * If we had a parity error that caused the target to drop BSY*, 21650 * don't be chatty about it. 21651 */ 21652 if (perr && be_chatty) { 21653 be_chatty = 0; 21654 } 21655 break; 21656 case CMD_TAG_REJECT: 21657 /* 21658 * The SCSI-2 spec states that a tag reject will be sent by the 21659 * target if tagged queuing is not supported. A tag reject may 21660 * also be sent during certain initialization periods or to 21661 * control internal resources. For the latter case the target 21662 * may also return Queue Full. 21663 * 21664 * If this driver receives a tag reject from a target that is 21665 * going through an init period or controlling internal 21666 * resources tagged queuing will be disabled. 
This is less 21667 * than optimal behavior, but the driver is unable to determine 21668 * the target state and assumes tagged queueing is not supported. 21669 */ 21670 pkt->pkt_flags = 0; 21671 un->un_tagflags = 0; 21672 21673 if (un->un_f_opt_queueing == TRUE) { 21674 un->un_throttle = min(un->un_throttle, 3); 21675 } else { 21676 un->un_throttle = 1; 21677 } 21678 mutex_exit(SD_MUTEX(un)); 21679 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 21680 mutex_enter(SD_MUTEX(un)); 21681 break; 21682 case CMD_INCOMPLETE: 21683 /* 21684 * The transport stopped with an abnormal state; fall through and 21685 * reset the target and/or bus unless selection did not complete 21686 * (indicated by STATE_GOT_BUS), in which case we don't want to 21687 * go through a target/bus reset 21688 */ 21689 if (pkt->pkt_state == STATE_GOT_BUS) { 21690 break; 21691 } 21692 /*FALLTHROUGH*/ 21693 21694 case CMD_TIMEOUT: 21695 default: 21696 /* 21697 * The lun may still be running the command, so a lun reset 21698 * should be attempted. If the lun reset fails or cannot be 21699 * issued, then try a target reset. Lastly try a bus reset. 21700 */ 21701 if ((pkt->pkt_statistics & 21702 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 21703 int reset_retval = 0; 21704 mutex_exit(SD_MUTEX(un)); 21705 if (un->un_f_allow_bus_device_reset == TRUE) { 21706 if (un->un_f_lun_reset_enabled == TRUE) { 21707 reset_retval = 21708 scsi_reset(SD_ADDRESS(un), 21709 RESET_LUN); 21710 } 21711 if (reset_retval == 0) { 21712 reset_retval = 21713 scsi_reset(SD_ADDRESS(un), 21714 RESET_TARGET); 21715 } 21716 } 21717 if (reset_retval == 0) { 21718 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 21719 } 21720 mutex_enter(SD_MUTEX(un)); 21721 } 21722 break; 21723 } 21724 21725 /* A device/bus reset has occurred; update the reservation status. */ 21726 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 21727 (STAT_BUS_RESET | STAT_DEV_RESET))) { 21728 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 21729 un->un_resvd_status |= 21730 (SD_LOST_RESERVE | SD_WANT_RESERVE); 21731 SD_INFO(SD_LOG_IOCTL_MHD, un, 21732 "sd_mhd_watch_incomplete: Lost Reservation\n"); 21733 } 21734 } 21735 21736 /* 21737 * The disk has been turned off; update the device state. 21738 * 21739 * Note: Should we be offlining the disk here? 21740 */ 21741 if (pkt->pkt_state == STATE_GOT_BUS) { 21742 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 21743 "Disk not responding to selection\n"); 21744 if (un->un_state != SD_STATE_OFFLINE) { 21745 New_state(un, SD_STATE_OFFLINE); 21746 } 21747 } else if (be_chatty) { 21748 /* 21749 * suppress messages if they are all the same pkt reason; 21750 * with TQ, many (up to 256) are returned with the same 21751 * pkt_reason 21752 */ 21753 if (pkt->pkt_reason != un->un_last_pkt_reason) { 21754 SD_ERROR(SD_LOG_IOCTL_MHD, un, 21755 "sd_mhd_watch_incomplete: " 21756 "SCSI transport failed: reason '%s'\n", 21757 scsi_rname(pkt->pkt_reason)); 21758 } 21759 } 21760 un->un_last_pkt_reason = pkt->pkt_reason; 21761 mutex_exit(SD_MUTEX(un)); 21762 } 21763 21764 21765 /* 21766 * Function: sd_sname() 21767 * 21768 * Description: This is a simple little routine to return a string containing 21769 * a printable description of the command status byte for use in 21770 * logging. 21771 * 21772 * Arguments: status - the status byte 21773 * 21774 * Return Code: char * - string containing status description.
21775 */ 21776 21777 static char * 21778 sd_sname(uchar_t status) 21779 { 21780 switch (status & STATUS_MASK) { 21781 case STATUS_GOOD: 21782 return ("good status"); 21783 case STATUS_CHECK: 21784 return ("check condition"); 21785 case STATUS_MET: 21786 return ("condition met"); 21787 case STATUS_BUSY: 21788 return ("busy"); 21789 case STATUS_INTERMEDIATE: 21790 return ("intermediate"); 21791 case STATUS_INTERMEDIATE_MET: 21792 return ("intermediate - condition met"); 21793 case STATUS_RESERVATION_CONFLICT: 21794 return ("reservation_conflict"); 21795 case STATUS_TERMINATED: 21796 return ("command terminated"); 21797 case STATUS_QFULL: 21798 return ("queue full"); 21799 default: 21800 return ("<unknown status>"); 21801 } 21802 } 21803 21804 21805 /* 21806 * Function: sd_mhd_resvd_recover() 21807 * 21808 * Description: This function adds a reservation entry to the 21809 * sd_resv_reclaim_request list and signals the reservation 21810 * reclaim thread that there is work pending. If the reservation 21811 * reclaim thread has not been previously created this function 21812 * will kick it off. 21813 * 21814 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21815 * among multiple watches that share this callback function 21816 * 21817 * Context: This routine is called by timeout() and is run in interrupt 21818 * context. It must not sleep or call other functions which may 21819 * sleep. 21820 */ 21821 21822 static void 21823 sd_mhd_resvd_recover(void *arg) 21824 { 21825 dev_t dev = (dev_t)arg; 21826 struct sd_lun *un; 21827 struct sd_thr_request *sd_treq = NULL; 21828 struct sd_thr_request *sd_cur = NULL; 21829 struct sd_thr_request *sd_prev = NULL; 21830 int already_there = 0; 21831 21832 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21833 return; 21834 } 21835 21836 mutex_enter(SD_MUTEX(un)); 21837 un->un_resvd_timeid = NULL; 21838 if (un->un_resvd_status & SD_WANT_RESERVE) { 21839 /* 21840 * There was a reset so don't issue the reserve, allow the 21841 * sd_mhd_watch_cb callback function to notice this and 21842 * reschedule the timeout for reservation. 21843 */ 21844 mutex_exit(SD_MUTEX(un)); 21845 return; 21846 } 21847 mutex_exit(SD_MUTEX(un)); 21848 21849 /* 21850 * Add this device to the sd_resv_reclaim_request list and the 21851 * sd_resv_reclaim_thread should take care of the rest. 21852 * 21853 * Note: We can't sleep in this context so if the memory allocation 21854 * fails allow the sd_mhd_watch_cb callback function to notice this and 21855 * reschedule the timeout for reservation. 
(4378460) 21856 */ 21857 sd_treq = (struct sd_thr_request *) 21858 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 21859 if (sd_treq == NULL) { 21860 return; 21861 } 21862 21863 sd_treq->sd_thr_req_next = NULL; 21864 sd_treq->dev = dev; 21865 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 21866 if (sd_tr.srq_thr_req_head == NULL) { 21867 sd_tr.srq_thr_req_head = sd_treq; 21868 } else { 21869 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 21870 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 21871 if (sd_cur->dev == dev) { 21872 /* 21873 * already in Queue so don't log 21874 * another request for the device 21875 */ 21876 already_there = 1; 21877 break; 21878 } 21879 sd_prev = sd_cur; 21880 } 21881 if (!already_there) { 21882 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 21883 "logging request for %lx\n", dev); 21884 sd_prev->sd_thr_req_next = sd_treq; 21885 } else { 21886 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 21887 } 21888 } 21889 21890 /* 21891 * Create a kernel thread to do the reservation reclaim and free up this 21892 * thread. We cannot block this thread while we go away to do the 21893 * reservation reclaim 21894 */ 21895 if (sd_tr.srq_resv_reclaim_thread == NULL) 21896 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 21897 sd_resv_reclaim_thread, NULL, 21898 0, &p0, TS_RUN, v.v_maxsyspri - 2); 21899 21900 /* Tell the reservation reclaim thread that it has work to do */ 21901 cv_signal(&sd_tr.srq_resv_reclaim_cv); 21902 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 21903 } 21904 21905 /* 21906 * Function: sd_resv_reclaim_thread() 21907 * 21908 * Description: This function implements the reservation reclaim operations 21909 * 21910 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21911 * among multiple watches that share this callback function 21912 */ 21913 21914 static void 21915 sd_resv_reclaim_thread() 21916 { 21917 struct sd_lun *un; 21918 struct sd_thr_request *sd_mhreq; 21919 21920 /* Wait for work */ 21921 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 21922 if (sd_tr.srq_thr_req_head == NULL) { 21923 cv_wait(&sd_tr.srq_resv_reclaim_cv, 21924 &sd_tr.srq_resv_reclaim_mutex); 21925 } 21926 21927 /* Loop while we have work */ 21928 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 21929 un = ddi_get_soft_state(sd_state, 21930 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 21931 if (un == NULL) { 21932 /* 21933 * softstate structure is NULL so just 21934 * dequeue the request and continue 21935 */ 21936 sd_tr.srq_thr_req_head = 21937 sd_tr.srq_thr_cur_req->sd_thr_req_next; 21938 kmem_free(sd_tr.srq_thr_cur_req, 21939 sizeof (struct sd_thr_request)); 21940 continue; 21941 } 21942 21943 /* dequeue the request */ 21944 sd_mhreq = sd_tr.srq_thr_cur_req; 21945 sd_tr.srq_thr_req_head = 21946 sd_tr.srq_thr_cur_req->sd_thr_req_next; 21947 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 21948 21949 /* 21950 * Reclaim reservation only if SD_RESERVE is still set. There 21951 * may have been a call to MHIOCRELEASE before we got here. 21952 */ 21953 mutex_enter(SD_MUTEX(un)); 21954 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 21955 /* 21956 * Note: The SD_LOST_RESERVE flag is cleared before 21957 * reclaiming the reservation. 
If this is done after the 21958 * call to sd_reserve_release a reservation loss in the 21959 * window between pkt completion of reserve cmd and 21960 * mutex_enter below may not be recognized 21961 */ 21962 un->un_resvd_status &= ~SD_LOST_RESERVE; 21963 mutex_exit(SD_MUTEX(un)); 21964 21965 if (sd_reserve_release(sd_mhreq->dev, 21966 SD_RESERVE) == 0) { 21967 mutex_enter(SD_MUTEX(un)); 21968 un->un_resvd_status |= SD_RESERVE; 21969 mutex_exit(SD_MUTEX(un)); 21970 SD_INFO(SD_LOG_IOCTL_MHD, un, 21971 "sd_resv_reclaim_thread: " 21972 "Reservation Recovered\n"); 21973 } else { 21974 mutex_enter(SD_MUTEX(un)); 21975 un->un_resvd_status |= SD_LOST_RESERVE; 21976 mutex_exit(SD_MUTEX(un)); 21977 SD_INFO(SD_LOG_IOCTL_MHD, un, 21978 "sd_resv_reclaim_thread: Failed " 21979 "Reservation Recovery\n"); 21980 } 21981 } else { 21982 mutex_exit(SD_MUTEX(un)); 21983 } 21984 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 21985 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 21986 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 21987 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 21988 /* 21989 * wakeup the destroy thread if anyone is waiting on 21990 * us to complete. 21991 */ 21992 cv_signal(&sd_tr.srq_inprocess_cv); 21993 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21994 "sd_resv_reclaim_thread: cv_signalling current request \n"); 21995 } 21996 21997 /* 21998 * cleanup the sd_tr structure now that this thread will not exist 21999 */ 22000 ASSERT(sd_tr.srq_thr_req_head == NULL); 22001 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22002 sd_tr.srq_resv_reclaim_thread = NULL; 22003 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22004 thread_exit(); 22005 } 22006 22007 22008 /* 22009 * Function: sd_rmv_resv_reclaim_req() 22010 * 22011 * Description: This function removes any pending reservation reclaim requests 22012 * for the specified device. 22013 * 22014 * Arguments: dev - the device 'dev_t' 22015 */ 22016 22017 static void 22018 sd_rmv_resv_reclaim_req(dev_t dev) 22019 { 22020 struct sd_thr_request *sd_mhreq; 22021 struct sd_thr_request *sd_prev; 22022 22023 /* Remove a reservation reclaim request from the list */ 22024 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22025 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22026 /* 22027 * We are attempting to reinstate reservation for 22028 * this device. We wait for sd_reserve_release() 22029 * to return before we return. 22030 */ 22031 cv_wait(&sd_tr.srq_inprocess_cv, 22032 &sd_tr.srq_resv_reclaim_mutex); 22033 } else { 22034 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22035 if (sd_mhreq && sd_mhreq->dev == dev) { 22036 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22037 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22038 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22039 return; 22040 } 22041 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22042 if (sd_mhreq && sd_mhreq->dev == dev) { 22043 break; 22044 } 22045 sd_prev = sd_mhreq; 22046 } 22047 if (sd_mhreq != NULL) { 22048 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22049 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22050 } 22051 } 22052 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22053 } 22054 22055 22056 /* 22057 * Function: sd_mhd_reset_notify_cb() 22058 * 22059 * Description: This is a call back function for scsi_reset_notify. This 22060 * function updates the softstate reserved status and logs the 22061 * reset. The driver scsi watch facility callback function 22062 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22063 * will reclaim the reservation. 
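 *
 * Note: as an editor's illustration (not in the original source), a
 * reset-notify callback of this form is typically registered with the
 * SCSA framework along these lines:
 *
 *	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *	    sd_mhd_reset_notify_cb, (caddr_t)un);
 *
 * and deregistered again with the SCSI_RESET_CANCEL flag; the actual
 * registration for this driver lives elsewhere in this file.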
22064 * 22065 * Arguments: arg - driver soft state (unit) structure 22066 */ 22067 22068 static void 22069 sd_mhd_reset_notify_cb(caddr_t arg) 22070 { 22071 struct sd_lun *un = (struct sd_lun *)arg; 22072 22073 mutex_enter(SD_MUTEX(un)); 22074 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22075 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22076 SD_INFO(SD_LOG_IOCTL_MHD, un, 22077 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22078 } 22079 mutex_exit(SD_MUTEX(un)); 22080 } 22081 22082 22083 /* 22084 * Function: sd_take_ownership() 22085 * 22086 * Description: This routine implements an algorithm to achieve a stable 22087 * reservation on disks which don't implement priority reserve, 22088 * and makes sure that other hosts lose any re-reservation attempts. 22089 * This algorithm consists of a loop that keeps issuing the RESERVE 22090 * for some period of time (min_ownership_delay, default 6 seconds). 22091 * During that loop, it looks to see if there has been a bus device 22092 * reset or bus reset (both of which cause an existing reservation 22093 * to be lost). If the reservation is lost, reissue RESERVE until a 22094 * period of min_ownership_delay with no resets has gone by, or 22095 * until max_ownership_delay has expired. This loop ensures that 22096 * the host really did manage to reserve the device, in spite of 22097 * resets. The looping for min_ownership_delay (default six 22098 * seconds) is important to early generation clustering products, 22099 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22100 * MHIOCENFAILFAST periodic timer of two seconds. By having 22101 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22102 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22103 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22104 * have already noticed, via the MHIOCENFAILFAST polling, that it 22105 * no longer "owns" the disk and will have panicked itself. Thus, 22106 * the host issuing the MHIOCTKOWN is assured (with timing 22107 * dependencies) that by the time it actually starts to use the 22108 * disk for real work, the old owner is no longer accessing it. 22109 * 22110 * min_ownership_delay is the minimum amount of time for which the 22111 * disk must be reserved continuously devoid of resets before the 22112 * MHIOCTKOWN ioctl will return success. 22113 * 22114 * max_ownership_delay indicates the amount of time within which the 22115 * take-ownership attempt must succeed, or else time out with an error. 22116 * 22117 * Arguments: dev - the device 'dev_t' 22118 * *p - struct containing timing info. 22119 * 22120 * Return Code: 0 for success or error code 22121 */ 22122 22123 static int 22124 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22125 { 22126 struct sd_lun *un; 22127 int rval; 22128 int err; 22129 int reservation_count = 0; 22130 int min_ownership_delay = 6000000; /* in usec */ 22131 int max_ownership_delay = 30000000; /* in usec */ 22132 clock_t start_time; /* starting time of this algorithm */ 22133 clock_t end_time; /* time limit for giving up */ 22134 clock_t ownership_time; /* time limit for stable ownership */ 22135 clock_t current_time; 22136 clock_t previous_current_time; 22137 22138 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22139 return (ENXIO); 22140 } 22141 22142 /* 22143 * Attempt a device reservation. A priority reservation is requested.
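 *
 * Note (editor's addition): the mhioctkown fields are supplied in
 * milliseconds; the code below scales them by 1000 to microseconds,
 * and drv_usectohz() then converts microseconds to clock ticks. For
 * example, a caller passing min_ownership_delay = 8000 yields
 * 8000000 usec, i.e. 800 ticks on a system with the common 100 Hz
 * clock.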
22144 */ 22145 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22146 != SD_SUCCESS) { 22147 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22148 "sd_take_ownership: return(1)=%d\n", rval); 22149 return (rval); 22150 } 22151 22152 /* Update the softstate reserved status to indicate the reservation */ 22153 mutex_enter(SD_MUTEX(un)); 22154 un->un_resvd_status |= SD_RESERVE; 22155 un->un_resvd_status &= 22156 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22157 mutex_exit(SD_MUTEX(un)); 22158 22159 if (p != NULL) { 22160 if (p->min_ownership_delay != 0) { 22161 min_ownership_delay = p->min_ownership_delay * 1000; 22162 } 22163 if (p->max_ownership_delay != 0) { 22164 max_ownership_delay = p->max_ownership_delay * 1000; 22165 } 22166 } 22167 SD_INFO(SD_LOG_IOCTL_MHD, un, 22168 "sd_take_ownership: min, max delays: %d, %d\n", 22169 min_ownership_delay, max_ownership_delay); 22170 22171 start_time = ddi_get_lbolt(); 22172 current_time = start_time; 22173 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22174 end_time = start_time + drv_usectohz(max_ownership_delay); 22175 22176 while (current_time - end_time < 0) { 22177 delay(drv_usectohz(500000)); 22178 22179 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22180 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22181 mutex_enter(SD_MUTEX(un)); 22182 rval = (un->un_resvd_status & 22183 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22184 mutex_exit(SD_MUTEX(un)); 22185 break; 22186 } 22187 } 22188 previous_current_time = current_time; 22189 current_time = ddi_get_lbolt(); 22190 mutex_enter(SD_MUTEX(un)); 22191 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22192 ownership_time = ddi_get_lbolt() + 22193 drv_usectohz(min_ownership_delay); 22194 reservation_count = 0; 22195 } else { 22196 reservation_count++; 22197 } 22198 un->un_resvd_status |= SD_RESERVE; 22199 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22200 mutex_exit(SD_MUTEX(un)); 22201 22202 SD_INFO(SD_LOG_IOCTL_MHD, un, 22203 "sd_take_ownership: ticks for loop iteration=%ld, " 22204 "reservation=%s\n", (current_time - previous_current_time), 22205 reservation_count ? 
"ok" : "reclaimed"); 22206 22207 if (current_time - ownership_time >= 0 && 22208 reservation_count >= 4) { 22209 rval = 0; /* Achieved a stable ownership */ 22210 break; 22211 } 22212 if (current_time - end_time >= 0) { 22213 rval = EACCES; /* No ownership in max possible time */ 22214 break; 22215 } 22216 } 22217 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22218 "sd_take_ownership: return(2)=%d\n", rval); 22219 return (rval); 22220 } 22221 22222 22223 /* 22224 * Function: sd_reserve_release() 22225 * 22226 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22227 * PRIORITY RESERVE commands based on a user specified command type 22228 * 22229 * Arguments: dev - the device 'dev_t' 22230 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22231 * SD_RESERVE, SD_RELEASE 22232 * 22233 * Return Code: 0 or Error Code 22234 */ 22235 22236 static int 22237 sd_reserve_release(dev_t dev, int cmd) 22238 { 22239 struct uscsi_cmd *com = NULL; 22240 struct sd_lun *un = NULL; 22241 char cdb[CDB_GROUP0]; 22242 int rval; 22243 22244 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22245 (cmd == SD_PRIORITY_RESERVE)); 22246 22247 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22248 return (ENXIO); 22249 } 22250 22251 /* instantiate and initialize the command and cdb */ 22252 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22253 bzero(cdb, CDB_GROUP0); 22254 com->uscsi_flags = USCSI_SILENT; 22255 com->uscsi_timeout = un->un_reserve_release_time; 22256 com->uscsi_cdblen = CDB_GROUP0; 22257 com->uscsi_cdb = cdb; 22258 if (cmd == SD_RELEASE) { 22259 cdb[0] = SCMD_RELEASE; 22260 } else { 22261 cdb[0] = SCMD_RESERVE; 22262 } 22263 22264 /* Send the command. */ 22265 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22266 SD_PATH_STANDARD); 22267 22268 /* 22269 * "break" a reservation that is held by another host, by issuing a 22270 * reset if priority reserve is desired, and we could not get the 22271 * device. 22272 */ 22273 if ((cmd == SD_PRIORITY_RESERVE) && 22274 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22275 /* 22276 * First try to reset the LUN. If we cannot, then try a target 22277 * reset, followed by a bus reset if the target reset fails. 22278 */ 22279 int reset_retval = 0; 22280 if (un->un_f_lun_reset_enabled == TRUE) { 22281 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22282 } 22283 if (reset_retval == 0) { 22284 /* The LUN reset either failed or was not issued */ 22285 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22286 } 22287 if ((reset_retval == 0) && 22288 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22289 rval = EIO; 22290 kmem_free(com, sizeof (*com)); 22291 return (rval); 22292 } 22293 22294 bzero(com, sizeof (struct uscsi_cmd)); 22295 com->uscsi_flags = USCSI_SILENT; 22296 com->uscsi_cdb = cdb; 22297 com->uscsi_cdblen = CDB_GROUP0; 22298 com->uscsi_timeout = 5; 22299 22300 /* 22301 * Reissue the last reserve command, this time without request 22302 * sense. Assume that it is just a regular reserve command. 22303 */ 22304 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22305 SD_PATH_STANDARD); 22306 } 22307 22308 /* Return an error if still getting a reservation conflict. 
*/ 22309 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22310 rval = EACCES; 22311 } 22312 22313 kmem_free(com, sizeof (*com)); 22314 return (rval); 22315 } 22316 22317 22318 #define SD_NDUMP_RETRIES 12 22319 /* 22320 * System Crash Dump routine 22321 */ 22322 22323 static int 22324 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22325 { 22326 int instance; 22327 int partition; 22328 int i; 22329 int err; 22330 struct sd_lun *un; 22331 struct scsi_pkt *wr_pktp; 22332 struct buf *wr_bp; 22333 struct buf wr_buf; 22334 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22335 daddr_t tgt_blkno; /* rmw - blkno for target */ 22336 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22337 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22338 size_t io_start_offset; 22339 int doing_rmw = FALSE; 22340 int rval; 22341 #if defined(__i386) || defined(__amd64) 22342 ssize_t dma_resid; 22343 daddr_t oblkno; 22344 #endif 22345 diskaddr_t nblks = 0; 22346 diskaddr_t start_block; 22347 22348 instance = SDUNIT(dev); 22349 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22350 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22351 return (ENXIO); 22352 } 22353 22354 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22355 22356 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22357 22358 partition = SDPART(dev); 22359 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22360 22361 /* Validate the blocks to dump against the partition size. */ 22362 22363 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22364 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22365 22366 if ((blkno + nblk) > nblks) { 22367 SD_TRACE(SD_LOG_DUMP, un, 22368 "sddump: dump range larger than partition: " 22369 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22370 blkno, nblk, nblks); 22371 return (EINVAL); 22372 } 22373 22374 mutex_enter(&un->un_pm_mutex); 22375 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22376 struct scsi_pkt *start_pktp; 22377 22378 mutex_exit(&un->un_pm_mutex); 22379 22380 /* 22381 * use pm framework to power on HBA 1st 22382 */ 22383 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22384 22385 /* 22386 * Dump no longer uses sdpower to power on a device; it's 22387 * in-line here so it can be done in polled mode. 22388 */ 22389 22390 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22391 22392 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22393 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22394 22395 if (start_pktp == NULL) { 22396 /* We were not given a SCSI packet, fail. */ 22397 return (EIO); 22398 } 22399 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22400 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22401 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22402 start_pktp->pkt_flags = FLAG_NOINTR; 22403 22404 mutex_enter(SD_MUTEX(un)); 22405 SD_FILL_SCSI1_LUN(un, start_pktp); 22406 mutex_exit(SD_MUTEX(un)); 22407 /* 22408 * Scsi_poll returns 0 (success) if the command completes and 22409 * the status block is STATUS_GOOD. 22410 */ 22411 if (sd_scsi_poll(un, start_pktp) != 0) { 22412 scsi_destroy_pkt(start_pktp); 22413 return (EIO); 22414 } 22415 scsi_destroy_pkt(start_pktp); 22416 (void) sd_ddi_pm_resume(un); 22417 } else { 22418 mutex_exit(&un->un_pm_mutex); 22419 } 22420 22421 mutex_enter(SD_MUTEX(un)); 22422 un->un_throttle = 0; 22423 22424 /* 22425 * The first time through, reset the specific target device. 22426 * However, when cpr calls sddump we know that sd is in 22427 * a good state so no bus reset is required.
22428 * Clear sense data via Request Sense cmd. 22429 * In sddump we don't care about allow_bus_device_reset anymore 22430 */ 22431 22432 if ((un->un_state != SD_STATE_SUSPENDED) && 22433 (un->un_state != SD_STATE_DUMPING)) { 22434 22435 New_state(un, SD_STATE_DUMPING); 22436 22437 if (un->un_f_is_fibre == FALSE) { 22438 mutex_exit(SD_MUTEX(un)); 22439 /* 22440 * Attempt a bus reset for parallel scsi. 22441 * 22442 * Note: A bus reset is required because on some host 22443 * systems (i.e. E420R) a bus device reset is 22444 * insufficient to reset the state of the target. 22445 * 22446 * Note: Don't issue the reset for fibre-channel, 22447 * because this tends to hang the bus (loop) for 22448 * too long while everyone is logging out and in 22449 * and the deadman timer for dumping will fire 22450 * before the dump is complete. 22451 */ 22452 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22453 mutex_enter(SD_MUTEX(un)); 22454 Restore_state(un); 22455 mutex_exit(SD_MUTEX(un)); 22456 return (EIO); 22457 } 22458 22459 /* Delay to give the device some recovery time. */ 22460 drv_usecwait(10000); 22461 22462 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22463 SD_INFO(SD_LOG_DUMP, un, 22464 "sddump: sd_send_polled_RQS failed\n"); 22465 } 22466 mutex_enter(SD_MUTEX(un)); 22467 } 22468 } 22469 22470 /* 22471 * Convert the partition-relative block number to a 22472 * disk physical block number. 22473 */ 22474 blkno += start_block; 22475 22476 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22477 22478 22479 /* 22480 * Check if the device has a non-512 block size. 22481 */ 22482 wr_bp = NULL; 22483 if (NOT_DEVBSIZE(un)) { 22484 tgt_byte_offset = blkno * un->un_sys_blocksize; 22485 tgt_byte_count = nblk * un->un_sys_blocksize; 22486 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22487 (tgt_byte_count % un->un_tgt_blocksize)) { 22488 doing_rmw = TRUE; 22489 /* 22490 * Calculate the block number and number of blocks 22491 * in terms of the media block size. 22492 */ 22493 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22494 tgt_nblk = 22495 ((tgt_byte_offset + tgt_byte_count + 22496 (un->un_tgt_blocksize - 1)) / 22497 un->un_tgt_blocksize) - tgt_blkno; 22498 22499 /* 22500 * Invoke the routine which is going to do the read 22501 * part of read-modify-write. 22502 * Note that this routine returns a pointer to 22503 * a valid bp in wr_bp. 22504 */ 22505 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 22506 &wr_bp); 22507 if (err) { 22508 mutex_exit(SD_MUTEX(un)); 22509 return (err); 22510 } 22511 /* 22512 * Offset is being calculated as - 22513 * (original block # * system block size) - 22514 * (new block # * target block size) 22515 */ 22516 io_start_offset = 22517 ((uint64_t)(blkno * un->un_sys_blocksize)) - 22518 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 22519 22520 ASSERT((io_start_offset >= 0) && 22521 (io_start_offset < un->un_tgt_blocksize)); 22522 /* 22523 * Do the modify portion of read modify write.
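 *
 * Editor's worked example of the arithmetic above, assuming a
 * 512-byte system block size and a 2048-byte target block size:
 * for blkno = 5 and nblk = 1, tgt_byte_offset = 2560, so
 * tgt_blkno = 1 and io_start_offset = 2560 - 2048 = 512; the
 * bcopy() below therefore lands the dump data 512 bytes into the
 * block just read back from the media.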
22524 */ 22525 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 22526 (size_t)nblk * un->un_sys_blocksize); 22527 } else { 22528 doing_rmw = FALSE; 22529 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22530 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 22531 } 22532 22533 /* Convert blkno and nblk to target blocks */ 22534 blkno = tgt_blkno; 22535 nblk = tgt_nblk; 22536 } else { 22537 wr_bp = &wr_buf; 22538 bzero(wr_bp, sizeof (struct buf)); 22539 wr_bp->b_flags = B_BUSY; 22540 wr_bp->b_un.b_addr = addr; 22541 wr_bp->b_bcount = nblk << DEV_BSHIFT; 22542 wr_bp->b_resid = 0; 22543 } 22544 22545 mutex_exit(SD_MUTEX(un)); 22546 22547 /* 22548 * Obtain a SCSI packet for the write command. 22549 * It should be safe to call the allocator here without 22550 * worrying about being locked for DVMA mapping because 22551 * the address we're passed is already a DVMA mapping 22552 * 22553 * We are also not going to worry about semaphore ownership 22554 * in the dump buffer. Dumping is single threaded at present. 22555 */ 22556 22557 wr_pktp = NULL; 22558 22559 #if defined(__i386) || defined(__amd64) 22560 dma_resid = wr_bp->b_bcount; 22561 oblkno = blkno; 22562 while (dma_resid != 0) { 22563 #endif 22564 22565 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22566 wr_bp->b_flags &= ~B_ERROR; 22567 22568 #if defined(__i386) || defined(__amd64) 22569 blkno = oblkno + 22570 ((wr_bp->b_bcount - dma_resid) / 22571 un->un_tgt_blocksize); 22572 nblk = dma_resid / un->un_tgt_blocksize; 22573 22574 if (wr_pktp) { 22575 /* Partial DMA transfers after initial transfer */ 22576 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 22577 blkno, nblk); 22578 } else { 22579 /* Initial transfer */ 22580 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22581 un->un_pkt_flags, NULL_FUNC, NULL, 22582 blkno, nblk); 22583 } 22584 #else 22585 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22586 0, NULL_FUNC, NULL, blkno, nblk); 22587 #endif 22588 22589 if (rval == 0) { 22590 /* We were given a SCSI packet, continue. 
*/ 22591 break; 22592 } 22593 22594 if (i == 0) { 22595 if (wr_bp->b_flags & B_ERROR) { 22596 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22597 "no resources for dumping; " 22598 "error code: 0x%x, retrying", 22599 geterror(wr_bp)); 22600 } else { 22601 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22602 "no resources for dumping; retrying"); 22603 } 22604 } else if (i != (SD_NDUMP_RETRIES - 1)) { 22605 if (wr_bp->b_flags & B_ERROR) { 22606 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22607 "no resources for dumping; error code: " 22608 "0x%x, retrying\n", geterror(wr_bp)); 22609 } 22610 } else { 22611 if (wr_bp->b_flags & B_ERROR) { 22612 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22613 "no resources for dumping; " 22614 "error code: 0x%x, retries failed, " 22615 "giving up.\n", geterror(wr_bp)); 22616 } else { 22617 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22618 "no resources for dumping; " 22619 "retries failed, giving up.\n"); 22620 } 22621 mutex_enter(SD_MUTEX(un)); 22622 Restore_state(un); 22623 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 22624 mutex_exit(SD_MUTEX(un)); 22625 scsi_free_consistent_buf(wr_bp); 22626 } else { 22627 mutex_exit(SD_MUTEX(un)); 22628 } 22629 return (EIO); 22630 } 22631 drv_usecwait(10000); 22632 } 22633 22634 #if defined(__i386) || defined(__amd64) 22635 /* 22636 * save the resid from PARTIAL_DMA 22637 */ 22638 dma_resid = wr_pktp->pkt_resid; 22639 if (dma_resid != 0) 22640 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 22641 wr_pktp->pkt_resid = 0; 22642 #endif 22643 22644 /* SunBug 1222170 */ 22645 wr_pktp->pkt_flags = FLAG_NOINTR; 22646 22647 err = EIO; 22648 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22649 22650 /* 22651 * Scsi_poll returns 0 (success) if the command completes and 22652 * the status block is STATUS_GOOD. We should only check 22653 * errors if this condition is not true. Even then we should 22654 * send our own request sense packet only if we have a check 22655 * condition and auto request sense has not been performed by 22656 * the hba. 22657 */ 22658 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 22659 22660 if ((sd_scsi_poll(un, wr_pktp) == 0) && 22661 (wr_pktp->pkt_resid == 0)) { 22662 err = SD_SUCCESS; 22663 break; 22664 } 22665 22666 /* 22667 * Check CMD_DEV_GONE 1st, give up if device is gone. 22668 */ 22669 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 22670 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22671 "Device is gone\n"); 22672 break; 22673 } 22674 22675 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 22676 SD_INFO(SD_LOG_DUMP, un, 22677 "sddump: write failed with CHECK, try # %d\n", i); 22678 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 22679 (void) sd_send_polled_RQS(un); 22680 } 22681 22682 continue; 22683 } 22684 22685 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 22686 int reset_retval = 0; 22687 22688 SD_INFO(SD_LOG_DUMP, un, 22689 "sddump: write failed with BUSY, try # %d\n", i); 22690 22691 if (un->un_f_lun_reset_enabled == TRUE) { 22692 reset_retval = scsi_reset(SD_ADDRESS(un), 22693 RESET_LUN); 22694 } 22695 if (reset_retval == 0) { 22696 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22697 } 22698 (void) sd_send_polled_RQS(un); 22699 22700 } else { 22701 SD_INFO(SD_LOG_DUMP, un, 22702 "sddump: write failed with 0x%x, try # %d\n", 22703 SD_GET_PKT_STATUS(wr_pktp), i); 22704 mutex_enter(SD_MUTEX(un)); 22705 sd_reset_target(un, wr_pktp); 22706 mutex_exit(SD_MUTEX(un)); 22707 } 22708 22709 /* 22710 * If we are not getting anywhere with lun/target resets, 22711 * let's reset the bus. 
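 *
 * (With SD_NDUMP_RETRIES defined as 12 above, this bus reset
 * fires once, on the seventh attempt, i.e. when i == 6.)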
22712 */ 22713 if (i == SD_NDUMP_RETRIES/2) { 22714 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22715 (void) sd_send_polled_RQS(un); 22716 } 22717 22718 } 22719 #if defined(__i386) || defined(__amd64) 22720 } /* dma_resid */ 22721 #endif 22722 22723 scsi_destroy_pkt(wr_pktp); 22724 mutex_enter(SD_MUTEX(un)); 22725 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 22726 mutex_exit(SD_MUTEX(un)); 22727 scsi_free_consistent_buf(wr_bp); 22728 } else { 22729 mutex_exit(SD_MUTEX(un)); 22730 } 22731 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 22732 return (err); 22733 } 22734 22735 /* 22736 * Function: sd_scsi_poll() 22737 * 22738 * Description: This is a wrapper for the scsi_poll call. 22739 * 22740 * Arguments: sd_lun - The unit structure 22741 * scsi_pkt - The scsi packet being sent to the device. 22742 * 22743 * Return Code: 0 - Command completed successfully with good status 22744 * -1 - Command failed. This could indicate a check condition 22745 * or other status value requiring recovery action. 22746 * 22747 */ 22748 22749 static int 22750 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 22751 { 22752 int status; 22753 22754 ASSERT(un != NULL); 22755 ASSERT(!mutex_owned(SD_MUTEX(un))); 22756 ASSERT(pktp != NULL); 22757 22758 status = SD_SUCCESS; 22759 22760 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 22761 pktp->pkt_flags |= un->un_tagflags; 22762 pktp->pkt_flags &= ~FLAG_NODISCON; 22763 } 22764 22765 status = sd_ddi_scsi_poll(pktp); 22766 /* 22767 * Scsi_poll returns 0 (success) if the command completes and the 22768 * status block is STATUS_GOOD. We should only check errors if this 22769 * condition is not true. Even then we should send our own request 22770 * sense packet only if we have a check condition and auto 22771 * request sense has not been performed by the hba. 22772 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 22773 */ 22774 if ((status != SD_SUCCESS) && 22775 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 22776 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 22777 (pktp->pkt_reason != CMD_DEV_GONE)) 22778 (void) sd_send_polled_RQS(un); 22779 22780 return (status); 22781 } 22782 22783 /* 22784 * Function: sd_send_polled_RQS() 22785 * 22786 * Description: This sends the request sense command to a device. 22787 * 22788 * Arguments: sd_lun - The unit structure 22789 * 22790 * Return Code: 0 - Command completed successfully with good status 22791 * -1 - Command failed. 
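 *
 * A typical call site, as in sddump() above (editor's sketch, not
 * part of the original comment):
 *
 *	if (sd_send_polled_RQS(un) == SD_FAILURE) {
 *		SD_INFO(SD_LOG_DUMP, un,
 *		    "sddump: sd_send_polled_RQS failed\n");
 *	}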
22792 * 22793 */ 22794 22795 static int 22796 sd_send_polled_RQS(struct sd_lun *un) 22797 { 22798 int ret_val; 22799 struct scsi_pkt *rqs_pktp; 22800 struct buf *rqs_bp; 22801 22802 ASSERT(un != NULL); 22803 ASSERT(!mutex_owned(SD_MUTEX(un))); 22804 22805 ret_val = SD_SUCCESS; 22806 22807 rqs_pktp = un->un_rqs_pktp; 22808 rqs_bp = un->un_rqs_bp; 22809 22810 mutex_enter(SD_MUTEX(un)); 22811 22812 if (un->un_sense_isbusy) { 22813 ret_val = SD_FAILURE; 22814 mutex_exit(SD_MUTEX(un)); 22815 return (ret_val); 22816 } 22817 22818 /* 22819 * If the request sense buffer (and packet) is not in use, 22820 * let's set the un_sense_isbusy and send our packet 22821 */ 22822 un->un_sense_isbusy = 1; 22823 rqs_pktp->pkt_resid = 0; 22824 rqs_pktp->pkt_reason = 0; 22825 rqs_pktp->pkt_flags |= FLAG_NOINTR; 22826 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 22827 22828 mutex_exit(SD_MUTEX(un)); 22829 22830 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 22831 " 0x%p\n", rqs_bp->b_un.b_addr); 22832 22833 /* 22834 * Can't send this to sd_scsi_poll, we wrap ourselves around the 22835 * axle - it has a call into us! 22836 */ 22837 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 22838 SD_INFO(SD_LOG_COMMON, un, 22839 "sd_send_polled_RQS: RQS failed\n"); 22840 } 22841 22842 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 22843 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 22844 22845 mutex_enter(SD_MUTEX(un)); 22846 un->un_sense_isbusy = 0; 22847 mutex_exit(SD_MUTEX(un)); 22848 22849 return (ret_val); 22850 } 22851 22852 /* 22853 * Defines needed for localized version of the scsi_poll routine. 22854 */ 22855 #define SD_CSEC 10000 /* usecs */ 22856 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 22857 22858 22859 /* 22860 * Function: sd_ddi_scsi_poll() 22861 * 22862 * Description: Localized version of the scsi_poll routine. The purpose is to 22863 * send a scsi_pkt to a device as a polled command. This version 22864 * is to ensure more robust handling of transport errors. 22865 * Specifically this routine cures not ready, coming ready 22866 * transition for power up and reset of sonoma's. This can take 22867 * up to 45 seconds for power-on and 20 seconds for reset of a 22868 * sonoma lun. 22869 * 22870 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 22871 * 22872 * Return Code: 0 - Command completed successfully with good status 22873 * -1 - Command failed. 22874 * 22875 */ 22876 22877 static int 22878 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 22879 { 22880 int busy_count; 22881 int timeout; 22882 int rval = SD_FAILURE; 22883 int savef; 22884 uint8_t *sensep; 22885 long savet; 22886 void (*savec)(); 22887 /* 22888 * The following is defined in machdep.c and is used in determining if 22889 * the scsi transport system will do polled I/O instead of interrupt 22890 * I/O when called from xx_dump(). 22891 */ 22892 extern int do_polled_io; 22893 22894 /* 22895 * save old flags in pkt, to restore at end 22896 */ 22897 savef = pkt->pkt_flags; 22898 savec = pkt->pkt_comp; 22899 savet = pkt->pkt_time; 22900 22901 pkt->pkt_flags |= FLAG_NOINTR; 22902 22903 /* 22904 * XXX there is nothing in the SCSA spec that states that we should not 22905 * do a callback for polled cmds; however, removing this will break sd 22906 * and probably other target drivers 22907 */ 22908 pkt->pkt_comp = NULL; 22909 22910 /* 22911 * we don't like a polled command without timeout. 22912 * 60 seconds seems long enough. 
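 *
 * Editor's note on the retry accounting below: SD_CSEC is 10000 usec
 * and SD_SEC_TO_CSEC is 100, so the loop bound of
 * pkt_time * SD_SEC_TO_CSEC gives a 60-second command 6000 polling
 * slots of 10 msec each; retries that wait a full second also add
 * SD_SEC_TO_CSEC - 1 to busy_count so that they consume 100 slots
 * rather than one.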
22913 */ 22914 if (pkt->pkt_time == 0) { 22915 pkt->pkt_time = SCSI_POLL_TIMEOUT; 22916 } 22917 22918 /* 22919 * Send polled cmd. 22920 * 22921 * We do some error recovery for various errors. Tran_busy, 22922 * queue full, and non-dispatched commands are retried every 10 msec. 22923 * as they are typically transient failures. Busy status and Not 22924 * Ready are retried every second as this status takes a while to 22925 * change. Unit attention is retried for pkt_time (60) times 22926 * with no delay. 22927 */ 22928 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 22929 22930 for (busy_count = 0; busy_count < timeout; busy_count++) { 22931 int rc; 22932 int poll_delay; 22933 22934 /* 22935 * Initialize pkt status variables. 22936 */ 22937 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 22938 22939 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 22940 if (rc != TRAN_BUSY) { 22941 /* Transport failed - give up. */ 22942 break; 22943 } else { 22944 /* Transport busy - try again. */ 22945 poll_delay = 1 * SD_CSEC; /* 10 msec */ 22946 } 22947 } else { 22948 /* 22949 * Transport accepted - check pkt status. 22950 */ 22951 rc = (*pkt->pkt_scbp) & STATUS_MASK; 22952 if (pkt->pkt_reason == CMD_CMPLT && 22953 rc == STATUS_CHECK && 22954 pkt->pkt_state & STATE_ARQ_DONE) { 22955 struct scsi_arq_status *arqstat = 22956 (struct scsi_arq_status *)(pkt->pkt_scbp); 22957 22958 sensep = (uint8_t *)&arqstat->sts_sensedata; 22959 } else { 22960 sensep = NULL; 22961 } 22962 22963 if ((pkt->pkt_reason == CMD_CMPLT) && 22964 (rc == STATUS_GOOD)) { 22965 /* No error - we're done */ 22966 rval = SD_SUCCESS; 22967 break; 22968 22969 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 22970 /* Lost connection - give up */ 22971 break; 22972 22973 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 22974 (pkt->pkt_state == 0)) { 22975 /* Pkt not dispatched - try again. */ 22976 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 22977 22978 } else if ((pkt->pkt_reason == CMD_CMPLT) && 22979 (rc == STATUS_QFULL)) { 22980 /* Queue full - try again. */ 22981 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 22982 22983 } else if ((pkt->pkt_reason == CMD_CMPLT) && 22984 (rc == STATUS_BUSY)) { 22985 /* Busy - try again. */ 22986 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 22987 busy_count += (SD_SEC_TO_CSEC - 1); 22988 22989 } else if ((sensep != NULL) && 22990 (scsi_sense_key(sensep) == 22991 KEY_UNIT_ATTENTION)) { 22992 /* Unit Attention - try again */ 22993 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 22994 continue; 22995 22996 } else if ((sensep != NULL) && 22997 (scsi_sense_key(sensep) == KEY_NOT_READY) && 22998 (scsi_sense_asc(sensep) == 0x04) && 22999 (scsi_sense_ascq(sensep) == 0x01)) { 23000 /* Not ready -> ready - try again. */ 23001 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23002 busy_count += (SD_SEC_TO_CSEC - 1); 23003 23004 } else { 23005 /* BAD status - give up. 
*/ 23006 break; 23007 } 23008 } 23009 23010 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 23011 !do_polled_io) { 23012 delay(drv_usectohz(poll_delay)); 23013 } else { 23014 /* we busy wait during cpr_dump or interrupt threads */ 23015 drv_usecwait(poll_delay); 23016 } 23017 } 23018 23019 pkt->pkt_flags = savef; 23020 pkt->pkt_comp = savec; 23021 pkt->pkt_time = savet; 23022 return (rval); 23023 } 23024 23025 23026 /* 23027 * Function: sd_persistent_reservation_in_read_keys 23028 * 23029 * Description: This routine is the driver entry point for handling CD-ROM 23030 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23031 * by sending the SCSI-3 PRIN commands to the device. 23032 * Processes the read keys command response by copying the 23033 * reservation key information into the user provided buffer. 23034 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23035 * 23036 * Arguments: un - Pointer to soft state struct for the target. 23037 * usrp - user provided pointer to multihost Persistent In Read 23038 * Keys structure (mhioc_inkeys_t) 23039 * flag - this argument is a pass through to ddi_copyxxx() 23040 * directly from the mode argument of ioctl(). 23041 * 23042 * Return Code: 0 - Success 23043 * EACCES 23044 * ENOTSUP 23045 * errno return code from sd_send_scsi_cmd() 23046 * 23047 * Context: Can sleep. Does not return until command is completed. 23048 */ 23049 23050 static int 23051 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23052 mhioc_inkeys_t *usrp, int flag) 23053 { 23054 #ifdef _MULTI_DATAMODEL 23055 struct mhioc_key_list32 li32; 23056 #endif 23057 sd_prin_readkeys_t *in; 23058 mhioc_inkeys_t *ptr; 23059 mhioc_key_list_t li; 23060 uchar_t *data_bufp; 23061 int data_len; 23062 int rval; 23063 size_t copysz; 23064 23065 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23066 return (EINVAL); 23067 } 23068 bzero(&li, sizeof (mhioc_key_list_t)); 23069 23070 /* 23071 * Get the listsize from user 23072 */ 23073 #ifdef _MULTI_DATAMODEL 23074 23075 switch (ddi_model_convert_from(flag & FMODELS)) { 23076 case DDI_MODEL_ILP32: 23077 copysz = sizeof (struct mhioc_key_list32); 23078 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23079 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23080 "sd_persistent_reservation_in_read_keys: " 23081 "failed ddi_copyin: mhioc_key_list32_t\n"); 23082 rval = EFAULT; 23083 goto done; 23084 } 23085 li.listsize = li32.listsize; 23086 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23087 break; 23088 23089 case DDI_MODEL_NONE: 23090 copysz = sizeof (mhioc_key_list_t); 23091 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23092 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23093 "sd_persistent_reservation_in_read_keys: " 23094 "failed ddi_copyin: mhioc_key_list_t\n"); 23095 rval = EFAULT; 23096 goto done; 23097 } 23098 break; 23099 } 23100 23101 #else /* ! 
_MULTI_DATAMODEL */ 23102 copysz = sizeof (mhioc_key_list_t); 23103 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23104 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23105 "sd_persistent_reservation_in_read_keys: " 23106 "failed ddi_copyin: mhioc_key_list_t\n"); 23107 rval = EFAULT; 23108 goto done; 23109 } 23110 #endif 23111 23112 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23113 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23114 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23115 23116 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23117 data_len, data_bufp)) != 0) { 23118 goto done; 23119 } 23120 in = (sd_prin_readkeys_t *)data_bufp; 23121 ptr->generation = BE_32(in->generation); 23122 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23123 23124 /* 23125 * Return the min(listsize, listlen) keys 23126 */ 23127 #ifdef _MULTI_DATAMODEL 23128 23129 switch (ddi_model_convert_from(flag & FMODELS)) { 23130 case DDI_MODEL_ILP32: 23131 li32.listlen = li.listlen; 23132 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23133 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23134 "sd_persistent_reservation_in_read_keys: " 23135 "failed ddi_copyout: mhioc_key_list32_t\n"); 23136 rval = EFAULT; 23137 goto done; 23138 } 23139 break; 23140 23141 case DDI_MODEL_NONE: 23142 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23143 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23144 "sd_persistent_reservation_in_read_keys: " 23145 "failed ddi_copyout: mhioc_key_list_t\n"); 23146 rval = EFAULT; 23147 goto done; 23148 } 23149 break; 23150 } 23151 23152 #else /* ! _MULTI_DATAMODEL */ 23153 23154 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23155 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23156 "sd_persistent_reservation_in_read_keys: " 23157 "failed ddi_copyout: mhioc_key_list_t\n"); 23158 rval = EFAULT; 23159 goto done; 23160 } 23161 23162 #endif /* _MULTI_DATAMODEL */ 23163 23164 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23165 li.listsize * MHIOC_RESV_KEY_SIZE); 23166 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23167 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23168 "sd_persistent_reservation_in_read_keys: " 23169 "failed ddi_copyout: keylist\n"); 23170 rval = EFAULT; 23171 } 23172 done: 23173 kmem_free(data_bufp, data_len); 23174 return (rval); 23175 } 23176 23177 23178 /* 23179 * Function: sd_persistent_reservation_in_read_resv 23180 * 23181 * Description: This routine is the driver entry point for handling CD-ROM 23182 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23183 * by sending the SCSI-3 PRIN commands to the device. 23184 * Process the read persistent reservations command response by 23185 * copying the reservation information into the user provided 23186 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23187 * 23188 * Arguments: un - Pointer to soft state struct for the target. 23189 * usrp - user provided pointer to multihost Persistent In Read 23190 * Reservations structure (mhioc_inresvs_t) 23191 * flag - this argument is a pass through to ddi_copyxxx() 23192 * directly from the mode argument of ioctl(). 23193 * 23194 * Return Code: 0 - Success 23195 * EACCES 23196 * ENOTSUP 23197 * errno return code from sd_send_scsi_cmd() 23198 * 23199 * Context: Can sleep. Does not return until command is completed.
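 *
 * Editor's note: as in sd_persistent_reservation_in_read_keys() above,
 * ddi_model_convert_from(flag & FMODELS) selects between the ILP32 and
 * native layouts of the caller's list header, and a 32-bit caller's
 * list pointer is widened before use, e.g.:
 *
 *	resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
 *
 * so the later ddi_copyout() of descriptors works for both data models.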
23200 */ 23201 23202 static int 23203 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23204 mhioc_inresvs_t *usrp, int flag) 23205 { 23206 #ifdef _MULTI_DATAMODEL 23207 struct mhioc_resv_desc_list32 resvlist32; 23208 #endif 23209 sd_prin_readresv_t *in; 23210 mhioc_inresvs_t *ptr; 23211 sd_readresv_desc_t *readresv_ptr; 23212 mhioc_resv_desc_list_t resvlist; 23213 mhioc_resv_desc_t resvdesc; 23214 uchar_t *data_bufp; 23215 int data_len; 23216 int rval; 23217 int i; 23218 size_t copysz; 23219 mhioc_resv_desc_t *bufp; 23220 23221 if ((ptr = usrp) == NULL) { 23222 return (EINVAL); 23223 } 23224 23225 /* 23226 * Get the listsize from user 23227 */ 23228 #ifdef _MULTI_DATAMODEL 23229 switch (ddi_model_convert_from(flag & FMODELS)) { 23230 case DDI_MODEL_ILP32: 23231 copysz = sizeof (struct mhioc_resv_desc_list32); 23232 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23233 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23234 "sd_persistent_reservation_in_read_resv: " 23235 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23236 rval = EFAULT; 23237 goto done; 23238 } 23239 resvlist.listsize = resvlist32.listsize; 23240 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23241 break; 23242 23243 case DDI_MODEL_NONE: 23244 copysz = sizeof (mhioc_resv_desc_list_t); 23245 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23246 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23247 "sd_persistent_reservation_in_read_resv: " 23248 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23249 rval = EFAULT; 23250 goto done; 23251 } 23252 break; 23253 } 23254 #else /* ! _MULTI_DATAMODEL */ 23255 copysz = sizeof (mhioc_resv_desc_list_t); 23256 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23257 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23258 "sd_persistent_reservation_in_read_resv: " 23259 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23260 rval = EFAULT; 23261 goto done; 23262 } 23263 #endif /* ! _MULTI_DATAMODEL */ 23264 23265 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23266 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23267 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23268 23269 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23270 data_len, data_bufp)) != 0) { 23271 goto done; 23272 } 23273 in = (sd_prin_readresv_t *)data_bufp; 23274 ptr->generation = BE_32(in->generation); 23275 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23276 23277 /* 23278 * Return the min(listsize, listlen) keys 23279 */ 23280 #ifdef _MULTI_DATAMODEL 23281 23282 switch (ddi_model_convert_from(flag & FMODELS)) { 23283 case DDI_MODEL_ILP32: 23284 resvlist32.listlen = resvlist.listlen; 23285 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23286 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23287 "sd_persistent_reservation_in_read_resv: " 23288 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23289 rval = EFAULT; 23290 goto done; 23291 } 23292 break; 23293 23294 case DDI_MODEL_NONE: 23295 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23296 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23297 "sd_persistent_reservation_in_read_resv: " 23298 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23299 rval = EFAULT; 23300 goto done; 23301 } 23302 break; 23303 } 23304 23305 #else /* ! _MULTI_DATAMODEL */ 23306 23307 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23308 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23309 "sd_persistent_reservation_in_read_resv: " 23310 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23311 rval = EFAULT; 23312 goto done; 23313 } 23314 23315 #endif /* !
_MULTI_DATAMODEL */ 23316 23317 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23318 bufp = resvlist.list; 23319 copysz = sizeof (mhioc_resv_desc_t); 23320 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23321 i++, readresv_ptr++, bufp++) { 23322 23323 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23324 MHIOC_RESV_KEY_SIZE); 23325 resvdesc.type = readresv_ptr->type; 23326 resvdesc.scope = readresv_ptr->scope; 23327 resvdesc.scope_specific_addr = 23328 BE_32(readresv_ptr->scope_specific_addr); 23329 23330 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23331 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23332 "sd_persistent_reservation_in_read_resv: " 23333 "failed ddi_copyout: resvlist\n"); 23334 rval = EFAULT; 23335 goto done; 23336 } 23337 } 23338 done: 23339 kmem_free(data_bufp, data_len); 23340 return (rval); 23341 } 23342 23343 23344 /* 23345 * Function: sr_change_blkmode() 23346 * 23347 * Description: This routine is the driver entry point for handling CD-ROM 23348 * block mode ioctl requests. Support for returning and changing 23349 * the current block size in use by the device is implemented. The 23350 * LBA size is changed via a MODE SELECT Block Descriptor. 23351 * 23352 * This routine issues a mode sense with an allocation length of 23353 * 12 bytes for the mode page header and a single block descriptor. 23354 * 23355 * Arguments: dev - the device 'dev_t' 23356 * cmd - the request type; one of CDROMGBLKMODE (get) or 23357 * CDROMSBLKMODE (set) 23358 * data - current block size or requested block size 23359 * flag - this argument is a pass through to ddi_copyxxx() directly 23360 * from the mode argument of ioctl(). 23361 * 23362 * Return Code: the code returned by sd_send_scsi_cmd() 23363 * EINVAL if invalid arguments are provided 23364 * EFAULT if ddi_copyxxx() fails 23365 * ENXIO if fail ddi_get_soft_state 23366 * EIO if invalid mode sense block descriptor length 23367 * 23368 */ 23369 23370 static int 23371 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23372 { 23373 struct sd_lun *un = NULL; 23374 struct mode_header *sense_mhp, *select_mhp; 23375 struct block_descriptor *sense_desc, *select_desc; 23376 int current_bsize; 23377 int rval = EINVAL; 23378 uchar_t *sense = NULL; 23379 uchar_t *select = NULL; 23380 23381 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23382 23383 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23384 return (ENXIO); 23385 } 23386 23387 /* 23388 * The block length is changed via the Mode Select block descriptor, the 23389 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 23390 * required as part of this routine. Therefore the mode sense allocation 23391 * length is specified to be the length of a mode page header and a 23392 * block descriptor. 
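 *
 * (Editor's note: that is, MODE_HEADER_LENGTH, 4 bytes, plus
 * MODE_BLK_DESC_LENGTH, 8 bytes, for the 12-byte allocation made
 * below as BUFLEN_CHG_BLK_MODE.)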
23393 */ 23394 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23395 23396 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23397 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 23398 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23399 "sr_change_blkmode: Mode Sense Failed\n"); 23400 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23401 return (rval); 23402 } 23403 23404 /* Check the block descriptor len to handle only 1 block descriptor */ 23405 sense_mhp = (struct mode_header *)sense; 23406 if ((sense_mhp->bdesc_length == 0) || 23407 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 23408 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23409 "sr_change_blkmode: Mode Sense returned invalid block" 23410 " descriptor length\n"); 23411 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23412 return (EIO); 23413 } 23414 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 23415 current_bsize = ((sense_desc->blksize_hi << 16) | 23416 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 23417 23418 /* Process command */ 23419 switch (cmd) { 23420 case CDROMGBLKMODE: 23421 /* Return the block size obtained during the mode sense */ 23422 if (ddi_copyout(&current_bsize, (void *)data, 23423 sizeof (int), flag) != 0) 23424 rval = EFAULT; 23425 break; 23426 case CDROMSBLKMODE: 23427 /* Validate the requested block size */ 23428 switch (data) { 23429 case CDROM_BLK_512: 23430 case CDROM_BLK_1024: 23431 case CDROM_BLK_2048: 23432 case CDROM_BLK_2056: 23433 case CDROM_BLK_2336: 23434 case CDROM_BLK_2340: 23435 case CDROM_BLK_2352: 23436 case CDROM_BLK_2368: 23437 case CDROM_BLK_2448: 23438 case CDROM_BLK_2646: 23439 case CDROM_BLK_2647: 23440 break; 23441 default: 23442 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23443 "sr_change_blkmode: " 23444 "Block Size '%ld' Not Supported\n", data); 23445 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23446 return (EINVAL); 23447 } 23448 23449 /* 23450 * The current block size matches the requested block size so 23451 * there is no need to send the mode select to change the size 23452 */ 23453 if (current_bsize == data) { 23454 break; 23455 } 23456 23457 /* Build the select data for the requested block size */ 23458 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23459 select_mhp = (struct mode_header *)select; 23460 select_desc = 23461 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 23462 /* 23463 * The LBA size is changed via the block descriptor, so the 23464 * descriptor is built according to the user data 23465 */ 23466 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 23467 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 23468 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 23469 select_desc->blksize_lo = (char)((data) & 0x000000ff); 23470 23471 /* Send the mode select for the requested block size */ 23472 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23473 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23474 SD_PATH_STANDARD)) != 0) { 23475 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23476 "sr_change_blkmode: Mode Select Failed\n"); 23477 /* 23478 * The mode select failed for the requested block size, 23479 * so reset the data for the original block size and 23480 * send it to the target. The error is indicated by the 23481 * return value for the failed mode select.
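 *
 * (Editor's worked example of the 24-bit block-length encoding
 * built above: a requested size of 2048, i.e. 0x000800, is split
 * into blksize_hi = 0x00, blksize_mid = 0x08 and blksize_lo = 0x00;
 * the restore below simply copies the original three bytes back.)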
23482 */ 23483 select_desc->blksize_hi = sense_desc->blksize_hi; 23484 select_desc->blksize_mid = sense_desc->blksize_mid; 23485 select_desc->blksize_lo = sense_desc->blksize_lo; 23486 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23487 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23488 SD_PATH_STANDARD); 23489 } else { 23490 ASSERT(!mutex_owned(SD_MUTEX(un))); 23491 mutex_enter(SD_MUTEX(un)); 23492 sd_update_block_info(un, (uint32_t)data, 0); 23493 mutex_exit(SD_MUTEX(un)); 23494 } 23495 break; 23496 default: 23497 /* should not reach here, but check anyway */ 23498 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23499 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 23500 rval = EINVAL; 23501 break; 23502 } 23503 23504 if (select) { 23505 kmem_free(select, BUFLEN_CHG_BLK_MODE); 23506 } 23507 if (sense) { 23508 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23509 } 23510 return (rval); 23511 } 23512 23513 23514 /* 23515 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 23516 * implement driver support for getting and setting the CD speed. The command 23517 * set used will be based on the device type. If the device has not been 23518 * identified as MMC the Toshiba vendor specific mode page will be used. If 23519 * the device is MMC but does not support the Real Time Streaming feature 23520 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 23521 * be used to read the speed. 23522 */ 23523 23524 /* 23525 * Function: sr_change_speed() 23526 * 23527 * Description: This routine is the driver entry point for handling CD-ROM 23528 * drive speed ioctl requests for devices supporting the Toshiba 23529 * vendor specific drive speed mode page. Support for returning 23530 * and changing the current drive speed in use by the device is 23531 * implemented. 23532 * 23533 * Arguments: dev - the device 'dev_t' 23534 * cmd - the request type; one of CDROMGDRVSPEED (get) or 23535 * CDROMSDRVSPEED (set) 23536 * data - current drive speed or requested drive speed 23537 * flag - this argument is a pass through to ddi_copyxxx() directly 23538 * from the mode argument of ioctl(). 23539 * 23540 * Return Code: the code returned by sd_send_scsi_cmd() 23541 * EINVAL if invalid arguments are provided 23542 * EFAULT if ddi_copyxxx() fails 23543 * ENXIO if fail ddi_get_soft_state 23544 * EIO if invalid mode sense block descriptor length 23545 */ 23546 23547 static int 23548 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23549 { 23550 struct sd_lun *un = NULL; 23551 struct mode_header *sense_mhp, *select_mhp; 23552 struct mode_speed *sense_page, *select_page; 23553 int current_speed; 23554 int rval = EINVAL; 23555 int bd_len; 23556 uchar_t *sense = NULL; 23557 uchar_t *select = NULL; 23558 23559 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23560 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23561 return (ENXIO); 23562 } 23563 23564 /* 23565 * Note: The drive speed is being modified here according to a Toshiba 23566 * vendor specific mode page (0x31). 
23567 */ 23568 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23569 23570 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23571 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 23572 SD_PATH_STANDARD)) != 0) { 23573 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23574 "sr_change_speed: Mode Sense Failed\n"); 23575 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23576 return (rval); 23577 } 23578 sense_mhp = (struct mode_header *)sense; 23579 23580 /* Check the block descriptor len to handle only 1 block descriptor */ 23581 bd_len = sense_mhp->bdesc_length; 23582 if (bd_len > MODE_BLK_DESC_LENGTH) { 23583 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23584 "sr_change_speed: Mode Sense returned invalid block " 23585 "descriptor length\n"); 23586 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23587 return (EIO); 23588 } 23589 23590 sense_page = (struct mode_speed *) 23591 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 23592 current_speed = sense_page->speed; 23593 23594 /* Process command */ 23595 switch (cmd) { 23596 case CDROMGDRVSPEED: 23597 /* Return the drive speed obtained during the mode sense */ 23598 if (current_speed == 0x2) { 23599 current_speed = CDROM_TWELVE_SPEED; 23600 } 23601 if (ddi_copyout(&current_speed, (void *)data, 23602 sizeof (int), flag) != 0) { 23603 rval = EFAULT; 23604 } 23605 break; 23606 case CDROMSDRVSPEED: 23607 /* Validate the requested drive speed */ 23608 switch ((uchar_t)data) { 23609 case CDROM_TWELVE_SPEED: 23610 data = 0x2; 23611 /*FALLTHROUGH*/ 23612 case CDROM_NORMAL_SPEED: 23613 case CDROM_DOUBLE_SPEED: 23614 case CDROM_QUAD_SPEED: 23615 case CDROM_MAXIMUM_SPEED: 23616 break; 23617 default: 23618 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23619 "sr_change_speed: " 23620 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 23621 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23622 return (EINVAL); 23623 } 23624 23625 /* 23626 * The current drive speed matches the requested drive speed so 23627 * there is no need to send the mode select to change the speed 23628 */ 23629 if (current_speed == data) { 23630 break; 23631 } 23632 23633 /* Build the select data for the requested drive speed */ 23634 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23635 select_mhp = (struct mode_header *)select; 23636 select_mhp->bdesc_length = 0; 23637 select_page = 23638 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 23639 23640 23641 select_page->mode_page.code = CDROM_MODE_SPEED; 23642 select_page->mode_page.length = 2; 23643 select_page->speed = (uchar_t)data; 23644 23645 /* Send the mode select for the requested drive speed */ 23646 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23647 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23648 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 23649 /* 23650 * The mode select failed for the requested drive speed, 23651 * so reset the data for the original drive speed and 23652 * send it to the target. The error is indicated by the 23653 * return value for the failed mode select.
23654 */ 23655 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23656 "sr_change_speed: Mode Select Failed\n"); 23657 select_page->speed = sense_page->speed; 23658 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23659 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23660 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 23661 } 23662 break; 23663 default: 23664 /* should not reach here, but check anyway */ 23665 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23666 "sr_change_speed: Command '%x' Not Supported\n", cmd); 23667 rval = EINVAL; 23668 break; 23669 } 23670 23671 if (select) { 23672 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 23673 } 23674 if (sense) { 23675 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23676 } 23677 23678 return (rval); 23679 } 23680 23681 23682 /* 23683 * Function: sr_atapi_change_speed() 23684 * 23685 * Description: This routine is the driver entry point for handling CD-ROM 23686 * drive speed ioctl requests for MMC devices that do not support 23687 * the Real Time Streaming feature (0x107). 23688 * 23689 * Note: This routine will use the SET SPEED command which may not 23690 * be supported by all devices. 23691 * 23692 * Arguments: dev - the device 'dev_t' 23693 * cmd - the request type; one of CDROMGDRVSPEED (get) or 23694 * CDROMSDRVSPEED (set) 23695 * data - current drive speed or requested drive speed 23696 * flag - this argument is a pass through to ddi_copyxxx() directly 23697 * from the mode argument of ioctl(). 23698 * 23699 * Return Code: the code returned by sd_send_scsi_cmd() 23700 * EINVAL if invalid arguments are provided 23701 * EFAULT if ddi_copyxxx() fails 23702 * ENXIO if fail ddi_get_soft_state 23703 * EIO if invalid mode sense block descriptor length 23704 */ 23705 23706 static int 23707 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23708 { 23709 struct sd_lun *un; 23710 struct uscsi_cmd *com = NULL; 23711 struct mode_header_grp2 *sense_mhp; 23712 uchar_t *sense_page; 23713 uchar_t *sense = NULL; 23714 char cdb[CDB_GROUP5]; 23715 int bd_len; 23716 int current_speed = 0; 23717 int max_speed = 0; 23718 int rval; 23719 23720 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23721 23722 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23723 return (ENXIO); 23724 } 23725 23726 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 23727 23728 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 23729 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 23730 SD_PATH_STANDARD)) != 0) { 23731 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23732 "sr_atapi_change_speed: Mode Sense Failed\n"); 23733 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23734 return (rval); 23735 } 23736 23737 /* Check the block descriptor len to handle only 1 block descriptor */ 23738 sense_mhp = (struct mode_header_grp2 *)sense; 23739 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 23740 if (bd_len > MODE_BLK_DESC_LENGTH) { 23741 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23742 "sr_atapi_change_speed: Mode Sense returned invalid " 23743 "block descriptor length\n"); 23744 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23745 return (EIO); 23746 } 23747 23748 /* Calculate the current and maximum drive speeds */ 23749 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 23750 current_speed = (sense_page[14] << 8) | sense_page[15]; 23751 max_speed = (sense_page[8] << 8) | sense_page[9]; 23752 23753 /* Process the command */ 23754 switch (cmd) { 23755 case CDROMGDRVSPEED: 23756 current_speed /= SD_SPEED_1X; 23757 if
(ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSDRVSPEED:
		/* Convert the speed code to KB/sec */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* Check the request against the drive's max speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/*
		 * Build and send the SET SPEED command
		 *
		 * Note: The SET SPEED (0xBB) command used in this routine is
		 * obsolete per the SCSI MMC spec but still supported in the
		 * MT FUJI vendor spec. Most equipment adheres to MT FUJI, so
		 * the command is still implemented in this routine.
		 */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb = (caddr_t)cdb;
		com->uscsi_cdblen = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen = 0;
		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}


/*
 * Function: sr_pause_resume()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		pause/resume ioctl requests. This only affects the audio play
 *		operation.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
 *		    for setting the resume bit of the cdb.
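 *
 *	      (For reference: SCMD_PAUSE_RESUME is the PAUSE/RESUME (0x4B)
 *	      command, and byte 8 of its CDB carries the resume bit. The
 *	      switch below sets it to 1 for CDROMRESUME and leaves it 0 for
 *	      CDROMPAUSE.)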
23841 * 23842 * Return Code: the code returned by sd_send_scsi_cmd() 23843 * EINVAL if invalid mode specified 23844 * 23845 */ 23846 23847 static int 23848 sr_pause_resume(dev_t dev, int cmd) 23849 { 23850 struct sd_lun *un; 23851 struct uscsi_cmd *com; 23852 char cdb[CDB_GROUP1]; 23853 int rval; 23854 23855 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23856 return (ENXIO); 23857 } 23858 23859 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23860 bzero(cdb, CDB_GROUP1); 23861 cdb[0] = SCMD_PAUSE_RESUME; 23862 switch (cmd) { 23863 case CDROMRESUME: 23864 cdb[8] = 1; 23865 break; 23866 case CDROMPAUSE: 23867 cdb[8] = 0; 23868 break; 23869 default: 23870 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 23871 " Command '%x' Not Supported\n", cmd); 23872 rval = EINVAL; 23873 goto done; 23874 } 23875 23876 com->uscsi_cdb = cdb; 23877 com->uscsi_cdblen = CDB_GROUP1; 23878 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23879 23880 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23881 SD_PATH_STANDARD); 23882 23883 done: 23884 kmem_free(com, sizeof (*com)); 23885 return (rval); 23886 } 23887 23888 23889 /* 23890 * Function: sr_play_msf() 23891 * 23892 * Description: This routine is the driver entry point for handling CD-ROM 23893 * ioctl requests to output the audio signals at the specified 23894 * starting address and continue the audio play until the specified 23895 * ending address (CDROMPLAYMSF) The address is in Minute Second 23896 * Frame (MSF) format. 23897 * 23898 * Arguments: dev - the device 'dev_t' 23899 * data - pointer to user provided audio msf structure, 23900 * specifying start/end addresses. 23901 * flag - this argument is a pass through to ddi_copyxxx() 23902 * directly from the mode argument of ioctl(). 
23903 * 23904 * Return Code: the code returned by sd_send_scsi_cmd() 23905 * EFAULT if ddi_copyxxx() fails 23906 * ENXIO if fail ddi_get_soft_state 23907 * EINVAL if data pointer is NULL 23908 */ 23909 23910 static int 23911 sr_play_msf(dev_t dev, caddr_t data, int flag) 23912 { 23913 struct sd_lun *un; 23914 struct uscsi_cmd *com; 23915 struct cdrom_msf msf_struct; 23916 struct cdrom_msf *msf = &msf_struct; 23917 char cdb[CDB_GROUP1]; 23918 int rval; 23919 23920 if (data == NULL) { 23921 return (EINVAL); 23922 } 23923 23924 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23925 return (ENXIO); 23926 } 23927 23928 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 23929 return (EFAULT); 23930 } 23931 23932 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23933 bzero(cdb, CDB_GROUP1); 23934 cdb[0] = SCMD_PLAYAUDIO_MSF; 23935 if (un->un_f_cfg_playmsf_bcd == TRUE) { 23936 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 23937 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 23938 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 23939 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 23940 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 23941 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 23942 } else { 23943 cdb[3] = msf->cdmsf_min0; 23944 cdb[4] = msf->cdmsf_sec0; 23945 cdb[5] = msf->cdmsf_frame0; 23946 cdb[6] = msf->cdmsf_min1; 23947 cdb[7] = msf->cdmsf_sec1; 23948 cdb[8] = msf->cdmsf_frame1; 23949 } 23950 com->uscsi_cdb = cdb; 23951 com->uscsi_cdblen = CDB_GROUP1; 23952 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23953 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23954 SD_PATH_STANDARD); 23955 kmem_free(com, sizeof (*com)); 23956 return (rval); 23957 } 23958 23959 23960 /* 23961 * Function: sr_play_trkind() 23962 * 23963 * Description: This routine is the driver entry point for handling CD-ROM 23964 * ioctl requests to output the audio signals at the specified 23965 * starting address and continue the audio play until the specified 23966 * ending address (CDROMPLAYTRKIND). The address is in Track Index 23967 * format. 23968 * 23969 * Arguments: dev - the device 'dev_t' 23970 * data - pointer to user provided audio track/index structure, 23971 * specifying start/end addresses. 23972 * flag - this argument is a pass through to ddi_copyxxx() 23973 * directly from the mode argument of ioctl(). 
23974 * 23975 * Return Code: the code returned by sd_send_scsi_cmd() 23976 * EFAULT if ddi_copyxxx() fails 23977 * ENXIO if fail ddi_get_soft_state 23978 * EINVAL if data pointer is NULL 23979 */ 23980 23981 static int 23982 sr_play_trkind(dev_t dev, caddr_t data, int flag) 23983 { 23984 struct cdrom_ti ti_struct; 23985 struct cdrom_ti *ti = &ti_struct; 23986 struct uscsi_cmd *com = NULL; 23987 char cdb[CDB_GROUP1]; 23988 int rval; 23989 23990 if (data == NULL) { 23991 return (EINVAL); 23992 } 23993 23994 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 23995 return (EFAULT); 23996 } 23997 23998 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23999 bzero(cdb, CDB_GROUP1); 24000 cdb[0] = SCMD_PLAYAUDIO_TI; 24001 cdb[4] = ti->cdti_trk0; 24002 cdb[5] = ti->cdti_ind0; 24003 cdb[7] = ti->cdti_trk1; 24004 cdb[8] = ti->cdti_ind1; 24005 com->uscsi_cdb = cdb; 24006 com->uscsi_cdblen = CDB_GROUP1; 24007 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24008 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24009 SD_PATH_STANDARD); 24010 kmem_free(com, sizeof (*com)); 24011 return (rval); 24012 } 24013 24014 24015 /* 24016 * Function: sr_read_all_subcodes() 24017 * 24018 * Description: This routine is the driver entry point for handling CD-ROM 24019 * ioctl requests to return raw subcode data while the target is 24020 * playing audio (CDROMSUBCODE). 24021 * 24022 * Arguments: dev - the device 'dev_t' 24023 * data - pointer to user provided cdrom subcode structure, 24024 * specifying the transfer length and address. 24025 * flag - this argument is a pass through to ddi_copyxxx() 24026 * directly from the mode argument of ioctl(). 24027 * 24028 * Return Code: the code returned by sd_send_scsi_cmd() 24029 * EFAULT if ddi_copyxxx() fails 24030 * ENXIO if fail ddi_get_soft_state 24031 * EINVAL if data pointer is NULL 24032 */ 24033 24034 static int 24035 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24036 { 24037 struct sd_lun *un = NULL; 24038 struct uscsi_cmd *com = NULL; 24039 struct cdrom_subcode *subcode = NULL; 24040 int rval; 24041 size_t buflen; 24042 char cdb[CDB_GROUP5]; 24043 24044 #ifdef _MULTI_DATAMODEL 24045 /* To support ILP32 applications in an LP64 world */ 24046 struct cdrom_subcode32 cdrom_subcode32; 24047 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24048 #endif 24049 if (data == NULL) { 24050 return (EINVAL); 24051 } 24052 24053 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24054 return (ENXIO); 24055 } 24056 24057 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24058 24059 #ifdef _MULTI_DATAMODEL 24060 switch (ddi_model_convert_from(flag & FMODELS)) { 24061 case DDI_MODEL_ILP32: 24062 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24063 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24064 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24065 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24066 return (EFAULT); 24067 } 24068 /* Convert the ILP32 uscsi data from the application to LP64 */ 24069 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24070 break; 24071 case DDI_MODEL_NONE: 24072 if (ddi_copyin(data, subcode, 24073 sizeof (struct cdrom_subcode), flag)) { 24074 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24075 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24076 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24077 return (EFAULT); 24078 } 24079 break; 24080 } 24081 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block. (CDROMSUBCHNL) The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or MSF
 *		format per the user), track relative CD-ROM address (LBA or MSF
 *		format per the user), control data and audio status.
 *
 * Arguments: dev	- the device 'dev_t'
 *	      data	- pointer to user provided cdrom sub-channel structure
 *	      flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
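 *
 *	      (For reference, the 16-byte response parsed below carries:
 *	      byte 1 the audio status, byte 5 the ADR and control nibbles,
 *	      byte 6 the track number, byte 7 the index, bytes 8-11 the
 *	      absolute address and bytes 12-15 the track-relative address,
 *	      both big-endian.)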
24155 * 24156 * Return Code: the code returned by sd_send_scsi_cmd() 24157 * EFAULT if ddi_copyxxx() fails 24158 * ENXIO if fail ddi_get_soft_state 24159 * EINVAL if data pointer is NULL 24160 */ 24161 24162 static int 24163 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24164 { 24165 struct sd_lun *un; 24166 struct uscsi_cmd *com; 24167 struct cdrom_subchnl subchanel; 24168 struct cdrom_subchnl *subchnl = &subchanel; 24169 char cdb[CDB_GROUP1]; 24170 caddr_t buffer; 24171 int rval; 24172 24173 if (data == NULL) { 24174 return (EINVAL); 24175 } 24176 24177 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24178 (un->un_state == SD_STATE_OFFLINE)) { 24179 return (ENXIO); 24180 } 24181 24182 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24183 return (EFAULT); 24184 } 24185 24186 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24187 bzero(cdb, CDB_GROUP1); 24188 cdb[0] = SCMD_READ_SUBCHANNEL; 24189 /* Set the MSF bit based on the user requested address format */ 24190 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24191 /* 24192 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24193 * returned 24194 */ 24195 cdb[2] = 0x40; 24196 /* 24197 * Set byte 3 to specify the return data format. A value of 0x01 24198 * indicates that the CD-ROM current position should be returned. 24199 */ 24200 cdb[3] = 0x01; 24201 cdb[8] = 0x10; 24202 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24203 com->uscsi_cdb = cdb; 24204 com->uscsi_cdblen = CDB_GROUP1; 24205 com->uscsi_bufaddr = buffer; 24206 com->uscsi_buflen = 16; 24207 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24208 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24209 SD_PATH_STANDARD); 24210 if (rval != 0) { 24211 kmem_free(buffer, 16); 24212 kmem_free(com, sizeof (*com)); 24213 return (rval); 24214 } 24215 24216 /* Process the returned Q sub-channel data */ 24217 subchnl->cdsc_audiostatus = buffer[1]; 24218 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24219 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24220 subchnl->cdsc_trk = buffer[6]; 24221 subchnl->cdsc_ind = buffer[7]; 24222 if (subchnl->cdsc_format & CDROM_LBA) { 24223 subchnl->cdsc_absaddr.lba = 24224 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24225 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24226 subchnl->cdsc_reladdr.lba = 24227 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24228 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24229 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24230 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24231 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24232 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24233 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24234 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24235 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24236 } else { 24237 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24238 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24239 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24240 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24241 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24242 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24243 } 24244 kmem_free(buffer, 16); 24245 kmem_free(com, sizeof (*com)); 24246 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24247 != 0) { 24248 return (EFAULT); 24249 } 24250 return (rval); 24251 } 24252 24253 24254 /* 24255 * Function: sr_read_tocentry() 24256 * 
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment adheres to MT FUJI, so
 *		the command is still implemented in this routine.
 *
 * Arguments: dev	- the device 'dev_t'
 *	      data	- pointer to user provided toc entry structure,
 *			  specifying the track # and the address format
 *			  (LBA or MSF).
 *	      flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * The READ HEADER command failed; since the command
			 * is obsolete in one spec, it is better to return -1
			 * for an invalid track so that we can still receive
			 * the rest of the TOC data.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}


/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
 *		and ending track numbers.
 *
 * Arguments: dev	- the device 'dev_t'
 *	      data	- pointer to user provided toc header structure,
 *			  specifying the starting and ending track numbers.
 *	      flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
24522 * 24523 * In addition to support for the various data formats these routines also 24524 * include support for devices that implement only the direct access READ 24525 * commands (0x08, 0x28), devices that implement the READ_CD commands 24526 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 24527 * READ CDXA commands (0xD8, 0xDB) 24528 */ 24529 24530 /* 24531 * Function: sr_read_mode1() 24532 * 24533 * Description: This routine is the driver entry point for handling CD-ROM 24534 * ioctl read mode1 requests (CDROMREADMODE1). 24535 * 24536 * Arguments: dev - the device 'dev_t' 24537 * data - pointer to user provided cd read structure specifying 24538 * the lba buffer address and length. 24539 * flag - this argument is a pass through to ddi_copyxxx() 24540 * directly from the mode argument of ioctl(). 24541 * 24542 * Return Code: the code returned by sd_send_scsi_cmd() 24543 * EFAULT if ddi_copyxxx() fails 24544 * ENXIO if fail ddi_get_soft_state 24545 * EINVAL if data pointer is NULL 24546 */ 24547 24548 static int 24549 sr_read_mode1(dev_t dev, caddr_t data, int flag) 24550 { 24551 struct sd_lun *un; 24552 struct cdrom_read mode1_struct; 24553 struct cdrom_read *mode1 = &mode1_struct; 24554 int rval; 24555 #ifdef _MULTI_DATAMODEL 24556 /* To support ILP32 applications in an LP64 world */ 24557 struct cdrom_read32 cdrom_read32; 24558 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24559 #endif /* _MULTI_DATAMODEL */ 24560 24561 if (data == NULL) { 24562 return (EINVAL); 24563 } 24564 24565 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24566 (un->un_state == SD_STATE_OFFLINE)) { 24567 return (ENXIO); 24568 } 24569 24570 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24571 "sd_read_mode1: entry: un:0x%p\n", un); 24572 24573 #ifdef _MULTI_DATAMODEL 24574 switch (ddi_model_convert_from(flag & FMODELS)) { 24575 case DDI_MODEL_ILP32: 24576 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24577 return (EFAULT); 24578 } 24579 /* Convert the ILP32 uscsi data from the application to LP64 */ 24580 cdrom_read32tocdrom_read(cdrd32, mode1); 24581 break; 24582 case DDI_MODEL_NONE: 24583 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24584 return (EFAULT); 24585 } 24586 } 24587 #else /* ! _MULTI_DATAMODEL */ 24588 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24589 return (EFAULT); 24590 } 24591 #endif /* _MULTI_DATAMODEL */ 24592 24593 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 24594 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 24595 24596 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24597 "sd_read_mode1: exit: un:0x%p\n", un); 24598 24599 return (rval); 24600 } 24601 24602 24603 /* 24604 * Function: sr_read_cd_mode2() 24605 * 24606 * Description: This routine is the driver entry point for handling CD-ROM 24607 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24608 * support the READ CD (0xBE) command or the 1st generation 24609 * READ CD (0xD4) command. 24610 * 24611 * Arguments: dev - the device 'dev_t' 24612 * data - pointer to user provided cd read structure specifying 24613 * the lba buffer address and length. 24614 * flag - this argument is a pass through to ddi_copyxxx() 24615 * directly from the mode argument of ioctl(). 
24616 * 24617 * Return Code: the code returned by sd_send_scsi_cmd() 24618 * EFAULT if ddi_copyxxx() fails 24619 * ENXIO if fail ddi_get_soft_state 24620 * EINVAL if data pointer is NULL 24621 */ 24622 24623 static int 24624 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 24625 { 24626 struct sd_lun *un; 24627 struct uscsi_cmd *com; 24628 struct cdrom_read mode2_struct; 24629 struct cdrom_read *mode2 = &mode2_struct; 24630 uchar_t cdb[CDB_GROUP5]; 24631 int nblocks; 24632 int rval; 24633 #ifdef _MULTI_DATAMODEL 24634 /* To support ILP32 applications in an LP64 world */ 24635 struct cdrom_read32 cdrom_read32; 24636 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24637 #endif /* _MULTI_DATAMODEL */ 24638 24639 if (data == NULL) { 24640 return (EINVAL); 24641 } 24642 24643 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24644 (un->un_state == SD_STATE_OFFLINE)) { 24645 return (ENXIO); 24646 } 24647 24648 #ifdef _MULTI_DATAMODEL 24649 switch (ddi_model_convert_from(flag & FMODELS)) { 24650 case DDI_MODEL_ILP32: 24651 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24652 return (EFAULT); 24653 } 24654 /* Convert the ILP32 uscsi data from the application to LP64 */ 24655 cdrom_read32tocdrom_read(cdrd32, mode2); 24656 break; 24657 case DDI_MODEL_NONE: 24658 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24659 return (EFAULT); 24660 } 24661 break; 24662 } 24663 24664 #else /* ! _MULTI_DATAMODEL */ 24665 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24666 return (EFAULT); 24667 } 24668 #endif /* _MULTI_DATAMODEL */ 24669 24670 bzero(cdb, sizeof (cdb)); 24671 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 24672 /* Read command supported by 1st generation atapi drives */ 24673 cdb[0] = SCMD_READ_CDD4; 24674 } else { 24675 /* Universal CD Access Command */ 24676 cdb[0] = SCMD_READ_CD; 24677 } 24678 24679 /* 24680 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 24681 */ 24682 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 24683 24684 /* set the start address */ 24685 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 24686 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 24687 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 24688 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 24689 24690 /* set the transfer length */ 24691 nblocks = mode2->cdread_buflen / 2336; 24692 cdb[6] = (uchar_t)(nblocks >> 16); 24693 cdb[7] = (uchar_t)(nblocks >> 8); 24694 cdb[8] = (uchar_t)nblocks; 24695 24696 /* set the filter bits */ 24697 cdb[9] = CDROM_READ_CD_USERDATA; 24698 24699 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24700 com->uscsi_cdb = (caddr_t)cdb; 24701 com->uscsi_cdblen = sizeof (cdb); 24702 com->uscsi_bufaddr = mode2->cdread_bufaddr; 24703 com->uscsi_buflen = mode2->cdread_buflen; 24704 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24705 24706 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24707 SD_PATH_STANDARD); 24708 kmem_free(com, sizeof (*com)); 24709 return (rval); 24710 } 24711 24712 24713 /* 24714 * Function: sr_read_mode2() 24715 * 24716 * Description: This routine is the driver entry point for handling CD-ROM 24717 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24718 * do not support the READ CD (0xBE) command. 24719 * 24720 * Arguments: dev - the device 'dev_t' 24721 * data - pointer to user provided cd read structure specifying 24722 * the lba buffer address and length. 24723 * flag - this argument is a pass through to ddi_copyxxx() 24724 * directly from the mode argument of ioctl(). 
24725 * 24726 * Return Code: the code returned by sd_send_scsi_cmd() 24727 * EFAULT if ddi_copyxxx() fails 24728 * ENXIO if fail ddi_get_soft_state 24729 * EINVAL if data pointer is NULL 24730 * EIO if fail to reset block size 24731 * EAGAIN if commands are in progress in the driver 24732 */ 24733 24734 static int 24735 sr_read_mode2(dev_t dev, caddr_t data, int flag) 24736 { 24737 struct sd_lun *un; 24738 struct cdrom_read mode2_struct; 24739 struct cdrom_read *mode2 = &mode2_struct; 24740 int rval; 24741 uint32_t restore_blksize; 24742 struct uscsi_cmd *com; 24743 uchar_t cdb[CDB_GROUP0]; 24744 int nblocks; 24745 24746 #ifdef _MULTI_DATAMODEL 24747 /* To support ILP32 applications in an LP64 world */ 24748 struct cdrom_read32 cdrom_read32; 24749 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24750 #endif /* _MULTI_DATAMODEL */ 24751 24752 if (data == NULL) { 24753 return (EINVAL); 24754 } 24755 24756 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24757 (un->un_state == SD_STATE_OFFLINE)) { 24758 return (ENXIO); 24759 } 24760 24761 /* 24762 * Because this routine will update the device and driver block size 24763 * being used we want to make sure there are no commands in progress. 24764 * If commands are in progress the user will have to try again. 24765 * 24766 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 24767 * in sdioctl to protect commands from sdioctl through to the top of 24768 * sd_uscsi_strategy. See sdioctl for details. 24769 */ 24770 mutex_enter(SD_MUTEX(un)); 24771 if (un->un_ncmds_in_driver != 1) { 24772 mutex_exit(SD_MUTEX(un)); 24773 return (EAGAIN); 24774 } 24775 mutex_exit(SD_MUTEX(un)); 24776 24777 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24778 "sd_read_mode2: entry: un:0x%p\n", un); 24779 24780 #ifdef _MULTI_DATAMODEL 24781 switch (ddi_model_convert_from(flag & FMODELS)) { 24782 case DDI_MODEL_ILP32: 24783 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24784 return (EFAULT); 24785 } 24786 /* Convert the ILP32 uscsi data from the application to LP64 */ 24787 cdrom_read32tocdrom_read(cdrd32, mode2); 24788 break; 24789 case DDI_MODEL_NONE: 24790 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24791 return (EFAULT); 24792 } 24793 break; 24794 } 24795 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}


	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* adjust lba for 2kbyte blocks from 512 byte blocks */
	mode2->cdread_lba >>= 2;

	/* set the start address */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "failed to switch back to mode 1\n");
		/*
		 * If sd_send_scsi_READ succeeded we still need to report
		 * an error because we failed to reset the block size
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2 to set the
 *		target block size based on the user specified size. This is
 *		a legacy implementation based upon a vendor specific mode
 *		page.
 *
 * Arguments: dev	- the device 'dev_t'
 *	      blksize	- the block size being set, either 2336 or 512.
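 *
 *	      (Sketch of the 20-byte select buffer built below, assuming
 *	      the usual group 0 mode select layout: a 4-byte mode header
 *	      whose byte 3 gives the 8-byte block descriptor length, a
 *	      block descriptor whose last two bytes carry the new block
 *	      length, then the vendor page itself, page code 0x01, which is
 *	      presumably 0x81 with the saved-page bit cleared, and a page
 *	      length of 0x06.)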
24881 * 24882 * Return Code: the code returned by sd_send_scsi_cmd() 24883 * EFAULT if ddi_copyxxx() fails 24884 * ENXIO if fail ddi_get_soft_state 24885 * EINVAL if data pointer is NULL 24886 */ 24887 24888 static int 24889 sr_sector_mode(dev_t dev, uint32_t blksize) 24890 { 24891 struct sd_lun *un; 24892 uchar_t *sense; 24893 uchar_t *select; 24894 int rval; 24895 24896 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24897 (un->un_state == SD_STATE_OFFLINE)) { 24898 return (ENXIO); 24899 } 24900 24901 sense = kmem_zalloc(20, KM_SLEEP); 24902 24903 /* Note: This is a vendor specific mode page (0x81) */ 24904 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 24905 SD_PATH_STANDARD)) != 0) { 24906 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 24907 "sr_sector_mode: Mode Sense failed\n"); 24908 kmem_free(sense, 20); 24909 return (rval); 24910 } 24911 select = kmem_zalloc(20, KM_SLEEP); 24912 select[3] = 0x08; 24913 select[10] = ((blksize >> 8) & 0xff); 24914 select[11] = (blksize & 0xff); 24915 select[12] = 0x01; 24916 select[13] = 0x06; 24917 select[14] = sense[14]; 24918 select[15] = sense[15]; 24919 if (blksize == SD_MODE2_BLKSIZE) { 24920 select[14] |= 0x01; 24921 } 24922 24923 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 24924 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 24925 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 24926 "sr_sector_mode: Mode Select failed\n"); 24927 } else { 24928 /* 24929 * Only update the softstate block size if we successfully 24930 * changed the device block mode. 24931 */ 24932 mutex_enter(SD_MUTEX(un)); 24933 sd_update_block_info(un, blksize, 0); 24934 mutex_exit(SD_MUTEX(un)); 24935 } 24936 kmem_free(sense, 20); 24937 kmem_free(select, 20); 24938 return (rval); 24939 } 24940 24941 24942 /* 24943 * Function: sr_read_cdda() 24944 * 24945 * Description: This routine is the driver entry point for handling CD-ROM 24946 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 24947 * the target supports CDDA these requests are handled via a vendor 24948 * specific command (0xD8) If the target does not support CDDA 24949 * these requests are handled via the READ CD command (0xBE). 24950 * 24951 * Arguments: dev - the device 'dev_t' 24952 * data - pointer to user provided CD-DA structure specifying 24953 * the track starting address, transfer length, and 24954 * subcode options. 24955 * flag - this argument is a pass through to ddi_copyxxx() 24956 * directly from the mode argument of ioctl(). 
24957 * 24958 * Return Code: the code returned by sd_send_scsi_cmd() 24959 * EFAULT if ddi_copyxxx() fails 24960 * ENXIO if fail ddi_get_soft_state 24961 * EINVAL if invalid arguments are provided 24962 * ENOTTY 24963 */ 24964 24965 static int 24966 sr_read_cdda(dev_t dev, caddr_t data, int flag) 24967 { 24968 struct sd_lun *un; 24969 struct uscsi_cmd *com; 24970 struct cdrom_cdda *cdda; 24971 int rval; 24972 size_t buflen; 24973 char cdb[CDB_GROUP5]; 24974 24975 #ifdef _MULTI_DATAMODEL 24976 /* To support ILP32 applications in an LP64 world */ 24977 struct cdrom_cdda32 cdrom_cdda32; 24978 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 24979 #endif /* _MULTI_DATAMODEL */ 24980 24981 if (data == NULL) { 24982 return (EINVAL); 24983 } 24984 24985 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24986 return (ENXIO); 24987 } 24988 24989 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 24990 24991 #ifdef _MULTI_DATAMODEL 24992 switch (ddi_model_convert_from(flag & FMODELS)) { 24993 case DDI_MODEL_ILP32: 24994 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 24995 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24996 "sr_read_cdda: ddi_copyin Failed\n"); 24997 kmem_free(cdda, sizeof (struct cdrom_cdda)); 24998 return (EFAULT); 24999 } 25000 /* Convert the ILP32 uscsi data from the application to LP64 */ 25001 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25002 break; 25003 case DDI_MODEL_NONE: 25004 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25005 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25006 "sr_read_cdda: ddi_copyin Failed\n"); 25007 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25008 return (EFAULT); 25009 } 25010 break; 25011 } 25012 #else /* ! _MULTI_DATAMODEL */ 25013 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25014 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25015 "sr_read_cdda: ddi_copyin Failed\n"); 25016 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25017 return (EFAULT); 25018 } 25019 #endif /* _MULTI_DATAMODEL */ 25020 25021 /* 25022 * Since MMC-2 expects max 3 bytes for length, check if the 25023 * length input is greater than 3 bytes 25024 */ 25025 if ((cdda->cdda_length & 0xFF000000) != 0) { 25026 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25027 "cdrom transfer length too large: %d (limit %d)\n", 25028 cdda->cdda_length, 0xFFFFFF); 25029 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25030 return (EINVAL); 25031 } 25032 25033 switch (cdda->cdda_subcode) { 25034 case CDROM_DA_NO_SUBCODE: 25035 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25036 break; 25037 case CDROM_DA_SUBQ: 25038 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25039 break; 25040 case CDROM_DA_ALL_SUBCODE: 25041 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25042 break; 25043 case CDROM_DA_SUBCODE_ONLY: 25044 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25045 break; 25046 default: 25047 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25048 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25049 cdda->cdda_subcode); 25050 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25051 return (EINVAL); 25052 } 25053 25054 /* Build and send the command */ 25055 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25056 bzero(cdb, CDB_GROUP5); 25057 25058 if (un->un_f_cfg_cdda == TRUE) { 25059 cdb[0] = (char)SCMD_READ_CD; 25060 cdb[1] = 0x04; 25061 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25062 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25063 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25064 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25065 
		cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdda->cdda_length) & 0x000000ff);
		cdb[9] = 0x10;
		switch (cdda->cdda_subcode) {
		case CDROM_DA_NO_SUBCODE:
			cdb[10] = 0x0;
			break;
		case CDROM_DA_SUBQ:
			cdb[10] = 0x2;
			break;
		case CDROM_DA_ALL_SUBCODE:
			cdb[10] = 0x1;
			break;
		case CDROM_DA_SUBCODE_ONLY:
			/* FALLTHROUGH */
		default:
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			kmem_free(com, sizeof (*com));
			return (ENOTTY);
		}
	} else {
		cdb[0] = (char)SCMD_READ_CDDA;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
		cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdda->cdda_length) & 0x000000ff);
		cdb[10] = cdda->cdda_subcode;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(cdda, sizeof (struct cdrom_cdda));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_cdxa()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-XA (Extended Architecture) data.
 *		(CDROMCDXA).
 *
 * Arguments: dev	- the device 'dev_t'
 *	      data	- pointer to user provided CD-XA structure specifying
 *			  the data starting address, transfer length, and
 *			  format
 *	      flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cdxa(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdxa	*cdxa;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];
	uchar_t			read_flags;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdxa32		cdrom_cdxa32;
	struct cdrom_cdxa32		*cdxa32 = &cdrom_cdxa32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		/*
		 * Convert the ILP32 uscsi data from the
		 * application to LP64 for internal use.
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here to
		 * request CD-XA data.
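		 *
		 * Unlike the MMC READ CD built above, which packs a 24-bit
		 * transfer length into bytes 6-8, this vendor CDB carries a
		 * full 32-bit length in bytes 6-9 and the format code in
		 * byte 10.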
25236 */ 25237 cdb[0] = (char)SCMD_READ_CDXA; 25238 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25239 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25240 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25241 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25242 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25243 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25244 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25245 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25246 cdb[10] = cdxa->cdxa_format; 25247 } 25248 com->uscsi_cdb = cdb; 25249 com->uscsi_cdblen = CDB_GROUP5; 25250 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25251 com->uscsi_buflen = buflen; 25252 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25253 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25254 SD_PATH_STANDARD); 25255 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25256 kmem_free(com, sizeof (*com)); 25257 return (rval); 25258 } 25259 25260 25261 /* 25262 * Function: sr_eject() 25263 * 25264 * Description: This routine is the driver entry point for handling CD-ROM 25265 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25266 * 25267 * Arguments: dev - the device 'dev_t' 25268 * 25269 * Return Code: the code returned by sd_send_scsi_cmd() 25270 */ 25271 25272 static int 25273 sr_eject(dev_t dev) 25274 { 25275 struct sd_lun *un; 25276 int rval; 25277 25278 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25279 (un->un_state == SD_STATE_OFFLINE)) { 25280 return (ENXIO); 25281 } 25282 25283 /* 25284 * To prevent race conditions with the eject 25285 * command, keep track of an eject command as 25286 * it progresses. If we are already handling 25287 * an eject command in the driver for the given 25288 * unit and another request to eject is received 25289 * immediately return EAGAIN so we don't lose 25290 * the command if the current eject command fails. 25291 */ 25292 mutex_enter(SD_MUTEX(un)); 25293 if (un->un_f_ejecting == TRUE) { 25294 mutex_exit(SD_MUTEX(un)); 25295 return (EAGAIN); 25296 } 25297 un->un_f_ejecting = TRUE; 25298 mutex_exit(SD_MUTEX(un)); 25299 25300 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25301 SD_PATH_STANDARD)) != 0) { 25302 mutex_enter(SD_MUTEX(un)); 25303 un->un_f_ejecting = FALSE; 25304 mutex_exit(SD_MUTEX(un)); 25305 return (rval); 25306 } 25307 25308 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25309 SD_PATH_STANDARD); 25310 25311 if (rval == 0) { 25312 mutex_enter(SD_MUTEX(un)); 25313 sr_ejected(un); 25314 un->un_mediastate = DKIO_EJECTED; 25315 un->un_f_ejecting = FALSE; 25316 cv_broadcast(&un->un_state_cv); 25317 mutex_exit(SD_MUTEX(un)); 25318 } else { 25319 mutex_enter(SD_MUTEX(un)); 25320 un->un_f_ejecting = FALSE; 25321 mutex_exit(SD_MUTEX(un)); 25322 } 25323 return (rval); 25324 } 25325 25326 25327 /* 25328 * Function: sr_ejected() 25329 * 25330 * Description: This routine updates the soft state structure to invalidate the 25331 * geometry information after the media has been ejected or a 25332 * media eject has been detected. 
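 *
 *		(Callers hold SD_MUTEX, as the ASSERTs below verify; the
 *		mutex is dropped only around the cmlb_invalidate() call.)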
25333 * 25334 * Arguments: un - driver soft state (unit) structure 25335 */ 25336 25337 static void 25338 sr_ejected(struct sd_lun *un) 25339 { 25340 struct sd_errstats *stp; 25341 25342 ASSERT(un != NULL); 25343 ASSERT(mutex_owned(SD_MUTEX(un))); 25344 25345 un->un_f_blockcount_is_valid = FALSE; 25346 un->un_f_tgt_blocksize_is_valid = FALSE; 25347 mutex_exit(SD_MUTEX(un)); 25348 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 25349 mutex_enter(SD_MUTEX(un)); 25350 25351 if (un->un_errstats != NULL) { 25352 stp = (struct sd_errstats *)un->un_errstats->ks_data; 25353 stp->sd_capacity.value.ui64 = 0; 25354 } 25355 } 25356 25357 25358 /* 25359 * Function: sr_check_wp() 25360 * 25361 * Description: This routine checks the write protection of a removable 25362 * media disk and hotpluggable devices via the write protect bit of 25363 * the Mode Page Header device specific field. Some devices choke 25364 * on unsupported mode page. In order to workaround this issue, 25365 * this routine has been implemented to use 0x3f mode page(request 25366 * for all pages) for all device types. 25367 * 25368 * Arguments: dev - the device 'dev_t' 25369 * 25370 * Return Code: int indicating if the device is write protected (1) or not (0) 25371 * 25372 * Context: Kernel thread. 25373 * 25374 */ 25375 25376 static int 25377 sr_check_wp(dev_t dev) 25378 { 25379 struct sd_lun *un; 25380 uchar_t device_specific; 25381 uchar_t *sense; 25382 int hdrlen; 25383 int rval = FALSE; 25384 25385 /* 25386 * Note: The return codes for this routine should be reworked to 25387 * properly handle the case of a NULL softstate. 25388 */ 25389 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25390 return (FALSE); 25391 } 25392 25393 if (un->un_f_cfg_is_atapi == TRUE) { 25394 /* 25395 * The mode page contents are not required; set the allocation 25396 * length for the mode page header only 25397 */ 25398 hdrlen = MODE_HEADER_LENGTH_GRP2; 25399 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25400 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 25401 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25402 goto err_exit; 25403 device_specific = 25404 ((struct mode_header_grp2 *)sense)->device_specific; 25405 } else { 25406 hdrlen = MODE_HEADER_LENGTH; 25407 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25408 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 25409 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25410 goto err_exit; 25411 device_specific = 25412 ((struct mode_header *)sense)->device_specific; 25413 } 25414 25415 /* 25416 * Write protect mode sense failed; not all disks 25417 * understand this query. Return FALSE assuming that 25418 * these devices are not writable. 25419 */ 25420 if (device_specific & WRITE_PROTECT) { 25421 rval = TRUE; 25422 } 25423 25424 err_exit: 25425 kmem_free(sense, hdrlen); 25426 return (rval); 25427 } 25428 25429 /* 25430 * Function: sr_volume_ctrl() 25431 * 25432 * Description: This routine is the driver entry point for handling CD-ROM 25433 * audio output volume ioctl requests. (CDROMVOLCTRL) 25434 * 25435 * Arguments: dev - the device 'dev_t' 25436 * data - pointer to user audio volume control structure 25437 * flag - this argument is a pass through to ddi_copyxxx() 25438 * directly from the mode argument of ioctl(). 
25439 * 25440 * Return Code: the code returned by sd_send_scsi_cmd() 25441 * EFAULT if ddi_copyxxx() fails 25442 * ENXIO if fail ddi_get_soft_state 25443 * EINVAL if data pointer is NULL 25444 * 25445 */ 25446 25447 static int 25448 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25449 { 25450 struct sd_lun *un; 25451 struct cdrom_volctrl volume; 25452 struct cdrom_volctrl *vol = &volume; 25453 uchar_t *sense_page; 25454 uchar_t *select_page; 25455 uchar_t *sense; 25456 uchar_t *select; 25457 int sense_buflen; 25458 int select_buflen; 25459 int rval; 25460 25461 if (data == NULL) { 25462 return (EINVAL); 25463 } 25464 25465 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25466 (un->un_state == SD_STATE_OFFLINE)) { 25467 return (ENXIO); 25468 } 25469 25470 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25471 return (EFAULT); 25472 } 25473 25474 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25475 struct mode_header_grp2 *sense_mhp; 25476 struct mode_header_grp2 *select_mhp; 25477 int bd_len; 25478 25479 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 25480 select_buflen = MODE_HEADER_LENGTH_GRP2 + 25481 MODEPAGE_AUDIO_CTRL_LEN; 25482 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25483 select = kmem_zalloc(select_buflen, KM_SLEEP); 25484 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 25485 sense_buflen, MODEPAGE_AUDIO_CTRL, 25486 SD_PATH_STANDARD)) != 0) { 25487 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25488 "sr_volume_ctrl: Mode Sense Failed\n"); 25489 kmem_free(sense, sense_buflen); 25490 kmem_free(select, select_buflen); 25491 return (rval); 25492 } 25493 sense_mhp = (struct mode_header_grp2 *)sense; 25494 select_mhp = (struct mode_header_grp2 *)select; 25495 bd_len = (sense_mhp->bdesc_length_hi << 8) | 25496 sense_mhp->bdesc_length_lo; 25497 if (bd_len > MODE_BLK_DESC_LENGTH) { 25498 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25499 "sr_volume_ctrl: Mode Sense returned invalid " 25500 "block descriptor length\n"); 25501 kmem_free(sense, sense_buflen); 25502 kmem_free(select, select_buflen); 25503 return (EIO); 25504 } 25505 sense_page = (uchar_t *) 25506 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 25507 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 25508 select_mhp->length_msb = 0; 25509 select_mhp->length_lsb = 0; 25510 select_mhp->bdesc_length_hi = 0; 25511 select_mhp->bdesc_length_lo = 0; 25512 } else { 25513 struct mode_header *sense_mhp, *select_mhp; 25514 25515 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25516 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25517 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25518 select = kmem_zalloc(select_buflen, KM_SLEEP); 25519 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 25520 sense_buflen, MODEPAGE_AUDIO_CTRL, 25521 SD_PATH_STANDARD)) != 0) { 25522 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25523 "sr_volume_ctrl: Mode Sense Failed\n"); 25524 kmem_free(sense, sense_buflen); 25525 kmem_free(select, select_buflen); 25526 return (rval); 25527 } 25528 sense_mhp = (struct mode_header *)sense; 25529 select_mhp = (struct mode_header *)select; 25530 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 25531 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25532 "sr_volume_ctrl: Mode Sense returned invalid " 25533 "block descriptor length\n"); 25534 kmem_free(sense, sense_buflen); 25535 kmem_free(select, select_buflen); 25536 return (EIO); 25537 } 25538 sense_page = (uchar_t *) 25539 (sense + MODE_HEADER_LENGTH + 
	/*
	 * Note: An audio control data structure could be created and overlaid
	 * on the following in place of the array indexing method implemented.
	 */

	/* Build the select data for the user volume data */
	select_page[0] = MODEPAGE_AUDIO_CTRL;
	select_page[1] = 0xE;
	/* Set the immediate bit */
	select_page[2] = 0x04;
	/* Zero out reserved fields */
	select_page[3] = 0x00;
	select_page[4] = 0x00;
	/* Return sense data for fields not to be modified */
	select_page[5] = sense_page[5];
	select_page[6] = sense_page[6];
	select_page[7] = sense_page[7];
	/* Set the user specified volume levels for channel 0 and 1 */
	select_page[8] = 0x01;
	select_page[9] = vol->channel0;
	select_page[10] = 0x02;
	select_page[11] = vol->channel1;
	/* Channel 2 and 3 are currently unsupported so return the sense data */
	select_page[12] = sense_page[12];
	select_page[13] = sense_page[13];
	select_page[14] = sense_page[14];
	select_page[15] = sense_page[15];

	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	}

	kmem_free(sense, sense_buflen);
	kmem_free(select, select_buflen);
	return (rval);
}
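
/*
 * For illustration, a minimal userland sketch of driving CDROMVOLCTRL
 * (the device path and volume values here are hypothetical):
 *
 *	#include <sys/cdio.h>
 *	#include <fcntl.h>
 *
 *	struct cdrom_volctrl v = { 128, 128, 0, 0 };
 *	int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, CDROMVOLCTRL, &v) != 0)
 *		perror("CDROMVOLCTRL");
 */
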
/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information. (CDROMREADOFFSET)
 *		The address of the first track in the last session of a
 *		multi-session CD-ROM is returned.
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check
 *		here or in the ioctl routine.
 *
 * Arguments: dev	- the device 'dev_t'
 *	      data	- pointer to an int to hold the requested address
 *	      flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	caddr_t			buffer;
	char			cdb[CDB_GROUP1];
	int			session_offset = 0;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte response data)
	 */
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	/* Byte 9 is the control byte.  A vendor specific value is used */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The offset is returned in units of the current lbasize
		 * blocks.  Convert it to 2K blocks before returning it to
		 * the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be initialized.
 *	      un	- sd_lun structure for the device.
 *	      flag	- the km flags passed to constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be uninitialized.
 *	      un	- sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}
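
/*
 * For illustration, the constructor/destructor pair above is intended to
 * be wired into a kmem cache at attach time; a sketch of such a call (the
 * cache name and alignment shown are assumptions, not the values attach
 * actually uses):
 *
 *	un->un_wm_cache = kmem_cache_create("sd_wm_cache",
 *	    sizeof (struct sd_w_map), 8, sd_wm_cache_constructor,
 *	    sd_wm_cache_destructor, NULL, (void *)un, NULL, 0);
 */
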
/*
 * Function: sd_range_lock()
 *
 * Description: Lock the range of blocks specified as parameter to ensure
 *		that a read-modify-write is atomic and no other i/o writes
 *		to the same location. The range is specified in terms
 *		of start and end blocks. Block numbers are the actual
 *		media block numbers and not system block numbers.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      startb	- The starting block number
 *	      endb	- The end block number
 *	      typ	- type of i/o - simple/read_modify_write
 *
 * Return Code: wm	- pointer to the wmap structure.
 *
 * Context: This routine can sleep.
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep onlist wmps
						 * while waiting; this macro
						 * will also do wmp = NULL;
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which wait
					 * is done, since the tmp_wmp points
					 * to the inuse wmap, set sl_wmp to
					 * tmp_wmp and change the state to sleep
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked, so try to get a wmap.
			 * First attempt it with NO_SLEEP; we want to avoid a
			 * sleep if possible, as we will have to release the
			 * sd mutex if we have to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * We released the mutex, so recheck and go to
				 * the check list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We exit out of the state machine since we
				 * have the wmap. Do the housekeeping first:
				 * place the wmap on the wmap list if it is
				 * not on it already, then set the state to
				 * done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list then link
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one is
			 * waiting for it.
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability of
			 * range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Check whether there is any I/O overlapping this one.
 *		Returns the write map of the first such I/O, NULL otherwise.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      startb	- The starting block number
 *	      endb	- The end block number
 *
 * Return Code: wm	- pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      wmp	- sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free the write map if nobody else is waiting on it.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      wm	- sd_w_map which needs to be unlinked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be freed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}
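
/*
 * For illustration, callers bracket a read-modify-write window with the
 * two routines above (a sketch; the block range is hypothetical):
 *
 *	struct sd_w_map *wmp;
 *
 *	wmp = sd_range_lock(un, start_blk, end_blk, SD_WTYPE_RMW);
 *	... read the enclosing target blocks, merge, write them back ...
 *	sd_range_unlock(un, wmp);
 */
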
/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request.  This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
 *
 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the completion
	 * of the read portion of the rmw request completing under interrupt
	 * context. The write request must be sent from here down the iostart
	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
	 * we use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}
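
/*
 * For illustration, the read-completion side hands the write phase to the
 * routine above through the driver taskq; a sketch of the dispatch call
 * (the flag shown is an assumption about the caller):
 *
 *	(void) taskq_dispatch(sd_tq, sd_read_modify_write_task, bp,
 *	    KM_NOSLEEP);
 */
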
/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine will be called from sddump().  If sddump() is
 *		called with an I/O which is not aligned on a device blocksize
 *		boundary, then the write has to be converted to a
 *		read-modify-write.  Do the read part here in order to keep
 *		sddump() simple.  Note that the sd_mutex is held across the
 *		call to this routine.
 *
 * Arguments: un	- sd_lun
 *	      blkno	- block number in terms of media block size.
 *	      nblk	- number of blocks.
 *	      bpp	- pointer to pointer to the buf structure. On return
 *			  from this function, *bpp points to the valid buffer
 *			  to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * scsi_poll() returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.  We should only check
		 * errors if this condition is not true.  Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the HBA.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE 1st, give up if device is gone;
		 * no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}
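
/*
 * For illustration, the retry ladder implemented above, in order of
 * escalation (a summary of the loop, not additional behavior):
 *
 *	poll OK and no resid	-> done
 *	CMD_DEV_GONE		-> give up
 *	CHECK condition		-> polled request sense, retry
 *	BUSY			-> LUN reset, else target reset, retry
 *	anything else		-> sd_reset_target(), retry
 *	past half the retries	-> bus reset (RESET_ALL) as well
 */
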
/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go through the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * update kstat since the bp is moved out of
			 * the waitq
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}
	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare next I/O operation using DMA_PARTIAL
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure how many blocks have NOT been transferred to the HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer.  sd_setup_next_rw_pkt
	 * will call scsi_initpkt with NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}
#endif
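
/*
 * For illustration, a worked example of the arithmetic above with
 * hypothetical numbers: a 1 MB request (b_bcount = 1048576) on 512-byte
 * target blocks, with 256 KB still to go (xb_dma_resid = 262144):
 *
 *	bytes_not_xfered    = 262144
 *	num_blks_not_xfered = 262144 / 512 = 512 blocks
 *	strt_blk_num        = xb_blkno + (1048576 - 262144) / 512
 *			    = xb_blkno + 1536
 */
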
/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for handling fault injection for error analysis,
 * causing faults in multiple layers of the driver.
 *
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		fault injection ioctls to inject errors into the
 *		layer model
 *
 * Arguments: cmd	- the ioctl cmd received
 *	      arg	- the arguments from user and returns
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) {

	uint_t i;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;
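
	/*
	 * For illustration, a test harness typically drives a session
	 * through these ioctls in the following order (a sketch; the
	 * SDIOC* commands are the private test interface from sddef.h):
	 *
	 *	ioctl(fd, SDIOCSTART, NULL);		start a session
	 *	ioctl(fd, SDIOCINSERTPKT, &fi_pkt);	stage errors
	 *	ioctl(fd, SDIOCPUSH, &n);		arm n staged errors
	 *	ioctl(fd, SDIOCRUN, NULL);		inject on next I/Os
	 *	ioctl(fd, SDIOCRETRIEVE, logbuf);	collect the log
	 *	ioctl(fd, SDIOCSTOP, NULL);		end the session
	 */
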
	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR allowed in Queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;
	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from Injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine adds buf to the already existing injection log
 *		for retrieval via faultinjection_ioctl for use in fault
 *		detection and recovery
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to Injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}
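
/*
 * For illustration, callers hand this routine a preformatted string; a
 * sketch (the message buffer and its contents are hypothetical):
 *
 *	char msg[64];
 *
 *	(void) snprintf(msg, sizeof (msg), "fi: pkt_reason 0x%x\n", reason);
 *	sd_injection_log(msg, un);
 */
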
/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 * Arguments: pktp	- packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}


	/* take next set off fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether we set values
	 * or not, let's confirm they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in the conf file should already have been processed, including
 * the "hotpluggable" property.
 *
 * The sd driver distinguishes 3 different types of devices: removable media,
 * non-removable media, and hotpluggable. Below the differences are defined:
 *
 * 1. Device ID
 *
 * The device ID of a device is used to identify this device. Refer to
 * ddi_devid_register(9F).
 *
 * For a non-removable media disk device which can provide 0x80 or 0x83
 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
 * device ID is created to identify this device. For other non-removable
 * media devices, a default device ID is created only if this device has
 * at least 2 alternate cylinders. Otherwise, this device has no devid.
 *
 * -------------------------------------------------------
 * removable media   hotpluggable  | Can Have Device ID
 * -------------------------------------------------------
 *     false             false     |     Yes
 *     false             true      |     Yes
 *     true                x       |     No
 * -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 * In SCSI specs, only some commands in the group 4 command set can use
 * 8-byte addresses that can be used to access >2TB storage spaces.
 * Other commands have no such capability. Without supporting group 4,
 * it is impossible to make full use of the storage space of a disk with
 * capacity larger than 2TB.
 *
 * -----------------------------------------------
 * removable media   hotpluggable   LP64  |  Group
 * -----------------------------------------------
 *     false             false     false  |   1
 *     false             false     true   |   4
 *     false             true      false  |   1
 *     false             true      true   |   4
 *     true                x         x    |   5
 * -----------------------------------------------
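 *
 * For illustration, a group 4 READ (READ(16), SCMD_READ_G4) carries a
 * 64-bit LBA, which is what makes capacities beyond 2TB addressable
 * (a sketch of the CDB layout only; see the SPC/SBC specifications for
 * the authoritative definition):
 *
 *	cdb[0]       = SCMD_READ_G4;		0x88
 *	cdb[2..9]    = starting LBA, 64-bit, big-endian
 *	cdb[10..13]  = transfer length in blocks, 32-bit
 *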
 *
 * 3. Check for VTOC Label
 *
 * If a direct-access disk has no EFI label, sd will check if it has a
 * valid VTOC label. Now, sd also does that check for removable media
 * and hotpluggable devices.
 *
 * --------------------------------------------------------------
 * Direct-Access   removable media   hotpluggable |  Check Label
 * -------------------------------------------------------------
 *     false           false           false      |   No
 *     false           false           true       |   No
 *     false           true            false      |   Yes
 *     false           true            true       |   Yes
 *     true              x               x        |   Yes
 * --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 * As section 3 says, sd checks whether some kinds of devices have a VTOC
 * label. If those devices have no valid VTOC label, sd(7d) will attempt
 * to create a default VTOC for them. Currently sd creates a default VTOC
 * label for all devices on the x86 platform (VTOC_16), but only for
 * removable media devices on SPARC (VTOC_8).
 *
 * -----------------------------------------------------------
 *  removable media   hotpluggable   platform  | Default Label
 * -----------------------------------------------------------
 *      false             false        sparc   |     No
 *      false             true         x86     |     Yes
 *      false             true         sparc   |     Yes
 *      true                x            x     |     Yes
 * ----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 * sd supports non-512-byte blocksizes for removable media devices only.
 * For other devices, only a 512-byte blocksize is supported. This may be
 * changed in the near future because some RAID devices require a
 * non-512-byte blocksize.
 *
 * -----------------------------------------------------------
 *  removable media    hotpluggable  | non-512-byte blocksize
 * -----------------------------------------------------------
 *       false             false     |   No
 *       false             true      |   No
 *       true                x       |   Yes
 * -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl. This ioctl is used
 * to query if a device is a removable media device. It returns 1 for
 * removable media devices, and 0 for others.
 *
 * The automatic mounting subsystem should distinguish between the types
 * of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 * Fdisk is the traditional partitioning method on the x86 platform. The
 * sd(7d) driver supports fdisk partitions only on x86; on SPARC, sd
 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
 * fdisk partitions on both x86 and SPARC platforms.
 *
 * -----------------------------------------------------------
 *  platform   removable media  USB/1394  |  fdisk supported
 * -----------------------------------------------------------
 *    x86            X               X    |       true
 * ------------------------------------------------------------
 *    sparc          X               X    |       false
 * ------------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 * Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 * support read/write of the mboot for removable media devices on SPARC.
 *
 * -----------------------------------------------------------
 *  platform   removable media  USB/1394  |  mboot supported
 * -----------------------------------------------------------
 *    x86            X               X    |       true
 * ------------------------------------------------------------
 *    sparc        false           false  |       false
 *    sparc        false           true   |       true
 *    sparc        true            false  |       true
 *    sparc        true            true   |       true
 * ------------------------------------------------------------
 *
 *
 * 9. error handling during opening device
 *
 * If a disk device fails to open, an errno is returned. For some kinds
 * of errors, a different errno is returned depending on whether the
 * device is a removable media device. This brings USB/1394 hard disks
 * in line with expected hard disk behavior. It is not expected that this
 * breaks any application.
 *
 * ------------------------------------------------------
 *  removable media    hotpluggable   |  errno
 * ------------------------------------------------------
 *       false             false      |   EIO
 *       false             true       |   EIO
 *       true                x        |   ENXIO
 * ------------------------------------------------------
 *
 *
 * 11. ioctls: DKIOCEJECT, CDROMEJECT
 *
 * These IOCTLs are applicable only to removable media devices.
 *
 * -----------------------------------------------------------
 *  removable media    hotpluggable  | DKIOCEJECT, CDROMEJECT
 * -----------------------------------------------------------
 *       false             false     |   No
 *       false             true      |   No
 *       true                x       |   Yes
 * -----------------------------------------------------------
 *
 *
 * 12. Kstats for partitions
 *
 * sd creates partition kstats for non-removable media devices. USB and
 * Firewire hard disks now have partition kstats.
 *
 * ------------------------------------------------------
 *  removable media    hotpluggable   |  kstat
 * ------------------------------------------------------
 *       false             false      |   Yes
 *       false             true       |   Yes
 *       true                x        |   No
 * ------------------------------------------------------
 *
 *
 * 13. Removable media & hotpluggable properties
 *
 * The sd driver creates a "removable-media" property for removable media
 * devices. Parent nexus drivers create a "hotpluggable" property if
 * they support hotplugging.
 *
 * ---------------------------------------------------------------------
 *  removable media  hotpluggable | "removable-media"   "hotpluggable"
 * ---------------------------------------------------------------------
 *       false           false    |      No                  No
 *       false           true     |      No                  Yes
 *       true            false    |      Yes                 No
 *       true            true     |      Yes                 Yes
 * ---------------------------------------------------------------------
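 *
 * For illustration, since both are zero-length (boolean) properties,
 * presence is the test; a minimal in-kernel sketch using
 * ddi_prop_exists(9F) (the dip variable is hypothetical):
 *
 *	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *	    "removable-media"))
 *		... the device has removable media ...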
 *
 *
 * 14. Power Management
 *
 * sd only power manages removable media devices or devices that support
 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 * A parent nexus that supports hotplugging can also set "pm-capable"
 * if the disk can be power managed.
 *
 * ------------------------------------------------------------
 *  removable media  hotpluggable  pm-capable  |  power manage
 * ------------------------------------------------------------
 *       false          false         false    |   No
 *       false          false         true     |   Yes
 *       false          true          false    |   No
 *       false          true          true     |   Yes
 *       true             x             x      |   Yes
 * ------------------------------------------------------------
 *
 * USB and firewire hard disks can now be power managed independently
 * of the framebuffer.
 *
 *
 * 15. Support for USB disks with capacity larger than 1TB
 *
 * Currently, sd doesn't permit a fixed disk device with capacity
 * larger than 1TB to be used in a 32-bit operating system environment.
 * However, sd doesn't do that for removable media devices. Instead, it
 * assumes that removable media devices cannot have a capacity larger
 * than 1TB. Therefore, using those devices on a 32-bit system is
 * partially supported, which can cause some unexpected results.
 *
 * ---------------------------------------------------------------------
 *  removable media   USB/1394 | Capacity > 1TB |  Used in 32-bit env
 * ---------------------------------------------------------------------
 *       false          false  |      true      |       no
 *       false          true   |      true      |       no
 *       true           false  |      true      |       Yes
 *       true           true   |      true      |       Yes
 * ---------------------------------------------------------------------
 *
 *
 * 16. Check write-protection at open time
 *
 * When a removable media device is being opened for writing without the
 * NDELAY flag, sd will check if this device is writable. An attempt to
 * open a write-protected device for writing without the NDELAY flag is
 * aborted.
 *
 * ------------------------------------------------------------
 *  removable media    USB/1394   |  WP Check
 * ------------------------------------------------------------
 *       false           false    |   No
 *       false           true     |   No
 *       true            false    |   Yes
 *       true            true     |   Yes
 * ------------------------------------------------------------
 *
 *
 * 17. syslog when corrupted VTOC is encountered
 *
 * Currently, if an invalid VTOC is encountered, sd only prints a syslog
 * message for fixed SCSI disks.
 *
 * ------------------------------------------------------------
 *  removable media    USB/1394   |  print syslog
 * ------------------------------------------------------------
 *       false           false    |   Yes
 *       false           true     |   No
 *       true            false    |   No
 *       true            true     |   No
 * ------------------------------------------------------------
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after the
		 * device has been opened. Thus we should support this
		 * operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * support non-512-byte blocksize of removable media devices
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * For a removable media device, it is possible to be opened
		 * with the NDELAY flag when there is no media in the drive;
		 * in this case we don't care if the device is writable. But
		 * without the NDELAY flag, we need to check if the media is
		 * write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor media state;
		 * when media is being inserted or ejected, notify syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command.
		 * Therefore, we'd better check if a device supports it
		 * before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * support eject media ioctl:
		 *		FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we cannot use this command to check if a
		 * removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (Boolean) property
		 * removable-media for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked, since if unable to create the property we do
		 * not want the attach to fail altogether.  This is
		 * consistent with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * create device ID for device
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are attached
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * kinds of format: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats.  The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));
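
		/*
		 * For illustration, a one-line sd.conf entry of the
		 * documented driver.conf(4) name=value form turns these
		 * kstats off (a sketch):
		 *
		 *	enable-partition-kstats=0;
		 */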
/*
 * sd_tg_rdwr:
 *	Provides read/write access for cmlb via sd_tgops. start_block is
 *	given in system block size units; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int	path_flag = (int)(uintptr_t)tg_cookie;
	char	*dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t	buffer_size = reqlength;
	int	rval;
	diskaddr_t cap;
	uint32_t lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block
		 * number and remember the byte offset of the caller's
		 * data within the bounce buffer.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to a multiple of the target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be inaccurate by a
	 * bounded amount (in the interest of response latency). As a
	 * result, failed READs are commonplace (due to the reading of
	 * metadata rather than data). Depending on the per-vendor/drive
	 * sense data, the failed READ can cause many (unnecessary)
	 * retries.
	 */
	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if ((rval == 0) && (dkl != NULL))
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			/*
			 * Unaligned write: read the covering target
			 * blocks, merge in the caller's data, then write
			 * back the whole buffer.
			 */
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un,
			    start_block, real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}
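
/*
 * Worked example (illustrative only) of the unaligned-access math in
 * sd_tg_rdwr() above, assuming a 512-byte system block size and a
 * 2048-byte target block size:
 *
 *	start_block = 3, reqlength = 1024
 *	first_byte  = 3 * 512                     = 1536
 *	real_addr   = 1536 / 2048                 = 0
 *	end_block   = (1536 + 1024 + 2047) / 2048 = 2
 *	buffer_size = (2 - 0) * 2048              = 4096
 *
 * Since first_byte is not a multiple of the target block size, a
 * 4096-byte bounce buffer (dkl) is allocated. For a read, target
 * blocks 0 and 1 are read into dkl and the 1024 requested bytes are
 * copied out starting at byte offset 1536. For a write, the same
 * blocks are read first, the caller's 1024 bytes are merged in at
 * offset 1536, and the full 4096 bytes are written back.
 */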
static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t cap;
	uint32_t lbasize;
	int	path_flag = (int)(uintptr_t)tg_cookie;
	int	ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un,
			    (uint64_t *)&cap, &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);

	default:
		return (ENOTTY);
	}
}
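
/*
 * For context, a sketch of how sd_tg_rdwr() and sd_tg_getinfo() are
 * plumbed into cmlb. The driver hands them to cmlb in a cmlb_tg_ops_t
 * (sd_tgops) at attach time, roughly as follows; the actual
 * definitions and the cmlb_attach() call elsewhere in this file may
 * differ in detail:
 *
 *	static cmlb_tg_ops_t sd_tgops = {
 *		TG_DK_OPS_VERSION_1,
 *		sd_tg_rdwr,
 *		sd_tg_getinfo
 *	};
 *
 *	(void) cmlb_attach(devi, &sd_tgops, DTYPE_DIRECT,
 *	    un->un_f_has_removable_media, un->un_f_is_hotpluggable,
 *	    DDI_NT_BLOCK, 0, un->un_cmlbhandle,
 *	    (void *)SD_PATH_DIRECT);
 *
 * cmlb then calls back through these entry points whenever it needs
 * raw disk access (e.g. to read a label) or device geometry.
 */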