/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 *
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
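/*
 * Illustrative sketch only (not driver code) of the property check
 * described above, as it might be performed at attach time. "un" is
 * assumed to be the instance soft state; scsi_ifgetcap(9F) returns -1
 * when the HBA does not report the capability, which falls through to
 * the backward-compatible default (the real driver also distinguishes
 * SD_INTERCONNECT_FABRIC; this simplified sketch folds the recognized
 * values into the fibre behavior described above):
 *
 *	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", 1)) {
 *	case INTERCONNECT_FIBRE:
 *	case INTERCONNECT_SSA:
 *	case INTERCONNECT_FABRIC:
 *		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
 *		break;
 *	default:
 *		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
 *		break;
 *	}
 */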
/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))
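/*
 * All of the above are patchable global tunables. For example, the maximum
 * number of outstanding commands per device could be lowered by adding the
 * following line (illustrative value) to /etc/system and rebooting:
 *
 *	set sd:sd_max_throttle = 32
 *
 * For the fibre channel module the prefix is ssd: rather than sd:.
 */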
static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;
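/*
 * For example, full debug output for a single unit could be enabled on a
 * live system with mdb(1) by writing the masks and the unit's soft state
 * address (illustrative values; the actual bit patterns are defined in
 * sddef.h):
 *
 *	> sd_component_mask/W ffffffff
 *	> sd_level_mask/W f
 *	> sd_debug_un/Z <address of the sd_lun of interest>
 */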
/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))
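/*
 * A minimal sketch (assumed local variables, not driver code) of how the
 * chain above is consumed, e.g. by sd_scsi_get_target_lun_count(): locate
 * the node for the HBA's dev_info node and read the per-target count, all
 * under sd_scsi_target_lun_mutex:
 *
 *	struct sd_scsi_hba_tgt_lun *cp;
 *	int count = -1;
 *
 *	mutex_enter(&sd_scsi_target_lun_mutex);
 *	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
 *		if (cp->pdip == dip) {
 *			count = cp->nlun[target];
 *			break;
 *		}
 *	}
 *	mutex_exit(&sd_scsi_target_lun_mutex);
 */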
/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
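/*
 * The caching idea, sketched (simplified from sd_scsi_probe_with_cache();
 * "cp" is assumed to be the cache node for the HBA): a target that
 * previously failed to respond is not probed again; the cached
 * SCSIPROBE_NORESP result is returned instead of paying for another slow
 * scsi_probe() call.
 *
 *	if (cp->cache[tgt] == SCSIPROBE_NORESP)
 *		return (SCSIPROBE_NORESP);	    <- cached miss, no probe
 *	rval = scsi_probe(devp, fn);
 *	if (rval == SCSIPROBE_NORESP)
 *		cp->cache[tgt] = SCSIPROBE_NORESP;  <- remember the failure
 *	return (rval);
 */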
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
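/*
 * For example, a device whose inquiry data carries vid "SEAGATE " and pid
 * "ST39102FC" produces the concatenated id "SEAGATE ST39102FC", matching
 * the corresponding entry below and causing seagate_properties (i.e. its
 * throttle value) to be applied; "*CSM200_*" instead matches any device
 * whose PID merely contains "CSM200_". Conceptually the table is consumed
 * as follows (sketch only; the real logic lives in
 * sd_process_sdconf_table() and sd_sdconf_id_match()):
 *
 *	for (i = 0; i < sd_disk_table_size; i++) {
 *		if (sd_sdconf_id_match(un, sd_disk_table[i].device_id,
 *		    strlen(sd_disk_table[i].device_id)) == SD_SUCCESS) {
 *			sd_set_vers1_properties(un, sd_disk_table[i].flags,
 *			    sd_disk_table[i].properties);
 *			break;
 *		}
 *	}
 */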
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_RSV_REL_TIME|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED,
	    &sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT,
	    &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT,
	    &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT,
	    &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT,
	    &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT,
	    &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
	    | SD_CONF_BSET_CTYPE
	    | SD_CONF_BSET_NRR_COUNT
	    | SD_CONF_BSET_FAB_DEVID
	    | SD_CONF_BSET_NOCACHE
	    | SD_CONF_BSET_BSY_RETRY_COUNT
	    | SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_TRK_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4
	    | SD_CONF_BSET_RST_RETRIES
	    | SD_CONF_BSET_RSV_REL_TIME
	    | SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t	sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
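/*
 * Each entry above gives, per CDB group: the CDB length, the group code,
 * and the largest LBA and transfer length expressible in that format
 * (e.g. a 6-byte Group 0 CDB reaches only LBA 0x1FFFFF with at most 0xFF
 * blocks per transfer). Conceptually, CDB selection walks the table and
 * picks the first group that covers the request (sketch only; see
 * sd_setup_rw_pkt() for the real logic, and sddef.h for the sd_cdbinfo
 * field names assumed here):
 *
 *	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
 *		if (lba <= sd_cdbtab[i].sc_maxlba &&
 *		    blockcount <= sd_cdbtab[i].sc_maxlen)
 *			break;		<- build the CDB with this group
 *	}
 */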
/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());
/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target,
    int flag);

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void sd_get_nv_sup(struct sd_lun *un);
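/*
 * Illustrative call: enable the write cache while leaving the read cache
 * setting untouched (rcd_flag controls the RCD bit and wce_flag the WCE
 * bit of the caching mode page):
 *
 *	(void) sd_cache_control(un, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */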
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,	\
	path_flag)
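/*
 * Illustrative use of the convenience macro above (arguments assumed):
 * read one system block at LBA 0 through the standard command path,
 * expanding to a SCMD_READ request via sd_send_scsi_RDWR():
 *
 *	rval = sd_send_scsi_READ(un, bufaddr, un->un_sys_blocksize,
 *	    (daddr_t)0, SD_PATH_STANDARD);
 */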
flag); 1475 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1476 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1477 static int sd_mhdioc_release(dev_t dev); 1478 static int sd_mhdioc_register_devid(dev_t dev); 1479 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1480 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1481 1482 /* 1483 * SCSI removable prototypes 1484 */ 1485 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1486 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1487 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1488 static int sr_pause_resume(dev_t dev, int mode); 1489 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1490 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1491 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1492 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1493 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1494 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1495 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1496 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1497 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1498 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1499 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1500 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1501 static int sr_eject(dev_t dev); 1502 static void sr_ejected(register struct sd_lun *un); 1503 static int sr_check_wp(dev_t dev); 1504 static int sd_check_media(dev_t dev, enum dkio_state state); 1505 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1506 static void sd_delayed_cv_broadcast(void *arg); 1507 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1508 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1509 1510 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1511 1512 /* 1513 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1514 */ 1515 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1516 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1517 static void sd_wm_cache_destructor(void *wm, void *un); 1518 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1519 daddr_t endb, ushort_t typ); 1520 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1521 daddr_t endb); 1522 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1523 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1524 static void sd_read_modify_write_task(void * arg); 1525 static int 1526 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1527 struct buf **bpp); 1528 1529 1530 /* 1531 * Function prototypes for failfast support. 
1532 */ 1533 static void sd_failfast_flushq(struct sd_lun *un); 1534 static int sd_failfast_flushq_callback(struct buf *bp); 1535 1536 /* 1537 * Function prototypes to check for lsi devices 1538 */ 1539 static void sd_is_lsi(struct sd_lun *un); 1540 1541 /* 1542 * Function prototypes for partial DMA support 1543 */ 1544 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1545 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1546 1547 1548 /* Function prototypes for cmlb */ 1549 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1550 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1551 1552 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1553 1554 /* 1555 * Constants for failfast support: 1556 * 1557 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1558 * failfast processing being performed. 1559 * 1560 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1561 * failfast processing on all bufs with B_FAILFAST set. 1562 */ 1563 1564 #define SD_FAILFAST_INACTIVE 0 1565 #define SD_FAILFAST_ACTIVE 1 1566 1567 /* 1568 * Bitmask to control behavior of buf(9S) flushes when a transition to 1569 * the failfast state occurs. Optional bits include: 1570 * 1571 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1572 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1573 * be flushed. 1574 * 1575 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1576 * driver, in addition to the regular wait queue. This includes the xbuf 1577 * queues. When clear, only the driver's wait queue will be flushed. 1578 */ 1579 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1580 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1581 1582 /* 1583 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1584 * to flush all queues within the driver. 1585 */ 1586 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1587 1588 1589 /* 1590 * SD Testing Fault Injection 1591 */ 1592 #ifdef SD_FAULT_INJECTION 1593 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1594 static void sd_faultinjection(struct scsi_pkt *pktp); 1595 static void sd_injection_log(char *buf, struct sd_lun *un); 1596 #endif 1597 1598 /* 1599 * Device driver ops vector 1600 */ 1601 static struct cb_ops sd_cb_ops = { 1602 sdopen, /* open */ 1603 sdclose, /* close */ 1604 sdstrategy, /* strategy */ 1605 nodev, /* print */ 1606 sddump, /* dump */ 1607 sdread, /* read */ 1608 sdwrite, /* write */ 1609 sdioctl, /* ioctl */ 1610 nodev, /* devmap */ 1611 nodev, /* mmap */ 1612 nodev, /* segmap */ 1613 nochpoll, /* poll */ 1614 sd_prop_op, /* cb_prop_op */ 1615 0, /* streamtab */ 1616 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1617 CB_REV, /* cb_rev */ 1618 sdaread, /* async I/O read entry point */ 1619 sdawrite /* async I/O write entry point */ 1620 }; 1621 1622 static struct dev_ops sd_ops = { 1623 DEVO_REV, /* devo_rev, */ 1624 0, /* refcnt */ 1625 sdinfo, /* info */ 1626 nulldev, /* identify */ 1627 sdprobe, /* probe */ 1628 sdattach, /* attach */ 1629 sddetach, /* detach */ 1630 nodev, /* reset */ 1631 &sd_cb_ops, /* driver operations */ 1632 NULL, /* bus operations */ 1633 sdpower /* power */ 1634 }; 1635 1636 1637 /* 1638 * This is the loadable module wrapper. 1639 */ 1640 #include <sys/modctl.h> 1641 1642 static struct modldrv modldrv = { 1643 &mod_driverops, /* Type of module. 
This one is a driver */
	SD_MODULE_NAME,		/* Module name. */
	&sd_ops			/* driver ops */
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static cmlb_tg_ops_t sd_tgops = {
	TG_DK_OPS_VERSION_1,
	sd_tg_rdwr,
	sd_tg_getinfo
};

static struct scsi_asq_key_strings sd_additional_codes[] = {
	0x81, 0, "Logical Unit is Reserved",
	0x85, 0, "Audio Address Not Valid",
	0xb6, 0, "Media Load Mechanism Failed",
	0xb9, 0, "Audio Play Operation Aborted",
	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
	0x53, 2, "Medium removal prevented",
	0x6f, 0, "Authentication failed during key exchange",
	0x6f, 1, "Key not present",
	0x6f, 2, "Key not established",
	0x6f, 3, "Read without proper authentication",
	0x6f, 4, "Mismatched region to this logical unit",
	0x6f, 5, "Region reset count error",
	0xffff, 0x0, NULL
};


/*
 * Struct for passing printing information for sense data messages
 */
struct sd_sense_info {
	int	ssi_severity;
	int	ssi_pfa_flag;
};

/*
 * Table of function pointers for iostart-side routines. Separate "chains"
 * of layered function calls are formed by placing the function pointers
 * sequentially in the desired order. Functions are called according to an
 * incrementing table index ordering. The last function in each chain must
 * be sd_core_iostart(). The corresponding iodone-side routines are expected
 * in the sd_iodone_chain[] array.
 *
 * Note: It may seem more natural to organize both the iostart and iodone
 * functions together, into an array of structures (or some similar
 * organization) with a common index, rather than two separate arrays which
 * must be maintained in synchronization. The purpose of this division is
 * to achieve improved performance: individual arrays allow for more
 * effective cache line utilization on certain platforms.
 */

typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);


static sd_chain_t sd_iostart_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 0 */
	sd_pm_iostart,			/* Index: 1 */
	sd_core_iostart,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 3 */
	sd_core_iostart,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 5 */
	sd_mapblocksize_iostart,	/* Index: 6 */
	sd_pm_iostart,			/* Index: 7 */
	sd_core_iostart,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 9 */
	sd_mapblocksize_iostart,	/* Index: 10 */
	sd_core_iostart,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 12 */
	sd_checksum_iostart,		/* Index: 13 */
	sd_pm_iostart,			/* Index: 14 */
	sd_core_iostart,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 16 */
	sd_checksum_iostart,		/* Index: 17 */
	sd_core_iostart,		/* Index: 18 */

	/* Chain for USCSI commands (all targets) */
	sd_pm_iostart,			/* Index: 19 */
	sd_core_iostart,		/* Index: 20 */

	/* Chain for checksumming USCSI commands (all targets) */
	sd_checksum_uscsi_iostart,	/* Index: 21 */
	sd_pm_iostart,			/* Index: 22 */
	sd_core_iostart,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 25 */
};

/*
 * Macros to locate the first function of each iostart chain in the
 * sd_iostart_chain[] array. These are located by the index in the array.
 */
#define	SD_CHAIN_DISK_IOSTART			0
#define	SD_CHAIN_DISK_IOSTART_NO_PM		3
#define	SD_CHAIN_RMMEDIA_IOSTART		5
#define	SD_CHAIN_RMMEDIA_IOSTART_NO_PM		9
#define	SD_CHAIN_CHKSUM_IOSTART			12
#define	SD_CHAIN_CHKSUM_IOSTART_NO_PM		16
#define	SD_CHAIN_USCSI_CMD_IOSTART		19
#define	SD_CHAIN_USCSI_CHKSUM_IOSTART		21
#define	SD_CHAIN_DIRECT_CMD_IOSTART		24
#define	SD_CHAIN_PRIORITY_CMD_IOSTART		25


/*
 * Table of function pointers for the iodone-side routines for the driver-
 * internal layering mechanism. The calling sequence for iodone routines
 * uses a decrementing table index, so the last routine called in a chain
 * must be at the lowest array index location for that chain. The last
 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
 * of the functions in an iodone side chain must correspond to the ordering
 * of the iostart routines for that chain. Note that there is no iodone
 * side routine that corresponds to sd_core_iostart(), so there is no
 * entry in the table for this.
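 *
 * As an illustration only, the calling discipline for these two tables can
 * be sketched in isolation. The following is a standalone sketch with
 * hypothetical demo_* names (userland C, not part of this driver); the
 * iodone side is the mirror image, walking (index - 1) through a parallel
 * done-side table:
 */

#if 0	/* Standalone illustration; build as a user program, not in sd. */
#include <stdio.h>

typedef void (*demo_chain_t)(int index);

static void demo_mapblockaddr(int index);
static void demo_pm(int index);
static void demo_core(int index);

/* A three-stage start chain mirroring indexes 0..2 above. */
static demo_chain_t demo_iostart_chain[] = {
	demo_mapblockaddr,	/* Index: 0 */
	demo_pm,		/* Index: 1 */
	demo_core,		/* Index: 2 */
};

/* Forward the request to the next layer, exactly one index further on. */
#define	DEMO_NEXT_IOSTART(index) \
	((*(demo_iostart_chain[(index) + 1]))((index) + 1))

static void
demo_mapblockaddr(int index)
{
	(void) printf("mapblockaddr layer, index %d\n", index);
	DEMO_NEXT_IOSTART(index);
}

static void
demo_pm(int index)
{
	(void) printf("pm layer, index %d\n", index);
	DEMO_NEXT_IOSTART(index);
}

static void
demo_core(int index)
{
	/* Terminal stage: nothing to forward to, as with sd_core_iostart. */
	(void) printf("core layer, index %d\n", index);
}

int
main(void)
{
	(*(demo_iostart_chain[0]))(0);	/* prints layers 0, 1, 2 in order */
	return (0);
}
#endif

/*
 * Within the driver itself these walks are performed by the
 * SD_NEXT_IOSTART() and SD_NEXT_IODONE() macros defined later in this file.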
 */

static sd_chain_t sd_iodone_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_buf_iodone,			/* Index: 0 */
	sd_mapblockaddr_iodone,		/* Index: 1 */
	sd_pm_iodone,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_buf_iodone,			/* Index: 3 */
	sd_mapblockaddr_iodone,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_buf_iodone,			/* Index: 5 */
	sd_mapblockaddr_iodone,		/* Index: 6 */
	sd_mapblocksize_iodone,		/* Index: 7 */
	sd_pm_iodone,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_buf_iodone,			/* Index: 9 */
	sd_mapblockaddr_iodone,		/* Index: 10 */
	sd_mapblocksize_iodone,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_buf_iodone,			/* Index: 12 */
	sd_mapblockaddr_iodone,		/* Index: 13 */
	sd_checksum_iodone,		/* Index: 14 */
	sd_pm_iodone,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_buf_iodone,			/* Index: 16 */
	sd_mapblockaddr_iodone,		/* Index: 17 */
	sd_checksum_iodone,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_uscsi_iodone,		/* Index: 19 */
	sd_pm_iodone,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_uscsi_iodone,		/* Index: 21 */
	sd_checksum_uscsi_iodone,	/* Index: 22 */
	sd_pm_iodone,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 25 */
};


/*
 * Macros to locate the "first" function in the sd_iodone_chain[] array for
 * each iodone-side chain. These are located by the array index, but as the
 * iodone side functions are called in a decrementing-index order, the
 * highest index number in each chain must be specified (as these correspond
 * to the first function in the iodone chain that will be called by the core
 * at IO completion time).
 */

#define	SD_CHAIN_DISK_IODONE			2
#define	SD_CHAIN_DISK_IODONE_NO_PM		4
#define	SD_CHAIN_RMMEDIA_IODONE			8
#define	SD_CHAIN_RMMEDIA_IODONE_NO_PM		11
#define	SD_CHAIN_CHKSUM_IODONE			15
#define	SD_CHAIN_CHKSUM_IODONE_NO_PM		18
#define	SD_CHAIN_USCSI_CMD_IODONE		20
#define	SD_CHAIN_USCSI_CHKSUM_IODONE		22
#define	SD_CHAIN_DIRECT_CMD_IODONE		24
#define	SD_CHAIN_PRIORITY_CMD_IODONE		25


/*
 * Array to map a layering chain index to the appropriate initpkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);

static sd_initpkt_t sd_initpkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 0 */
	sd_initpkt_for_buf,		/* Index: 1 */
	sd_initpkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 3 */
	sd_initpkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 5 */
	sd_initpkt_for_buf,		/* Index: 6 */
	sd_initpkt_for_buf,		/* Index: 7 */
	sd_initpkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 9 */
	sd_initpkt_for_buf,		/* Index: 10 */
	sd_initpkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 12 */
	sd_initpkt_for_buf,		/* Index: 13 */
	sd_initpkt_for_buf,		/* Index: 14 */
	sd_initpkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 16 */
	sd_initpkt_for_buf,		/* Index: 17 */
	sd_initpkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 19 */
	sd_initpkt_for_uscsi,		/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 21 */
	sd_initpkt_for_uscsi,		/* Index: 22 */
	sd_initpkt_for_uscsi,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 25 */

};


/*
 * Array to map a layering chain index to the appropriate destroypkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
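 *
 * As a sketch only (driver context assumed; error handling elided; the
 * packet-allocation return code SD_PKT_ALLOC_SUCCESS and the transport
 * return code TRAN_ACCEPT are assumed here, as used with these routines
 * elsewhere in sd), the init and destroy maps pair up around
 * scsi_transport() like this:
 */

#if 0	/* illustration only */
	struct sd_xbuf	*xp = SD_GET_XBUF(bp);
	struct scsi_pkt	*pktp = NULL;
	int		index = xp->xb_chain_iostart;

	/* Build the packet with the init routine selected by the index... */
	if ((*(sd_initpkt_map[index]))(bp, &pktp) == SD_PKT_ALLOC_SUCCESS &&
	    scsi_transport(pktp) != TRAN_ACCEPT) {
		/* ...and tear it down with the matching destroy routine. */
		(*(sd_destroypkt_map[index]))(bp);
	}
#endif

/*
 * The same index value thus selects mutually consistent routines from
 * every parallel table.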
 */
typedef void (*sd_destroypkt_t)(struct buf *);

static sd_destroypkt_t sd_destroypkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 0 */
	sd_destroypkt_for_buf,		/* Index: 1 */
	sd_destroypkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 3 */
	sd_destroypkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 5 */
	sd_destroypkt_for_buf,		/* Index: 6 */
	sd_destroypkt_for_buf,		/* Index: 7 */
	sd_destroypkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 9 */
	sd_destroypkt_for_buf,		/* Index: 10 */
	sd_destroypkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 12 */
	sd_destroypkt_for_buf,		/* Index: 13 */
	sd_destroypkt_for_buf,		/* Index: 14 */
	sd_destroypkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 16 */
	sd_destroypkt_for_buf,		/* Index: 17 */
	sd_destroypkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 19 */
	sd_destroypkt_for_uscsi,	/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 21 */
	sd_destroypkt_for_uscsi,	/* Index: 22 */
	sd_destroypkt_for_uscsi,	/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 25 */

};


/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */

#define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
#define	SD_CHAIN_BUFIO			1	/* regular buf IO */
#define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
#define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
#define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
						/* (for error recovery) */

static int sd_chain_type_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 0 */
	SD_CHAIN_BUFIO,			/* Index: 1 */
	SD_CHAIN_BUFIO,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 3 */
	SD_CHAIN_BUFIO,			/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 5 */
	SD_CHAIN_BUFIO,			/* Index: 6 */
	SD_CHAIN_BUFIO,			/* Index: 7 */
	SD_CHAIN_BUFIO,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 9 */
	SD_CHAIN_BUFIO,			/* Index: 10 */
	SD_CHAIN_BUFIO,			/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 12 */
	SD_CHAIN_BUFIO,			/* Index: 13 */
	SD_CHAIN_BUFIO,			/* Index: 14 */
	SD_CHAIN_BUFIO,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 16 */
	SD_CHAIN_BUFIO,			/* Index: 17 */
	SD_CHAIN_BUFIO,			/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 19 */
	SD_CHAIN_USCSI,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 21 */
	SD_CHAIN_USCSI,			/* Index: 22 */
	SD_CHAIN_USCSI,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	SD_CHAIN_DIRECT,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
};


/* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
#define	SD_IS_BUFIO(xp)			\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)

/* Macro to return TRUE if the IO has come from the "direct priority" chain. */
#define	SD_IS_DIRECT_PRIORITY(xp)	\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)


/*
 * Struct, array, and macros to map a specific chain to the appropriate
 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
 *
 * The sd_chain_index_map[] array is used at attach time to set the various
 * un_xxx_chain type members of the sd_lun softstate to the specific layering
 * chain to be used with the instance. This allows different instances to use
 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
 * values at sd_xbuf init time, this allows (1) layering chains to be changed
 * dynamically and without the use of locking, and (2) a layer to update the
 * xb_chain_io[start|done] member in a given xbuf with its current index value,
 * to allow for deferred processing of an IO within the same chain from a
 * different execution context.
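 *
 * A minimal sketch of what this enables (illustration only; un and xp are
 * assumed to have been set up as done elsewhere in this file):
 */

#if 0	/* illustration only */
	/* At attach time an instance selects a chain pair by name... */
	un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;

	/* ...and each xbuf is later seeded from sd_chain_index_map[]. */
	xp->xb_chain_iostart =
	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
	xp->xb_chain_iodone =
	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;
#endif

/*
 * The structure and map that implement this follow.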
 */

struct sd_chain_index {
	int	sci_iostart_index;
	int	sci_iodone_index;
};

static struct sd_chain_index	sd_chain_index_map[] = {
	{ SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
	{ SD_CHAIN_DISK_IOSTART_NO_PM,		SD_CHAIN_DISK_IODONE_NO_PM },
	{ SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
	{ SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
	{ SD_CHAIN_USCSI_CMD_IOSTART,		SD_CHAIN_USCSI_CMD_IODONE },
	{ SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
	{ SD_CHAIN_DIRECT_CMD_IOSTART,		SD_CHAIN_DIRECT_CMD_IODONE },
	{ SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
};


/*
 * The following are indexes into the sd_chain_index_map[] array.
 */

/* un->un_buf_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DISK		0
#define	SD_CHAIN_INFO_DISK_NO_PM	1
#define	SD_CHAIN_INFO_RMMEDIA		2
#define	SD_CHAIN_INFO_RMMEDIA_NO_PM	3
#define	SD_CHAIN_INFO_CHKSUM		4
#define	SD_CHAIN_INFO_CHKSUM_NO_PM	5

/* un->un_uscsi_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_USCSI_CMD		6
/* USCSI with PM disabled is the same as DIRECT */
#define	SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
#define	SD_CHAIN_INFO_USCSI_CHKSUM	7

/* un->un_direct_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DIRECT_CMD	8

/* un->un_priority_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_PRIORITY_CMD	9

/* size for devid inquiries */
#define	MAX_INQUIRY_SIZE	0xF0

/*
 * Macros used by functions to pass a given buf(9S) struct along to the
 * next function in the layering chain for further processing.
 *
 * In the following macros, passing more than three arguments to the called
 * routines causes the optimizer for the SPARC compiler to stop doing tail
 * call elimination, which results in significant performance degradation.
 */
#define	SD_BEGIN_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[index]))(index, un, bp))

#define	SD_BEGIN_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[index]))(index, un, bp))

#define	SD_NEXT_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))

#define	SD_NEXT_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))

/*
 * Function: _init
 *
 * Description: This is the driver _init(9E) entry point.
 *
 * Return Code: Returns the value from mod_install(9F) or
 *		ddi_soft_state_init(9F) as appropriate.
 *
 * Context: Called when the driver module is loaded.
 */

int
_init(void)
{
	int err;

	/* establish driver name from module name */
	sd_label = mod_modname(&modlinkage);

	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
	    SD_MAXUNIT);
	if (err != 0) {
		return (err);
	}

	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);

	/*
	 * It is OK to initialize here even for a fibre device.
	 */
	sd_scsi_probe_cache_init();

	sd_scsi_target_lun_init();

	/*
	 * Creating taskq before mod_install ensures that all callers (threads)
	 * that enter the module after a successful mod_install encounter
	 * a valid taskq.
	 */
	sd_taskq_create();

	err = mod_install(&modlinkage);
	if (err != 0) {
		/* delete taskq if install fails */
		sd_taskq_delete();

		mutex_destroy(&sd_detach_mutex);
		mutex_destroy(&sd_log_mutex);
		mutex_destroy(&sd_label_mutex);

		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
		cv_destroy(&sd_tr.srq_inprocess_cv);

		sd_scsi_probe_cache_fini();

		sd_scsi_target_lun_fini();

		ddi_soft_state_fini(&sd_state);
		return (err);
	}

	return (err);
}


/*
 * Function: _fini
 *
 * Description: This is the driver _fini(9E) entry point.
 *
 * Return Code: Returns the value from mod_remove(9F).
 *
 * Context: Called when the driver module is unloaded.
 */

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) != 0) {
		return (err);
	}

	sd_taskq_delete();

	mutex_destroy(&sd_detach_mutex);
	mutex_destroy(&sd_log_mutex);
	mutex_destroy(&sd_label_mutex);
	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);

	sd_scsi_probe_cache_fini();

	sd_scsi_target_lun_fini();

	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
	cv_destroy(&sd_tr.srq_inprocess_cv);

	ddi_soft_state_fini(&sd_state);

	return (err);
}


/*
 * Function: _info
 *
 * Description: This is the driver _info(9E) entry point.
 *
 * Arguments: modinfop - pointer to the driver modinfo structure
 *
 * Return Code: Returns the value from mod_info(9F).
 *
 * Context: Kernel thread context
 */

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/*
 * The following routines implement the driver message logging facility.
 * They provide component- and level-based debug output filtering.
 * Output may also be restricted to messages for a single instance by
 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
 * to NULL, then messages for all instances are printed.
 *
 * These routines have been cloned from each other due to the language
 * constraints of macros and variable argument list processing.
 */


/*
 * Function: sd_log_err
 *
 * Description: This routine is called by the SD_ERROR macro for debug
 *		logging of error conditions.
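 *
 *		For example (usage as it appears elsewhere in this file):
 *
 *		SD_ERROR(SD_LOG_COMMON, un,
 *		    "sd_enable_descr_sense: mode sense ctrl page failed\n");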
2280 * 2281 * Arguments: comp - driver component being logged 2282 * dev - pointer to driver info structure 2283 * fmt - error string and format to be logged 2284 */ 2285 2286 static void 2287 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2288 { 2289 va_list ap; 2290 dev_info_t *dev; 2291 2292 ASSERT(un != NULL); 2293 dev = SD_DEVINFO(un); 2294 ASSERT(dev != NULL); 2295 2296 /* 2297 * Filter messages based on the global component and level masks. 2298 * Also print if un matches the value of sd_debug_un, or if 2299 * sd_debug_un is set to NULL. 2300 */ 2301 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2302 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2303 mutex_enter(&sd_log_mutex); 2304 va_start(ap, fmt); 2305 (void) vsprintf(sd_log_buf, fmt, ap); 2306 va_end(ap); 2307 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2308 mutex_exit(&sd_log_mutex); 2309 } 2310 #ifdef SD_FAULT_INJECTION 2311 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2312 if (un->sd_injection_mask & comp) { 2313 mutex_enter(&sd_log_mutex); 2314 va_start(ap, fmt); 2315 (void) vsprintf(sd_log_buf, fmt, ap); 2316 va_end(ap); 2317 sd_injection_log(sd_log_buf, un); 2318 mutex_exit(&sd_log_mutex); 2319 } 2320 #endif 2321 } 2322 2323 2324 /* 2325 * Function: sd_log_info 2326 * 2327 * Description: This routine is called by the SD_INFO macro for debug 2328 * logging of general purpose informational conditions. 2329 * 2330 * Arguments: comp - driver component being logged 2331 * dev - pointer to driver info structure 2332 * fmt - info string and format to be logged 2333 */ 2334 2335 static void 2336 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2337 { 2338 va_list ap; 2339 dev_info_t *dev; 2340 2341 ASSERT(un != NULL); 2342 dev = SD_DEVINFO(un); 2343 ASSERT(dev != NULL); 2344 2345 /* 2346 * Filter messages based on the global component and level masks. 2347 * Also print if un matches the value of sd_debug_un, or if 2348 * sd_debug_un is set to NULL. 2349 */ 2350 if ((sd_component_mask & component) && 2351 (sd_level_mask & SD_LOGMASK_INFO) && 2352 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2353 mutex_enter(&sd_log_mutex); 2354 va_start(ap, fmt); 2355 (void) vsprintf(sd_log_buf, fmt, ap); 2356 va_end(ap); 2357 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2358 mutex_exit(&sd_log_mutex); 2359 } 2360 #ifdef SD_FAULT_INJECTION 2361 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2362 if (un->sd_injection_mask & component) { 2363 mutex_enter(&sd_log_mutex); 2364 va_start(ap, fmt); 2365 (void) vsprintf(sd_log_buf, fmt, ap); 2366 va_end(ap); 2367 sd_injection_log(sd_log_buf, un); 2368 mutex_exit(&sd_log_mutex); 2369 } 2370 #endif 2371 } 2372 2373 2374 /* 2375 * Function: sd_log_trace 2376 * 2377 * Description: This routine is called by the SD_TRACE macro for debug 2378 * logging of trace conditions (i.e. function entry/exit). 2379 * 2380 * Arguments: comp - driver component being logged 2381 * dev - pointer to driver info structure 2382 * fmt - trace string and format to be logged 2383 */ 2384 2385 static void 2386 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2387 { 2388 va_list ap; 2389 dev_info_t *dev; 2390 2391 ASSERT(un != NULL); 2392 dev = SD_DEVINFO(un); 2393 ASSERT(dev != NULL); 2394 2395 /* 2396 * Filter messages based on the global component and level masks. 2397 * Also print if un matches the value of sd_debug_un, or if 2398 * sd_debug_un is set to NULL. 
2399 */ 2400 if ((sd_component_mask & component) && 2401 (sd_level_mask & SD_LOGMASK_TRACE) && 2402 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2403 mutex_enter(&sd_log_mutex); 2404 va_start(ap, fmt); 2405 (void) vsprintf(sd_log_buf, fmt, ap); 2406 va_end(ap); 2407 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2408 mutex_exit(&sd_log_mutex); 2409 } 2410 #ifdef SD_FAULT_INJECTION 2411 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2412 if (un->sd_injection_mask & component) { 2413 mutex_enter(&sd_log_mutex); 2414 va_start(ap, fmt); 2415 (void) vsprintf(sd_log_buf, fmt, ap); 2416 va_end(ap); 2417 sd_injection_log(sd_log_buf, un); 2418 mutex_exit(&sd_log_mutex); 2419 } 2420 #endif 2421 } 2422 2423 2424 /* 2425 * Function: sdprobe 2426 * 2427 * Description: This is the driver probe(9e) entry point function. 2428 * 2429 * Arguments: devi - opaque device info handle 2430 * 2431 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2432 * DDI_PROBE_FAILURE: If the probe failed. 2433 * DDI_PROBE_PARTIAL: If the instance is not present now, 2434 * but may be present in the future. 2435 */ 2436 2437 static int 2438 sdprobe(dev_info_t *devi) 2439 { 2440 struct scsi_device *devp; 2441 int rval; 2442 int instance; 2443 2444 /* 2445 * if it wasn't for pln, sdprobe could actually be nulldev 2446 * in the "__fibre" case. 2447 */ 2448 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2449 return (DDI_PROBE_DONTCARE); 2450 } 2451 2452 devp = ddi_get_driver_private(devi); 2453 2454 if (devp == NULL) { 2455 /* Ooops... nexus driver is mis-configured... */ 2456 return (DDI_PROBE_FAILURE); 2457 } 2458 2459 instance = ddi_get_instance(devi); 2460 2461 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2462 return (DDI_PROBE_PARTIAL); 2463 } 2464 2465 /* 2466 * Call the SCSA utility probe routine to see if we actually 2467 * have a target at this SCSI nexus. 2468 */ 2469 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2470 case SCSIPROBE_EXISTS: 2471 switch (devp->sd_inq->inq_dtype) { 2472 case DTYPE_DIRECT: 2473 rval = DDI_PROBE_SUCCESS; 2474 break; 2475 case DTYPE_RODIRECT: 2476 /* CDs etc. Can be removable media */ 2477 rval = DDI_PROBE_SUCCESS; 2478 break; 2479 case DTYPE_OPTICAL: 2480 /* 2481 * Rewritable optical driver HP115AA 2482 * Can also be removable media 2483 */ 2484 2485 /* 2486 * Do not attempt to bind to DTYPE_OPTICAL if 2487 * pre solaris 9 sparc sd behavior is required 2488 * 2489 * If first time through and sd_dtype_optical_bind 2490 * has not been set in /etc/system check properties 2491 */ 2492 2493 if (sd_dtype_optical_bind < 0) { 2494 sd_dtype_optical_bind = ddi_prop_get_int 2495 (DDI_DEV_T_ANY, devi, 0, 2496 "optical-device-bind", 1); 2497 } 2498 2499 if (sd_dtype_optical_bind == 0) { 2500 rval = DDI_PROBE_FAILURE; 2501 } else { 2502 rval = DDI_PROBE_SUCCESS; 2503 } 2504 break; 2505 2506 case DTYPE_NOTPRESENT: 2507 default: 2508 rval = DDI_PROBE_FAILURE; 2509 break; 2510 } 2511 break; 2512 default: 2513 rval = DDI_PROBE_PARTIAL; 2514 break; 2515 } 2516 2517 /* 2518 * This routine checks for resource allocation prior to freeing, 2519 * so it will take care of the "smart probing" case where a 2520 * scsi_probe() may or may not have been issued and will *not* 2521 * free previously-freed resources. 2522 */ 2523 scsi_unprobe(devp); 2524 return (rval); 2525 } 2526 2527 2528 /* 2529 * Function: sdinfo 2530 * 2531 * Description: This is the driver getinfo(9e) entry point function. 
2532 * Given the device number, return the devinfo pointer from 2533 * the scsi_device structure or the instance number 2534 * associated with the dev_t. 2535 * 2536 * Arguments: dip - pointer to device info structure 2537 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2538 * DDI_INFO_DEVT2INSTANCE) 2539 * arg - driver dev_t 2540 * resultp - user buffer for request response 2541 * 2542 * Return Code: DDI_SUCCESS 2543 * DDI_FAILURE 2544 */ 2545 /* ARGSUSED */ 2546 static int 2547 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2548 { 2549 struct sd_lun *un; 2550 dev_t dev; 2551 int instance; 2552 int error; 2553 2554 switch (infocmd) { 2555 case DDI_INFO_DEVT2DEVINFO: 2556 dev = (dev_t)arg; 2557 instance = SDUNIT(dev); 2558 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2559 return (DDI_FAILURE); 2560 } 2561 *result = (void *) SD_DEVINFO(un); 2562 error = DDI_SUCCESS; 2563 break; 2564 case DDI_INFO_DEVT2INSTANCE: 2565 dev = (dev_t)arg; 2566 instance = SDUNIT(dev); 2567 *result = (void *)(uintptr_t)instance; 2568 error = DDI_SUCCESS; 2569 break; 2570 default: 2571 error = DDI_FAILURE; 2572 } 2573 return (error); 2574 } 2575 2576 /* 2577 * Function: sd_prop_op 2578 * 2579 * Description: This is the driver prop_op(9e) entry point function. 2580 * Return the number of blocks for the partition in question 2581 * or forward the request to the property facilities. 2582 * 2583 * Arguments: dev - device number 2584 * dip - pointer to device info structure 2585 * prop_op - property operator 2586 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2587 * name - pointer to property name 2588 * valuep - pointer or address of the user buffer 2589 * lengthp - property length 2590 * 2591 * Return Code: DDI_PROP_SUCCESS 2592 * DDI_PROP_NOT_FOUND 2593 * DDI_PROP_UNDEFINED 2594 * DDI_PROP_NO_MEMORY 2595 * DDI_PROP_BUF_TOO_SMALL 2596 */ 2597 2598 static int 2599 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2600 char *name, caddr_t valuep, int *lengthp) 2601 { 2602 int instance = ddi_get_instance(dip); 2603 struct sd_lun *un; 2604 uint64_t nblocks64; 2605 uint_t dblk; 2606 2607 /* 2608 * Our dynamic properties are all device specific and size oriented. 2609 * Requests issued under conditions where size is valid are passed 2610 * to ddi_prop_op_nblocks with the size information, otherwise the 2611 * request is passed to ddi_prop_op. Size depends on valid geometry. 
2612 */ 2613 un = ddi_get_soft_state(sd_state, instance); 2614 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2615 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2616 name, valuep, lengthp)); 2617 } else if (!SD_IS_VALID_LABEL(un)) { 2618 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2619 valuep, lengthp)); 2620 } 2621 2622 /* get nblocks value */ 2623 ASSERT(!mutex_owned(SD_MUTEX(un))); 2624 2625 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2626 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2627 2628 /* report size in target size blocks */ 2629 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 2630 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags, 2631 name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize)); 2632 } 2633 2634 /* 2635 * The following functions are for smart probing: 2636 * sd_scsi_probe_cache_init() 2637 * sd_scsi_probe_cache_fini() 2638 * sd_scsi_clear_probe_cache() 2639 * sd_scsi_probe_with_cache() 2640 */ 2641 2642 /* 2643 * Function: sd_scsi_probe_cache_init 2644 * 2645 * Description: Initializes the probe response cache mutex and head pointer. 2646 * 2647 * Context: Kernel thread context 2648 */ 2649 2650 static void 2651 sd_scsi_probe_cache_init(void) 2652 { 2653 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2654 sd_scsi_probe_cache_head = NULL; 2655 } 2656 2657 2658 /* 2659 * Function: sd_scsi_probe_cache_fini 2660 * 2661 * Description: Frees all resources associated with the probe response cache. 2662 * 2663 * Context: Kernel thread context 2664 */ 2665 2666 static void 2667 sd_scsi_probe_cache_fini(void) 2668 { 2669 struct sd_scsi_probe_cache *cp; 2670 struct sd_scsi_probe_cache *ncp; 2671 2672 /* Clean up our smart probing linked list */ 2673 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2674 ncp = cp->next; 2675 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2676 } 2677 sd_scsi_probe_cache_head = NULL; 2678 mutex_destroy(&sd_scsi_probe_cache_mutex); 2679 } 2680 2681 2682 /* 2683 * Function: sd_scsi_clear_probe_cache 2684 * 2685 * Description: This routine clears the probe response cache. This is 2686 * done when open() returns ENXIO so that when deferred 2687 * attach is attempted (possibly after a device has been 2688 * turned on) we will retry the probe. Since we don't know 2689 * which target we failed to open, we just clear the 2690 * entire cache. 2691 * 2692 * Context: Kernel thread context 2693 */ 2694 2695 static void 2696 sd_scsi_clear_probe_cache(void) 2697 { 2698 struct sd_scsi_probe_cache *cp; 2699 int i; 2700 2701 mutex_enter(&sd_scsi_probe_cache_mutex); 2702 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2703 /* 2704 * Reset all entries to SCSIPROBE_EXISTS. This will 2705 * force probing to be performed the next time 2706 * sd_scsi_probe_with_cache is called. 2707 */ 2708 for (i = 0; i < NTARGETS_WIDE; i++) { 2709 cp->cache[i] = SCSIPROBE_EXISTS; 2710 } 2711 } 2712 mutex_exit(&sd_scsi_probe_cache_mutex); 2713 } 2714 2715 2716 /* 2717 * Function: sd_scsi_probe_with_cache 2718 * 2719 * Description: This routine implements support for a scsi device probe 2720 * with cache. The driver maintains a cache of the target 2721 * responses to scsi probes. If we get no response from a 2722 * target during a probe inquiry, we remember that, and we 2723 * avoid additional calls to scsi_probe on non-zero LUNs 2724 * on the same target until the cache is cleared. 
By doing 2725 * so we avoid the 1/4 sec selection timeout for nonzero 2726 * LUNs. lun0 of a target is always probed. 2727 * 2728 * Arguments: devp - Pointer to a scsi_device(9S) structure 2729 * waitfunc - indicates what the allocator routines should 2730 * do when resources are not available. This value 2731 * is passed on to scsi_probe() when that routine 2732 * is called. 2733 * 2734 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2735 * otherwise the value returned by scsi_probe(9F). 2736 * 2737 * Context: Kernel thread context 2738 */ 2739 2740 static int 2741 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2742 { 2743 struct sd_scsi_probe_cache *cp; 2744 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2745 int lun, tgt; 2746 2747 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2748 SCSI_ADDR_PROP_LUN, 0); 2749 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2750 SCSI_ADDR_PROP_TARGET, -1); 2751 2752 /* Make sure caching enabled and target in range */ 2753 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2754 /* do it the old way (no cache) */ 2755 return (scsi_probe(devp, waitfn)); 2756 } 2757 2758 mutex_enter(&sd_scsi_probe_cache_mutex); 2759 2760 /* Find the cache for this scsi bus instance */ 2761 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2762 if (cp->pdip == pdip) { 2763 break; 2764 } 2765 } 2766 2767 /* If we can't find a cache for this pdip, create one */ 2768 if (cp == NULL) { 2769 int i; 2770 2771 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2772 KM_SLEEP); 2773 cp->pdip = pdip; 2774 cp->next = sd_scsi_probe_cache_head; 2775 sd_scsi_probe_cache_head = cp; 2776 for (i = 0; i < NTARGETS_WIDE; i++) { 2777 cp->cache[i] = SCSIPROBE_EXISTS; 2778 } 2779 } 2780 2781 mutex_exit(&sd_scsi_probe_cache_mutex); 2782 2783 /* Recompute the cache for this target if LUN zero */ 2784 if (lun == 0) { 2785 cp->cache[tgt] = SCSIPROBE_EXISTS; 2786 } 2787 2788 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2789 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2790 return (SCSIPROBE_NORESP); 2791 } 2792 2793 /* Do the actual probe; save & return the result */ 2794 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2795 } 2796 2797 2798 /* 2799 * Function: sd_scsi_target_lun_init 2800 * 2801 * Description: Initializes the attached lun chain mutex and head pointer. 2802 * 2803 * Context: Kernel thread context 2804 */ 2805 2806 static void 2807 sd_scsi_target_lun_init(void) 2808 { 2809 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2810 sd_scsi_target_lun_head = NULL; 2811 } 2812 2813 2814 /* 2815 * Function: sd_scsi_target_lun_fini 2816 * 2817 * Description: Frees all resources associated with the attached lun 2818 * chain 2819 * 2820 * Context: Kernel thread context 2821 */ 2822 2823 static void 2824 sd_scsi_target_lun_fini(void) 2825 { 2826 struct sd_scsi_hba_tgt_lun *cp; 2827 struct sd_scsi_hba_tgt_lun *ncp; 2828 2829 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2830 ncp = cp->next; 2831 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2832 } 2833 sd_scsi_target_lun_head = NULL; 2834 mutex_destroy(&sd_scsi_target_lun_mutex); 2835 } 2836 2837 2838 /* 2839 * Function: sd_scsi_get_target_lun_count 2840 * 2841 * Description: This routine will check in the attached lun chain to see 2842 * how many luns are attached on the required SCSI controller 2843 * and target. 
Currently, some capabilities, such as
 *		tagged queueing, are supported per target by the HBA, so
 *		all luns on a target share the same capabilities. Based on
 *		this assumption, sd should set these capabilities only once
 *		per target. This function is called when sd needs to decide
 *		how many luns are already attached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *		controller device.
 *		target - The target ID on the controller's SCSI bus.
 *
 * Return Code: The number of luns attached on the required target and
 *		controller.
 *		-1 if target ID is not in parallel SCSI scope or the given
 *		dip is not in the chain.
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
{
	struct sd_scsi_hba_tgt_lun	*cp;

	if ((target < 0) || (target >= NTARGETS_WIDE)) {
		return (-1);
	}

	mutex_enter(&sd_scsi_target_lun_mutex);

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == dip) {
			break;
		}
	}

	mutex_exit(&sd_scsi_target_lun_mutex);

	if (cp == NULL) {
		return (-1);
	}

	return (cp->nlun[target]);
}


/*
 * Function: sd_scsi_update_lun_on_target
 *
 * Description: This routine is used to update the attached lun chain when a
 *		lun is attached or detached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *		controller device.
 *		target - The target ID on the controller's SCSI bus.
 *		flag - Indicates whether the lun is attached or detached.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
{
	struct sd_scsi_hba_tgt_lun	*cp;

	mutex_enter(&sd_scsi_target_lun_mutex);

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == dip) {
			break;
		}
	}

	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
		    KM_SLEEP);
		cp->pdip = dip;
		cp->next = sd_scsi_target_lun_head;
		sd_scsi_target_lun_head = cp;
	}

	mutex_exit(&sd_scsi_target_lun_mutex);

	if (cp != NULL) {
		if (flag == SD_SCSI_LUN_ATTACH) {
			cp->nlun[target]++;
		} else {
			cp->nlun[target]--;
		}
	}
}


/*
 * Function: sd_spin_up_unit
 *
 * Description: Issues the following commands to spin up the device:
 *		START STOP UNIT, and INQUIRY.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 - success
 *		EIO - failure
 *		EACCES - reservation conflict
 *
 * Context: Kernel thread context
 */

static int
sd_spin_up_unit(struct sd_lun *un)
{
	size_t	resid = 0;
	int	has_conflict = FALSE;
	uchar_t	*bufaddr;

	ASSERT(un != NULL);

	/*
	 * Send a throwaway START UNIT command.
	 *
	 * If we fail on this, we don't care presently what precisely
	 * is wrong. EMC's arrays will also fail this with a check
	 * condition (0x2/0x4/0x3) if the device is "inactive," but
	 * we don't want to fail the attach because it may become
	 * "active" later.
2968 */ 2969 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2970 == EACCES) 2971 has_conflict = TRUE; 2972 2973 /* 2974 * Send another INQUIRY command to the target. This is necessary for 2975 * non-removable media direct access devices because their INQUIRY data 2976 * may not be fully qualified until they are spun up (perhaps via the 2977 * START command above). Note: This seems to be needed for some 2978 * legacy devices only.) The INQUIRY command should succeed even if a 2979 * Reservation Conflict is present. 2980 */ 2981 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2982 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2983 kmem_free(bufaddr, SUN_INQSIZE); 2984 return (EIO); 2985 } 2986 2987 /* 2988 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2989 * Note that this routine does not return a failure here even if the 2990 * INQUIRY command did not return any data. This is a legacy behavior. 2991 */ 2992 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2993 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2994 } 2995 2996 kmem_free(bufaddr, SUN_INQSIZE); 2997 2998 /* If we hit a reservation conflict above, tell the caller. */ 2999 if (has_conflict == TRUE) { 3000 return (EACCES); 3001 } 3002 3003 return (0); 3004 } 3005 3006 #ifdef _LP64 3007 /* 3008 * Function: sd_enable_descr_sense 3009 * 3010 * Description: This routine attempts to select descriptor sense format 3011 * using the Control mode page. Devices that support 64 bit 3012 * LBAs (for >2TB luns) should also implement descriptor 3013 * sense data so we will call this function whenever we see 3014 * a lun larger than 2TB. If for some reason the device 3015 * supports 64 bit LBAs but doesn't support descriptor sense 3016 * presumably the mode select will fail. Everything will 3017 * continue to work normally except that we will not get 3018 * complete sense data for commands that fail with an LBA 3019 * larger than 32 bits. 3020 * 3021 * Arguments: un - driver soft state (unit) structure 3022 * 3023 * Context: Kernel thread context only 3024 */ 3025 3026 static void 3027 sd_enable_descr_sense(struct sd_lun *un) 3028 { 3029 uchar_t *header; 3030 struct mode_control_scsi3 *ctrl_bufp; 3031 size_t buflen; 3032 size_t bd_len; 3033 3034 /* 3035 * Read MODE SENSE page 0xA, Control Mode Page 3036 */ 3037 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3038 sizeof (struct mode_control_scsi3); 3039 header = kmem_zalloc(buflen, KM_SLEEP); 3040 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3041 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3042 SD_ERROR(SD_LOG_COMMON, un, 3043 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3044 goto eds_exit; 3045 } 3046 3047 /* 3048 * Determine size of Block Descriptors in order to locate 3049 * the mode page data. ATAPI devices return 0, SCSI devices 3050 * should return MODE_BLK_DESC_LENGTH. 3051 */ 3052 bd_len = ((struct mode_header *)header)->bdesc_length; 3053 3054 /* Clear the mode data length field for MODE SELECT */ 3055 ((struct mode_header *)header)->length = 0; 3056 3057 ctrl_bufp = (struct mode_control_scsi3 *) 3058 (header + MODE_HEADER_LENGTH + bd_len); 3059 3060 /* 3061 * If the page length is smaller than the expected value, 3062 * the target device doesn't support D_SENSE. Bail out here. 
3063 */ 3064 if (ctrl_bufp->mode_page.length < 3065 sizeof (struct mode_control_scsi3) - 2) { 3066 SD_ERROR(SD_LOG_COMMON, un, 3067 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3068 goto eds_exit; 3069 } 3070 3071 /* 3072 * Clear PS bit for MODE SELECT 3073 */ 3074 ctrl_bufp->mode_page.ps = 0; 3075 3076 /* 3077 * Set D_SENSE to enable descriptor sense format. 3078 */ 3079 ctrl_bufp->d_sense = 1; 3080 3081 /* 3082 * Use MODE SELECT to commit the change to the D_SENSE bit 3083 */ 3084 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3085 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3086 SD_INFO(SD_LOG_COMMON, un, 3087 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3088 goto eds_exit; 3089 } 3090 3091 eds_exit: 3092 kmem_free(header, buflen); 3093 } 3094 3095 /* 3096 * Function: sd_reenable_dsense_task 3097 * 3098 * Description: Re-enable descriptor sense after device or bus reset 3099 * 3100 * Context: Executes in a taskq() thread context 3101 */ 3102 static void 3103 sd_reenable_dsense_task(void *arg) 3104 { 3105 struct sd_lun *un = arg; 3106 3107 ASSERT(un != NULL); 3108 sd_enable_descr_sense(un); 3109 } 3110 #endif /* _LP64 */ 3111 3112 /* 3113 * Function: sd_set_mmc_caps 3114 * 3115 * Description: This routine determines if the device is MMC compliant and if 3116 * the device supports CDDA via a mode sense of the CDVD 3117 * capabilities mode page. Also checks if the device is a 3118 * dvdram writable device. 3119 * 3120 * Arguments: un - driver soft state (unit) structure 3121 * 3122 * Context: Kernel thread context only 3123 */ 3124 3125 static void 3126 sd_set_mmc_caps(struct sd_lun *un) 3127 { 3128 struct mode_header_grp2 *sense_mhp; 3129 uchar_t *sense_page; 3130 caddr_t buf; 3131 int bd_len; 3132 int status; 3133 struct uscsi_cmd com; 3134 int rtn; 3135 uchar_t *out_data_rw, *out_data_hd; 3136 uchar_t *rqbuf_rw, *rqbuf_hd; 3137 3138 ASSERT(un != NULL); 3139 3140 /* 3141 * The flags which will be set in this function are - mmc compliant, 3142 * dvdram writable device, cdda support. Initialize them to FALSE 3143 * and if a capability is detected - it will be set to TRUE. 3144 */ 3145 un->un_f_mmc_cap = FALSE; 3146 un->un_f_dvdram_writable_device = FALSE; 3147 un->un_f_cfg_cdda = FALSE; 3148 3149 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3150 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3151 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3152 3153 if (status != 0) { 3154 /* command failed; just return */ 3155 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3156 return; 3157 } 3158 /* 3159 * If the mode sense request for the CDROM CAPABILITIES 3160 * page (0x2A) succeeds the device is assumed to be MMC. 3161 */ 3162 un->un_f_mmc_cap = TRUE; 3163 3164 /* Get to the page data */ 3165 sense_mhp = (struct mode_header_grp2 *)buf; 3166 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3167 sense_mhp->bdesc_length_lo; 3168 if (bd_len > MODE_BLK_DESC_LENGTH) { 3169 /* 3170 * We did not get back the expected block descriptor 3171 * length so we cannot determine if the device supports 3172 * CDDA. However, we still indicate the device is MMC 3173 * according to the successful response to the page 3174 * 0x2A mode sense request. 
3175 */ 3176 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3177 "sd_set_mmc_caps: Mode Sense returned " 3178 "invalid block descriptor length\n"); 3179 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3180 return; 3181 } 3182 3183 /* See if read CDDA is supported */ 3184 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3185 bd_len); 3186 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3187 3188 /* See if writing DVD RAM is supported. */ 3189 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3190 if (un->un_f_dvdram_writable_device == TRUE) { 3191 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3192 return; 3193 } 3194 3195 /* 3196 * If the device presents DVD or CD capabilities in the mode 3197 * page, we can return here since a RRD will not have 3198 * these capabilities. 3199 */ 3200 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3201 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3202 return; 3203 } 3204 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3205 3206 /* 3207 * If un->un_f_dvdram_writable_device is still FALSE, 3208 * check for a Removable Rigid Disk (RRD). A RRD 3209 * device is identified by the features RANDOM_WRITABLE and 3210 * HARDWARE_DEFECT_MANAGEMENT. 3211 */ 3212 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3213 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3214 3215 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3216 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3217 RANDOM_WRITABLE, SD_PATH_STANDARD); 3218 if (rtn != 0) { 3219 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3220 kmem_free(rqbuf_rw, SENSE_LENGTH); 3221 return; 3222 } 3223 3224 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3225 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3226 3227 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3228 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3229 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3230 if (rtn == 0) { 3231 /* 3232 * We have good information, check for random writable 3233 * and hardware defect features. 3234 */ 3235 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3236 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3237 un->un_f_dvdram_writable_device = TRUE; 3238 } 3239 } 3240 3241 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3242 kmem_free(rqbuf_rw, SENSE_LENGTH); 3243 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3244 kmem_free(rqbuf_hd, SENSE_LENGTH); 3245 } 3246 3247 /* 3248 * Function: sd_check_for_writable_cd 3249 * 3250 * Description: This routine determines if the media in the device is 3251 * writable or not. It uses the get configuration command (0x46) 3252 * to determine if the media is writable 3253 * 3254 * Arguments: un - driver soft state (unit) structure 3255 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3256 * chain and the normal command waitq, or 3257 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3258 * "direct" chain and bypass the normal command 3259 * waitq. 3260 * 3261 * Context: Never called at interrupt context. 
 */

static void
sd_check_for_writable_cd(struct sd_lun *un, int path_flag)
{
	struct uscsi_cmd	com;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rtn;
	uchar_t			*out_data_rw, *out_data_hd;
	uchar_t			*rqbuf_rw, *rqbuf_hd;
	struct mode_header_grp2	*sense_mhp;
	uchar_t			*sense_page;
	caddr_t			buf;
	int			bd_len;
	int			status;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize un_f_mmc_writable_media to FALSE. It is set to TRUE
	 * only if the configuration information indicates that the media
	 * is writable.
	 */
	un->un_f_mmc_writable_media = FALSE;
	mutex_exit(SD_MUTEX(un));

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
	    out_data, SD_PROFILE_HEADER_LEN, path_flag);

	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for writable DVD.
		 */
		if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
			un->un_f_mmc_writable_media = TRUE;
			kmem_free(out_data, SD_PROFILE_HEADER_LEN);
			kmem_free(rqbuf, SENSE_LENGTH);
			return;
		}
	}

	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);

	/*
	 * Determine if this is an RRD type device.
	 */
	mutex_exit(SD_MUTEX(un));
	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
	mutex_enter(SD_MUTEX(un));
	if (status != 0) {
		/* command failed; just return */
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/* Get to the page data */
	sense_mhp = (struct mode_header_grp2 *)buf;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/*
		 * We did not get back the expected block descriptor length so
		 * we cannot check the mode page.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_check_for_writable_cd: Mode Sense returned "
		    "invalid block descriptor length\n");
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/*
	 * If the device presents DVD or CD capabilities in the mode
	 * page, we can return here since an RRD device will not have
	 * these capabilities.
	 */
	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

	/*
	 * If un->un_f_mmc_writable_media is still FALSE,
	 * check for RRD type media. An RRD device is identified
	 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
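	 * Both feature descriptors must additionally be marked "current"
	 * (bit 0 of byte 10 of each returned descriptor) before the media
	 * is treated as writable, as checked below.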
 */
	mutex_exit(SD_MUTEX(un));
	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
	    RANDOM_WRITABLE, path_flag);
	if (rtn != 0) {
		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
		kmem_free(rqbuf_rw, SENSE_LENGTH);
		mutex_enter(SD_MUTEX(un));
		return;
	}

	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
	    HARDWARE_DEFECT_MANAGEMENT, path_flag);
	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for random writable
		 * and hardware defect features as current.
		 */
		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
		    (out_data_rw[10] & 0x1) &&
		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
		    (out_data_hd[10] & 0x1)) {
			un->un_f_mmc_writable_media = TRUE;
		}
	}

	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_rw, SENSE_LENGTH);
	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_hd, SENSE_LENGTH);
}

/*
 * Function: sd_read_unit_properties
 *
 * Description: The following implements a property lookup mechanism.
 *		Properties for particular disks (keyed on vendor, model
 *		and rev numbers) are sought in the sd.conf file via
 *		sd_process_sdconf_file(), and if not found there, are
 *		looked for in a list hardcoded in this driver via
 *		sd_process_sdconf_table(). Once located, the properties
 *		are used to update the driver unit structure.
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sd_read_unit_properties(struct sd_lun *un)
{
	/*
	 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
	 * the "sd-config-list" property (from the sd.conf file) or if
	 * there was no match for the inquiry vid/pid. If this event
	 * occurs the static driver configuration table is searched for
	 * a match.
	 */
	ASSERT(un != NULL);
	if (sd_process_sdconf_file(un) == SD_FAILURE) {
		sd_process_sdconf_table(un);
	}

	/* check for LSI device */
	sd_is_lsi(un);
}


/*
 * Function: sd_process_sdconf_file
 *
 * Description: Use ddi_getlongprop to obtain the properties from the
 *		driver's config file (i.e., sd.conf) and update the driver
 *		soft state structure accordingly.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: SD_SUCCESS - The properties were successfully set according
 *			to the driver configuration file.
 *		SD_FAILURE - The driver config list was not obtained or
 *			there was no vid/pid match. This indicates that
 *			the static config table should be used.
 *
 * The config file has a property, "sd-config-list", which consists of
 * one or more duplets as follows:
 *
 *    sd-config-list=
 *	<duplet>,
 *	[<duplet>,]
 *	[<duplet>];
 *
 * The structure of each duplet is as follows:
 *
 *    <duplet>:= <vid+pid>,<data-property-name_list>
 *
 * The first entry of the duplet is the device ID string (the concatenated
 * vid & pid; not to be confused with a device_id). This is defined in
 * the same way as in the sd_disk_table.
 *
 * The second part of the duplet is a string that identifies a
 * data-property-name-list. The data-property-name-list is defined as
 * follows:
 *
 *    <data-property-name-list>:=<data-property-name> [<data-property-name>]
 *
 * The syntax of <data-property-name> depends on the <version> field.
 *
 * If version = SD_CONF_VERSION_1 we have the following syntax:
 *
 *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
 *
 * where the prop0 value will be used to set prop0 if bit 0 is set in the
 * flags, prop1 if bit 1 is set, etc., and N = SD_CONF_MAX_ITEMS - 1.
 */

static int
sd_process_sdconf_file(struct sd_lun *un)
{
	char	*config_list = NULL;
	int	config_list_len;
	int	len;
	int	dupletlen = 0;
	char	*vidptr;
	int	vidlen;
	char	*dnlist_ptr;
	char	*dataname_ptr;
	int	dnlist_len;
	int	dataname_len;
	int	*data_list;
	int	data_list_len;
	int	rval = SD_FAILURE;
	int	i;

	ASSERT(un != NULL);

	/* Obtain the configuration list associated with the .conf file */
	if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS,
	    sd_config_list, (caddr_t)&config_list, &config_list_len)
	    != DDI_PROP_SUCCESS) {
		return (SD_FAILURE);
	}

	/*
	 * Compare vids in each duplet to the inquiry vid - if a match is
	 * made, get the data value and update the soft state structure
	 * accordingly.
	 *
	 * Note: This algorithm is complex and difficult to maintain. It
	 * should be replaced with a more robust implementation.
	 */
	for (len = config_list_len, vidptr = config_list; len > 0;
	    vidptr += dupletlen, len -= dupletlen) {
		/*
		 * Note: The assumption here is that each vid entry is on
		 * a separate line from its associated duplet.
		 */
		vidlen = dupletlen = (int)strlen(vidptr);
		if ((vidlen == 0) ||
		    (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) {
			dupletlen++;
			continue;
		}

		/*
		 * dnlist contains one or more blank-separated
		 * data-property-name entries
		 */
		dnlist_ptr = vidptr + vidlen + 1;
		dnlist_len = (int)strlen(dnlist_ptr);
		dupletlen += dnlist_len + 2;

		/*
		 * Set a pointer for the first data-property-name
		 * entry in the list
		 */
		dataname_ptr = dnlist_ptr;
		dataname_len = 0;

		/*
		 * Loop through all data-property-name entries in the
		 * data-property-name-list, setting the properties for each.
		 */
		while (dataname_len < dnlist_len) {
			int version;

			/*
			 * Determine the length of the current
			 * data-property-name entry by indexing until a
			 * blank or NUL is encountered. When a space is
			 * encountered, reset it to NUL for compliance
			 * with ddi_getlongprop().
			 */
			for (i = 0; ((dataname_ptr[i] != ' ') &&
			    (dataname_ptr[i] != '\0')); i++) {
				;
			}

			dataname_len += i;
			/* If not NUL-terminated, make it so */
			if (dataname_ptr[i] == ' ') {
				dataname_ptr[i] = '\0';
			}
			dataname_len++;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_process_sdconf_file: disk:%s, data:%s\n",
			    vidptr, dataname_ptr);

			/* Get the data list */
			if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0,
			    dataname_ptr, (caddr_t)&data_list, &data_list_len)
			    != DDI_PROP_SUCCESS) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_process_sdconf_file: data property (%s)"
				    " has no value\n", dataname_ptr);
				dataname_ptr = dnlist_ptr + dataname_len;
				continue;
			}

			version = data_list[0];

			if (version == SD_CONF_VERSION_1) {
				sd_tunables values;

				/* Set the properties */
				if (sd_chk_vers1_data(un, data_list[1],
				    &data_list[2], data_list_len, dataname_ptr)
				    == SD_SUCCESS) {
					sd_get_tunables_from_conf(un,
					    data_list[1], &data_list[2],
					    &values);
					sd_set_vers1_properties(un,
					    data_list[1], &values);
					rval = SD_SUCCESS;
				} else {
					rval = SD_FAILURE;
				}
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "data property %s version 0x%x is invalid.",
				    dataname_ptr, version);
				rval = SD_FAILURE;
			}
			kmem_free(data_list, data_list_len);
			dataname_ptr = dnlist_ptr + dataname_len;
		}
	}

	/* free up the memory allocated by ddi_getlongprop */
	if (config_list) {
		kmem_free(config_list, config_list_len);
	}

	return (rval);
}

/*
 * Function: sd_get_tunables_from_conf()
 *
 * Description: This function reads the data list from the sd.conf file and
 *		pulls the values that can have numeric values as arguments,
 *		placing each value in the appropriate sd_tunables member.
 *		Since the order of the data-list members varies across
 *		platforms, this function reads them from the data list in a
 *		platform-specific order and places them into the correct
 *		sd_tunables member that is consistent across all platforms.
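 *
 * As an illustration only (hypothetical vid/pid, property name, and bit
 * assignments; not a shipped configuration), a version-1 entry could
 * look like:
 *
 *	sd-config-list = "ACME  Wrench16", "acme-data";
 *	acme-data = 1, 0x5, 10, 0, 3, 0, ...;
 *
 * Assuming, for this sketch, that bit 0 is SD_CONF_BSET_THROTTLE and
 * bit 2 is SD_CONF_BSET_NRR_COUNT, flags 0x5 would cause sdt_throttle to
 * be read from the prop0 slot (10) and sdt_not_rdy_retries from the
 * prop2 slot (3); slots for unset bits are ignored. Real entries
 * conventionally supply all SD_CONF_MAX_ITEMS property slots.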
 */
static void
sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
    sd_tunables *values)
{
	int i;
	int mask;

	bzero(values, sizeof (sd_tunables));

	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {

		mask = 1 << i;
		if (mask > flags) {
			break;
		}

		switch (mask & flags) {
		case 0:	/* This mask bit not set in flags */
			continue;
		case SD_CONF_BSET_THROTTLE:
			values->sdt_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: throttle = %d\n",
			    values->sdt_throttle);
			break;
		case SD_CONF_BSET_CTYPE:
			values->sdt_ctype = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: ctype = %d\n",
			    values->sdt_ctype);
			break;
		case SD_CONF_BSET_NRR_COUNT:
			values->sdt_not_rdy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
			    values->sdt_not_rdy_retries);
			break;
		case SD_CONF_BSET_BSY_RETRY_COUNT:
			values->sdt_busy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: busy_retries = %d\n",
			    values->sdt_busy_retries);
			break;
		case SD_CONF_BSET_RST_RETRIES:
			values->sdt_reset_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reset_retries = %d\n",
			    values->sdt_reset_retries);
			break;
		case SD_CONF_BSET_RSV_REL_TIME:
			values->sdt_reserv_rel_time = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
			    values->sdt_reserv_rel_time);
			break;
		case SD_CONF_BSET_MIN_THROTTLE:
			values->sdt_min_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: min_throttle = %d\n",
			    values->sdt_min_throttle);
			break;
		case SD_CONF_BSET_DISKSORT_DISABLED:
			values->sdt_disk_sort_dis = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
			    values->sdt_disk_sort_dis);
			break;
		case SD_CONF_BSET_LUN_RESET_ENABLED:
			values->sdt_lun_reset_enable = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: lun_reset_enable = %d"
			    "\n", values->sdt_lun_reset_enable);
			break;
		case SD_CONF_BSET_CACHE_IS_NV:
			values->sdt_suppress_cache_flush = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: "
			    "suppress_cache_flush = %d\n",
			    values->sdt_suppress_cache_flush);
			break;
		}
	}
}

/*
 * Function: sd_process_sdconf_table
 *
 * Description: Search the static configuration table for a match on the
 *		inquiry vid/pid and update the driver soft state structure
 *		according to the table property values for the device.
 *
 *		The form of a configuration table entry is:
 *		  <vid+pid>,<flags>,<property-data>
 *		  "SEAGATE ST42400N",1,0x40000,
 *		  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sd_process_sdconf_table(struct sd_lun *un)
{
	char	*id = NULL;
	int	table_index;
	int	idlen;

	ASSERT(un != NULL);
	for (table_index = 0; table_index < sd_disk_table_size;
	    table_index++) {
		id = sd_disk_table[table_index].device_id;
		idlen = strlen(id);
		if (idlen == 0) {
			continue;
		}

		/*
		 * The static configuration table currently does not
		 * implement version 10 properties. Additionally,
		 * multiple data-property-name entries are not
		 * implemented in the static configuration table.
		 */
		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_process_sdconf_table: disk %s\n", id);
			sd_set_vers1_properties(un,
			    sd_disk_table[table_index].flags,
			    sd_disk_table[table_index].properties);
			break;
		}
	}
}


/*
 * Function: sd_sdconf_id_match
 *
 * Description: This local function implements a case-insensitive vid/pid
 *		comparison as well as the boundary cases of wild card and
 *		multiple blanks.
 *
 *		Note: An implicit assumption made here is that the scsi
 *		inquiry structure will always keep the vid, pid and
 *		revision strings in consecutive sequence, so they can be
 *		read as a single string. If this assumption is not the
 *		case, a separate string, to be used for the check, needs
 *		to be built with these strings concatenated.
 *
 * Arguments: un - driver soft state (unit) structure
 *	      id - table or config file vid/pid
 *	      idlen - length of the vid/pid (bytes)
 *
 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
 *		SD_FAILURE - Indicates no match with the inquiry vid/pid
 */

static int
sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
{
	struct scsi_inquiry	*sd_inq;
	int			rval = SD_SUCCESS;

	ASSERT(un != NULL);
	sd_inq = un->un_sd->sd_inq;
	ASSERT(id != NULL);

	/*
	 * We use the inq_vid as a pointer to a buffer containing the
	 * vid and pid and use the entire vid/pid length of the table
	 * entry for the comparison. This works because the inq_pid
	 * data member follows inq_vid in the scsi_inquiry structure.
	 */
	if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
		/*
		 * The user id string is compared to the inquiry vid/pid
		 * using a case insensitive comparison and ignoring
		 * multiple spaces.
		 */
		rval = sd_blank_cmp(un, id, idlen);
		if (rval != SD_SUCCESS) {
			/*
			 * User id strings that start and end with a "*"
			 * are a special case. These do not have a
			 * specific vendor, and the product string can
			 * appear anywhere in the 16 byte PID portion of
			 * the inquiry data. This is a simple strstr()
			 * type search for the user id in the inquiry data.
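			 *
			 * For example, a hypothetical entry of
			 * "*ST42400*" (illustrative only) would match any
			 * device whose 16-byte inquiry PID contains the
			 * substring "ST42400" at any offset.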
3819 */ 3820 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3821 char *pidptr = &id[1]; 3822 int i; 3823 int j; 3824 int pidstrlen = idlen - 2; 3825 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3826 pidstrlen; 3827 3828 if (j < 0) { 3829 return (SD_FAILURE); 3830 } 3831 for (i = 0; i < j; i++) { 3832 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3833 pidptr, pidstrlen) == 0) { 3834 rval = SD_SUCCESS; 3835 break; 3836 } 3837 } 3838 } 3839 } 3840 } 3841 return (rval); 3842 } 3843 3844 3845 /* 3846 * Function: sd_blank_cmp 3847 * 3848 * Description: If the id string starts and ends with a space, treat 3849 * multiple consecutive spaces as equivalent to a single 3850 * space. For example, this causes a sd_disk_table entry 3851 * of " NEC CDROM " to match a device's id string of 3852 * "NEC CDROM". 3853 * 3854 * Note: The success exit condition for this routine is if 3855 * the pointer to the table entry is '\0' and the cnt of 3856 * the inquiry length is zero. This will happen if the inquiry 3857 * string returned by the device is padded with spaces to be 3858 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3859 * SCSI spec states that the inquiry string is to be padded with 3860 * spaces. 3861 * 3862 * Arguments: un - driver soft state (unit) structure 3863 * id - table or config file vid/pid 3864 * idlen - length of the vid/pid (bytes) 3865 * 3866 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3867 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3868 */ 3869 3870 static int 3871 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3872 { 3873 char *p1; 3874 char *p2; 3875 int cnt; 3876 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3877 sizeof (SD_INQUIRY(un)->inq_pid); 3878 3879 ASSERT(un != NULL); 3880 p2 = un->un_sd->sd_inq->inq_vid; 3881 ASSERT(id != NULL); 3882 p1 = id; 3883 3884 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3885 /* 3886 * Note: string p1 is terminated by a NUL but string p2 3887 * isn't. The end of p2 is determined by cnt. 3888 */ 3889 for (;;) { 3890 /* skip over any extra blanks in both strings */ 3891 while ((*p1 != '\0') && (*p1 == ' ')) { 3892 p1++; 3893 } 3894 while ((cnt != 0) && (*p2 == ' ')) { 3895 p2++; 3896 cnt--; 3897 } 3898 3899 /* compare the two strings */ 3900 if ((cnt == 0) || 3901 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3902 break; 3903 } 3904 while ((cnt > 0) && 3905 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3906 p1++; 3907 p2++; 3908 cnt--; 3909 } 3910 } 3911 } 3912 3913 /* return SD_SUCCESS if both strings match */ 3914 return (((*p1 == '\0') && (cnt == 0)) ? 
	    SD_SUCCESS : SD_FAILURE);
}


/*
 * Function: sd_chk_vers1_data
 *
 * Description: Verify the version 1 device properties provided by the
 *		user via the configuration file
 *
 * Arguments: un - driver soft state (unit) structure
 *	      flags - integer mask indicating properties to be set
 *	      prop_list - integer list of property values
 *	      list_len - length of user provided data
 *
 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
 *		SD_FAILURE - Indicates the user provided data is invalid
 */

static int
sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr)
{
	int i;
	int mask;
	int index = 0;

	ASSERT(un != NULL);

	/* Check for a NULL property name and list */
	if (dataname_ptr == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: NULL data property name.");
		return (SD_FAILURE);
	}
	if (prop_list == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: %s NULL data property list.",
		    dataname_ptr);
		return (SD_FAILURE);
	}

	/* Display a warning if undefined bits are set in the flags */
	if (flags & ~SD_CONF_BIT_MASK) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
		    "Properties not set.",
		    (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
		return (SD_FAILURE);
	}

	/*
	 * Verify the length of the list by identifying the highest bit set
	 * in the flags and validating that the property list has a length
	 * up to the index of this bit.
	 */
	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
		mask = 1 << i;
		if (flags & mask) {
			index = i + 1;
		}
	}
	if ((list_len / sizeof (int)) < (index + 2)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: "
		    "Data property list %s size is incorrect. "
		    "Properties not set.", dataname_ptr);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
		    "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
		return (SD_FAILURE);
	}
	return (SD_SUCCESS);
}


/*
 * Function: sd_set_vers1_properties
 *
 * Description: Set version 1 device properties based on a property list
 *		retrieved from the driver configuration file or static
 *		configuration table. Version 1 properties have the format:
 *
 *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
 *
 *		where the prop0 value will be used to set prop0 if bit 0
 *		is set in the flags
 *
 * Arguments: un - driver soft state (unit) structure
 *	      flags - integer mask indicating properties to be set
 *	      prop_list - integer list of property values
 */

static void
sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
{
	ASSERT(un != NULL);

	/*
	 * Set the flag to indicate cache is to be disabled. An attempt
	 * to disable the cache via sd_cache_control() will be made
	 * later during attach once the basic initialization is complete.
4015 */ 4016 if (flags & SD_CONF_BSET_NOCACHE) { 4017 un->un_f_opt_disable_cache = TRUE; 4018 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4019 "sd_set_vers1_properties: caching disabled flag set\n"); 4020 } 4021 4022 /* CD-specific configuration parameters */ 4023 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4024 un->un_f_cfg_playmsf_bcd = TRUE; 4025 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4026 "sd_set_vers1_properties: playmsf_bcd set\n"); 4027 } 4028 if (flags & SD_CONF_BSET_READSUB_BCD) { 4029 un->un_f_cfg_readsub_bcd = TRUE; 4030 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4031 "sd_set_vers1_properties: readsub_bcd set\n"); 4032 } 4033 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4034 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4035 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4036 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4037 } 4038 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4039 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4040 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4041 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4042 } 4043 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4044 un->un_f_cfg_no_read_header = TRUE; 4045 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4046 "sd_set_vers1_properties: no_read_header set\n"); 4047 } 4048 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4049 un->un_f_cfg_read_cd_xd4 = TRUE; 4050 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4051 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4052 } 4053 4054 /* Support for devices which do not have valid/unique serial numbers */ 4055 if (flags & SD_CONF_BSET_FAB_DEVID) { 4056 un->un_f_opt_fab_devid = TRUE; 4057 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4058 "sd_set_vers1_properties: fab_devid bit set\n"); 4059 } 4060 4061 /* Support for user throttle configuration */ 4062 if (flags & SD_CONF_BSET_THROTTLE) { 4063 ASSERT(prop_list != NULL); 4064 un->un_saved_throttle = un->un_throttle = 4065 prop_list->sdt_throttle; 4066 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4067 "sd_set_vers1_properties: throttle set to %d\n", 4068 prop_list->sdt_throttle); 4069 } 4070 4071 /* Set the per disk retry count according to the conf file or table. 
*/ 4072 if (flags & SD_CONF_BSET_NRR_COUNT) { 4073 ASSERT(prop_list != NULL); 4074 if (prop_list->sdt_not_rdy_retries) { 4075 un->un_notready_retry_count = 4076 prop_list->sdt_not_rdy_retries; 4077 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4078 "sd_set_vers1_properties: not ready retry count" 4079 " set to %d\n", un->un_notready_retry_count); 4080 } 4081 } 4082 4083 /* The controller type is reported for generic disk driver ioctls */ 4084 if (flags & SD_CONF_BSET_CTYPE) { 4085 ASSERT(prop_list != NULL); 4086 switch (prop_list->sdt_ctype) { 4087 case CTYPE_CDROM: 4088 un->un_ctype = prop_list->sdt_ctype; 4089 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4090 "sd_set_vers1_properties: ctype set to " 4091 "CTYPE_CDROM\n"); 4092 break; 4093 case CTYPE_CCS: 4094 un->un_ctype = prop_list->sdt_ctype; 4095 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4096 "sd_set_vers1_properties: ctype set to " 4097 "CTYPE_CCS\n"); 4098 break; 4099 case CTYPE_ROD: /* RW optical */ 4100 un->un_ctype = prop_list->sdt_ctype; 4101 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4102 "sd_set_vers1_properties: ctype set to " 4103 "CTYPE_ROD\n"); 4104 break; 4105 default: 4106 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4107 "sd_set_vers1_properties: Could not set " 4108 "invalid ctype value (%d)", 4109 prop_list->sdt_ctype); 4110 } 4111 } 4112 4113 /* Purple failover timeout */ 4114 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4115 ASSERT(prop_list != NULL); 4116 un->un_busy_retry_count = 4117 prop_list->sdt_busy_retries; 4118 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4119 "sd_set_vers1_properties: " 4120 "busy retry count set to %d\n", 4121 un->un_busy_retry_count); 4122 } 4123 4124 /* Purple reset retry count */ 4125 if (flags & SD_CONF_BSET_RST_RETRIES) { 4126 ASSERT(prop_list != NULL); 4127 un->un_reset_retry_count = 4128 prop_list->sdt_reset_retries; 4129 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4130 "sd_set_vers1_properties: " 4131 "reset retry count set to %d\n", 4132 un->un_reset_retry_count); 4133 } 4134 4135 /* Purple reservation release timeout */ 4136 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4137 ASSERT(prop_list != NULL); 4138 un->un_reserve_release_time = 4139 prop_list->sdt_reserv_rel_time; 4140 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4141 "sd_set_vers1_properties: " 4142 "reservation release timeout set to %d\n", 4143 un->un_reserve_release_time); 4144 } 4145 4146 /* 4147 * Driver flag telling the driver to verify that no commands are pending 4148 * for a device before issuing a Test Unit Ready. This is a workaround 4149 * for a firmware bug in some Seagate eliteI drives. 4150 */ 4151 if (flags & SD_CONF_BSET_TUR_CHECK) { 4152 un->un_f_cfg_tur_check = TRUE; 4153 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4154 "sd_set_vers1_properties: tur queue check set\n"); 4155 } 4156 4157 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4158 un->un_min_throttle = prop_list->sdt_min_throttle; 4159 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4160 "sd_set_vers1_properties: min throttle set to %d\n", 4161 un->un_min_throttle); 4162 } 4163 4164 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4165 un->un_f_disksort_disabled = 4166 (prop_list->sdt_disk_sort_dis != 0) ? 4167 TRUE : FALSE; 4168 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4169 "sd_set_vers1_properties: disksort disabled " 4170 "flag set to %d\n", 4171 prop_list->sdt_disk_sort_dis); 4172 } 4173 4174 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4175 un->un_f_lun_reset_enabled = 4176 (prop_list->sdt_lun_reset_enable != 0) ? 
4177 TRUE : FALSE; 4178 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4179 "sd_set_vers1_properties: lun reset enabled " 4180 "flag set to %d\n", 4181 prop_list->sdt_lun_reset_enable); 4182 } 4183 4184 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4185 un->un_f_suppress_cache_flush = 4186 (prop_list->sdt_suppress_cache_flush != 0) ? 4187 TRUE : FALSE; 4188 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4189 "sd_set_vers1_properties: suppress_cache_flush " 4190 "flag set to %d\n", 4191 prop_list->sdt_suppress_cache_flush); 4192 } 4193 4194 /* 4195 * Validate the throttle values. 4196 * If any of the numbers are invalid, set everything to defaults. 4197 */ 4198 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4199 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4200 (un->un_min_throttle > un->un_throttle)) { 4201 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4202 un->un_min_throttle = sd_min_throttle; 4203 } 4204 } 4205 4206 /* 4207 * Function: sd_is_lsi() 4208 * 4209 * Description: Check for lsi devices, step through the static device 4210 * table to match vid/pid. 4211 * 4212 * Args: un - ptr to sd_lun 4213 * 4214 * Notes: When creating new LSI property, need to add the new LSI property 4215 * to this function. 4216 */ 4217 static void 4218 sd_is_lsi(struct sd_lun *un) 4219 { 4220 char *id = NULL; 4221 int table_index; 4222 int idlen; 4223 void *prop; 4224 4225 ASSERT(un != NULL); 4226 for (table_index = 0; table_index < sd_disk_table_size; 4227 table_index++) { 4228 id = sd_disk_table[table_index].device_id; 4229 idlen = strlen(id); 4230 if (idlen == 0) { 4231 continue; 4232 } 4233 4234 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4235 prop = sd_disk_table[table_index].properties; 4236 if (prop == &lsi_properties || 4237 prop == &lsi_oem_properties || 4238 prop == &lsi_properties_scsi || 4239 prop == &symbios_properties) { 4240 un->un_f_cfg_is_lsi = TRUE; 4241 } 4242 break; 4243 } 4244 } 4245 } 4246 4247 /* 4248 * Function: sd_get_physical_geometry 4249 * 4250 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4251 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4252 * target, and use this information to initialize the physical 4253 * geometry cache specified by pgeom_p. 4254 * 4255 * MODE SENSE is an optional command, so failure in this case 4256 * does not necessarily denote an error. We want to use the 4257 * MODE SENSE commands to derive the physical geometry of the 4258 * device, but if either command fails, the logical geometry is 4259 * used as the fallback for disk label geometry in cmlb. 4260 * 4261 * This requires that un->un_blockcount and un->un_tgt_blocksize 4262 * have already been initialized for the current target and 4263 * that the current values be passed as args so that we don't 4264 * end up ever trying to use -1 as a valid value. This could 4265 * happen if either value is reset while we're not holding 4266 * the mutex. 4267 * 4268 * Arguments: un - driver soft state (unit) structure 4269 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4270 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4271 * to use the USCSI "direct" chain and bypass the normal 4272 * command waitq. 4273 * 4274 * Context: Kernel thread only (can sleep). 
4275 */ 4276 4277 static int 4278 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4279 diskaddr_t capacity, int lbasize, int path_flag) 4280 { 4281 struct mode_format *page3p; 4282 struct mode_geometry *page4p; 4283 struct mode_header *headerp; 4284 int sector_size; 4285 int nsect; 4286 int nhead; 4287 int ncyl; 4288 int intrlv; 4289 int spc; 4290 diskaddr_t modesense_capacity; 4291 int rpm; 4292 int bd_len; 4293 int mode_header_length; 4294 uchar_t *p3bufp; 4295 uchar_t *p4bufp; 4296 int cdbsize; 4297 int ret = EIO; 4298 4299 ASSERT(un != NULL); 4300 4301 if (lbasize == 0) { 4302 if (ISCD(un)) { 4303 lbasize = 2048; 4304 } else { 4305 lbasize = un->un_sys_blocksize; 4306 } 4307 } 4308 pgeom_p->g_secsize = (unsigned short)lbasize; 4309 4310 /* 4311 * If the unit is a cd/dvd drive MODE SENSE page three 4312 * and MODE SENSE page four are reserved (see SBC spec 4313 * and MMC spec). To prevent soft errors just return 4314 * using the default LBA size. 4315 */ 4316 if (ISCD(un)) 4317 return (ret); 4318 4319 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4320 4321 /* 4322 * Retrieve MODE SENSE page 3 - Format Device Page 4323 */ 4324 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4325 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4326 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4327 != 0) { 4328 SD_ERROR(SD_LOG_COMMON, un, 4329 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4330 goto page3_exit; 4331 } 4332 4333 /* 4334 * Determine size of Block Descriptors in order to locate the mode 4335 * page data. ATAPI devices return 0, SCSI devices should return 4336 * MODE_BLK_DESC_LENGTH. 4337 */ 4338 headerp = (struct mode_header *)p3bufp; 4339 if (un->un_f_cfg_is_atapi == TRUE) { 4340 struct mode_header_grp2 *mhp = 4341 (struct mode_header_grp2 *)headerp; 4342 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4343 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4344 } else { 4345 mode_header_length = MODE_HEADER_LENGTH; 4346 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4347 } 4348 4349 if (bd_len > MODE_BLK_DESC_LENGTH) { 4350 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4351 "received unexpected bd_len of %d, page3\n", bd_len); 4352 goto page3_exit; 4353 } 4354 4355 page3p = (struct mode_format *) 4356 ((caddr_t)headerp + mode_header_length + bd_len); 4357 4358 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4359 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4360 "mode sense pg3 code mismatch %d\n", 4361 page3p->mode_page.code); 4362 goto page3_exit; 4363 } 4364 4365 /* 4366 * Use this physical geometry data only if BOTH MODE SENSE commands 4367 * complete successfully; otherwise, revert to the logical geometry. 4368 * So, we need to save everything in temporary variables. 
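 *
 * (One normalization happens immediately below: the reported sector size
 * is rounded down to a multiple of the system block size, so a
 * hypothetical page 3 value of 516 becomes 512 with a 512-byte system
 * block size.)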
4369 */ 4370 sector_size = BE_16(page3p->data_bytes_sect); 4371 4372 /* 4373 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4374 */ 4375 if (sector_size == 0) { 4376 sector_size = un->un_sys_blocksize; 4377 } else { 4378 sector_size &= ~(un->un_sys_blocksize - 1); 4379 } 4380 4381 nsect = BE_16(page3p->sect_track); 4382 intrlv = BE_16(page3p->interleave); 4383 4384 SD_INFO(SD_LOG_COMMON, un, 4385 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4386 SD_INFO(SD_LOG_COMMON, un, 4387 " mode page: %d; nsect: %d; sector size: %d;\n", 4388 page3p->mode_page.code, nsect, sector_size); 4389 SD_INFO(SD_LOG_COMMON, un, 4390 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4391 BE_16(page3p->track_skew), 4392 BE_16(page3p->cylinder_skew)); 4393 4394 4395 /* 4396 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4397 */ 4398 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4399 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4400 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4401 != 0) { 4402 SD_ERROR(SD_LOG_COMMON, un, 4403 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4404 goto page4_exit; 4405 } 4406 4407 /* 4408 * Determine size of Block Descriptors in order to locate the mode 4409 * page data. ATAPI devices return 0, SCSI devices should return 4410 * MODE_BLK_DESC_LENGTH. 4411 */ 4412 headerp = (struct mode_header *)p4bufp; 4413 if (un->un_f_cfg_is_atapi == TRUE) { 4414 struct mode_header_grp2 *mhp = 4415 (struct mode_header_grp2 *)headerp; 4416 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4417 } else { 4418 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4419 } 4420 4421 if (bd_len > MODE_BLK_DESC_LENGTH) { 4422 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4423 "received unexpected bd_len of %d, page4\n", bd_len); 4424 goto page4_exit; 4425 } 4426 4427 page4p = (struct mode_geometry *) 4428 ((caddr_t)headerp + mode_header_length + bd_len); 4429 4430 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4431 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4432 "mode sense pg4 code mismatch %d\n", 4433 page4p->mode_page.code); 4434 goto page4_exit; 4435 } 4436 4437 /* 4438 * Stash the data now, after we know that both commands completed. 4439 */ 4440 4441 4442 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4443 spc = nhead * nsect; 4444 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4445 rpm = BE_16(page4p->rpm); 4446 4447 modesense_capacity = spc * ncyl; 4448 4449 SD_INFO(SD_LOG_COMMON, un, 4450 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4451 SD_INFO(SD_LOG_COMMON, un, 4452 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4453 SD_INFO(SD_LOG_COMMON, un, 4454 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4455 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4456 (void *)pgeom_p, capacity); 4457 4458 /* 4459 * Compensate if the drive's geometry is not rectangular, i.e., 4460 * the product of C * H * S returned by MODE SENSE >= that returned 4461 * by read capacity. This is an idiosyncrasy of the original x86 4462 * disk subsystem. 
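 *
 * Worked example with hypothetical numbers: nhead = 16 and nsect = 63
 * give spc = 1008; with ncyl = 1000, modesense_capacity = 1008000. If
 * READ CAPACITY reported 1000000 blocks, acyl = (1008000 - 1000000 +
 * 1007) / 1008 = 8, and ncyl becomes 1000 - 8 = 992.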
4463 */ 4464 if (modesense_capacity >= capacity) { 4465 SD_INFO(SD_LOG_COMMON, un, 4466 "sd_get_physical_geometry: adjusting acyl; " 4467 "old: %d; new: %d\n", pgeom_p->g_acyl, 4468 (modesense_capacity - capacity + spc - 1) / spc); 4469 if (sector_size != 0) { 4470 /* 1243403: NEC D38x7 drives don't support sec size */ 4471 pgeom_p->g_secsize = (unsigned short)sector_size; 4472 } 4473 pgeom_p->g_nsect = (unsigned short)nsect; 4474 pgeom_p->g_nhead = (unsigned short)nhead; 4475 pgeom_p->g_capacity = capacity; 4476 pgeom_p->g_acyl = 4477 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4478 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4479 } 4480 4481 pgeom_p->g_rpm = (unsigned short)rpm; 4482 pgeom_p->g_intrlv = (unsigned short)intrlv; 4483 ret = 0; 4484 4485 SD_INFO(SD_LOG_COMMON, un, 4486 "sd_get_physical_geometry: mode sense geometry:\n"); 4487 SD_INFO(SD_LOG_COMMON, un, 4488 " nsect: %d; sector size: %d; interlv: %d\n", 4489 nsect, sector_size, intrlv); 4490 SD_INFO(SD_LOG_COMMON, un, 4491 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4492 nhead, ncyl, rpm, modesense_capacity); 4493 SD_INFO(SD_LOG_COMMON, un, 4494 "sd_get_physical_geometry: (cached)\n"); 4495 SD_INFO(SD_LOG_COMMON, un, 4496 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4497 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4498 pgeom_p->g_nhead, pgeom_p->g_nsect); 4499 SD_INFO(SD_LOG_COMMON, un, 4500 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4501 pgeom_p->g_secsize, pgeom_p->g_capacity, 4502 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4503 4504 page4_exit: 4505 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4506 page3_exit: 4507 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4508 4509 return (ret); 4510 } 4511 4512 /* 4513 * Function: sd_get_virtual_geometry 4514 * 4515 * Description: Ask the controller to tell us about the target device. 4516 * 4517 * Arguments: un - pointer to softstate 4518 * capacity - disk capacity in #blocks 4519 * lbasize - disk block size in bytes 4520 * 4521 * Context: Kernel thread only 4522 */ 4523 4524 static int 4525 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4526 diskaddr_t capacity, int lbasize) 4527 { 4528 uint_t geombuf; 4529 int spc; 4530 4531 ASSERT(un != NULL); 4532 4533 /* Set sector size, and total number of sectors */ 4534 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4535 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4536 4537 /* Let the HBA tell us its geometry */ 4538 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4539 4540 /* A value of -1 indicates an undefined "geometry" property */ 4541 if (geombuf == (-1)) { 4542 return (EINVAL); 4543 } 4544 4545 /* Initialize the logical geometry cache. */ 4546 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4547 lgeom_p->g_nsect = geombuf & 0xffff; 4548 lgeom_p->g_secsize = un->un_sys_blocksize; 4549 4550 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4551 4552 /* 4553 * Note: The driver originally converted the capacity value from 4554 * target blocks to system blocks. However, the capacity value passed 4555 * to this routine is already in terms of system blocks (this scaling 4556 * is done when the READ CAPACITY command is issued and processed). 4557 * This 'error' may have gone undetected because the usage of g_ncyl 4558 * (which is based upon g_capacity) is very limited within the driver 4559 */ 4560 lgeom_p->g_capacity = capacity; 4561 4562 /* 4563 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4564 * hba may return zero values if the device has been removed. 4565 */ 4566 if (spc == 0) { 4567 lgeom_p->g_ncyl = 0; 4568 } else { 4569 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4570 } 4571 lgeom_p->g_acyl = 0; 4572 4573 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4574 return (0); 4575 4576 } 4577 /* 4578 * Function: sd_update_block_info 4579 * 4580 * Description: Calculate a byte count to sector count bitshift value 4581 * from sector size. 4582 * 4583 * Arguments: un: unit struct. 4584 * lbasize: new target sector size 4585 * capacity: new target capacity, ie. block count 4586 * 4587 * Context: Kernel thread context 4588 */ 4589 4590 static void 4591 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4592 { 4593 uint_t dblk; 4594 4595 if (lbasize != 0) { 4596 un->un_tgt_blocksize = lbasize; 4597 un->un_f_tgt_blocksize_is_valid = TRUE; 4598 } 4599 4600 if (capacity != 0) { 4601 un->un_blockcount = capacity; 4602 un->un_f_blockcount_is_valid = TRUE; 4603 } 4604 4605 /* 4606 * Update device capacity properties. 4607 * 4608 * 'device-nblocks' number of blocks in target's units 4609 * 'device-blksize' data bearing size of target's block 4610 * 4611 * NOTE: math is complicated by the fact that un_tgt_blocksize may 4612 * not be a power of two for checksumming disks with 520/528 byte 4613 * sectors. 4614 */ 4615 if (un->un_f_tgt_blocksize_is_valid && 4616 un->un_f_blockcount_is_valid && 4617 un->un_sys_blocksize) { 4618 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 4619 (void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un), 4620 "device-nblocks", un->un_blockcount / dblk); 4621 /* 4622 * To save memory, only define "device-blksize" when its 4623 * value is differnet than the default DEV_BSIZE value. 4624 */ 4625 if ((un->un_sys_blocksize * dblk) != DEV_BSIZE) 4626 (void) ddi_prop_update_int(DDI_DEV_T_NONE, 4627 SD_DEVINFO(un), "device-blksize", 4628 un->un_sys_blocksize * dblk); 4629 } 4630 } 4631 4632 4633 /* 4634 * Function: sd_register_devid 4635 * 4636 * Description: This routine will obtain the device id information from the 4637 * target, obtain the serial number, and register the device 4638 * id with the ddi framework. 4639 * 4640 * Arguments: devi - the system's dev_info_t for the device. 4641 * un - driver soft state (unit) structure 4642 * reservation_flag - indicates if a reservation conflict 4643 * occurred during attach 4644 * 4645 * Context: Kernel Thread 4646 */ 4647 static void 4648 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4649 { 4650 int rval = 0; 4651 uchar_t *inq80 = NULL; 4652 size_t inq80_len = MAX_INQUIRY_SIZE; 4653 size_t inq80_resid = 0; 4654 uchar_t *inq83 = NULL; 4655 size_t inq83_len = MAX_INQUIRY_SIZE; 4656 size_t inq83_resid = 0; 4657 int dlen, len; 4658 char *sn; 4659 4660 ASSERT(un != NULL); 4661 ASSERT(mutex_owned(SD_MUTEX(un))); 4662 ASSERT((SD_DEVINFO(un)) == devi); 4663 4664 /* 4665 * If transport has already registered a devid for this target 4666 * then that takes precedence over the driver's determination 4667 * of the devid. 4668 */ 4669 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 4670 ASSERT(un->un_devid); 4671 return; /* use devid registered by the transport */ 4672 } 4673 4674 /* 4675 * This is the case of antiquated Sun disk drives that have the 4676 * FAB_DEVID property set in the disk_table. 
	 * These drives manage their devids by storing them in the last
	 * two available sectors on the drive, and have them fabricated by
	 * the ddi layer by calling ddi_devid_init and passing the
	 * DEVID_FAB flag.
	 */
	if (un->un_f_opt_fab_devid == TRUE) {
		/*
		 * Depending on EINVAL isn't reliable, since a reserved disk
		 * may result in invalid geometry, so check to make sure a
		 * reservation conflict did not occur during attach.
		 */
		if ((sd_get_devid(un) == EINVAL) &&
		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
			/*
			 * The devid is invalid AND there is no reservation
			 * conflict. Fabricate a new devid.
			 */
			(void) sd_create_devid(un);
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		return;
	}

	/*
	 * We check the availability of the World Wide Name (0x83) and Unit
	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and
	 * using un_vpd_page_mask from them, we decide which way to get the
	 * WWN. If 0x83 is available, that is the best choice. Our next
	 * choice is 0x80. If neither is available, we munge the devid from
	 * the device vid/pid/serial # for Sun qualified disks, or use the
	 * ddi framework to fabricate a devid for non-Sun qualified disks.
	 */
	if (sd_check_vpd_page_support(un) == 0) {
		/* collect page 80 data if available */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {

			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			} else if (ddi_prop_exists(
			    DDI_DEV_T_NONE, SD_DEVINFO(un),
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    INQUIRY_SERIAL_NO) == 0) {
				/*
				 * If we don't already have a serial number
				 * property, do a quick verify of the data
				 * returned and define the property.
				 */
				dlen = inq80_len - inq80_resid;
				len = (size_t)inq80[3];
				if ((dlen >= 4) && ((len + 4) <= dlen)) {
					/*
					 * Ensure sn termination, skip leading
					 * blanks, and create the property
					 * 'inquiry-serial-no'.
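					 * (For example, a hypothetical
					 * page-0x80 serial field of
					 * "  XYZ123" yields the property
					 * value "XYZ123".)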
					 */
					sn = (char *)&inq80[4];
					sn[len] = 0;
					while (*sn && (*sn == ' '))
						sn++;
					if (*sn) {
						(void) ddi_prop_update_string(
						    DDI_DEV_T_NONE,
						    SD_DEVINFO(un),
						    INQUIRY_SERIAL_NO, sn);
					}
				}
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk. Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non-Sun qualified
		 * disks are treated in the same manner. These
		 * drives manage their devids by storing them in
		 * the last two available sectors on the drive and
		 * have them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Fabricate a devid only if one does not already
		 * exist.
		 */
		if (sd_get_devid(un) == EINVAL) {
			(void) sd_create_devid(un);
		}
		un->un_f_opt_fab_devid = TRUE;

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}



/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state. If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
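 *
 *		The on-disk devid block is accepted only if its revision
 *		bytes match DK_DEVID_REV_MSB/DK_DEVID_REV_LSB, the XOR
 *		checksum over the block's preceding 32-bit words matches
 *		the stored checksum (DKD_GETCHKSUM), and ddi_devid_valid()
 *		accepts the payload; any mismatch returns EINVAL.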
4834 * 4835 * Arguments: un - driver soft state (unit) structure 4836 * 4837 * Return Code: 0 if we successfully get the device id 4838 * 4839 * Context: Kernel Thread 4840 */ 4841 4842 static int 4843 sd_get_devid(struct sd_lun *un) 4844 { 4845 struct dk_devid *dkdevid; 4846 ddi_devid_t tmpid; 4847 uint_t *ip; 4848 size_t sz; 4849 diskaddr_t blk; 4850 int status; 4851 int chksum; 4852 int i; 4853 size_t buffer_size; 4854 4855 ASSERT(un != NULL); 4856 ASSERT(mutex_owned(SD_MUTEX(un))); 4857 4858 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4859 un); 4860 4861 if (un->un_devid != NULL) { 4862 return (0); 4863 } 4864 4865 mutex_exit(SD_MUTEX(un)); 4866 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4867 (void *)SD_PATH_DIRECT) != 0) { 4868 mutex_enter(SD_MUTEX(un)); 4869 return (EINVAL); 4870 } 4871 4872 /* 4873 * Read and verify device id, stored in the reserved cylinders at the 4874 * end of the disk. Backup label is on the odd sectors of the last 4875 * track of the last cylinder. Device id will be on track of the next 4876 * to last cylinder. 4877 */ 4878 mutex_enter(SD_MUTEX(un)); 4879 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4880 mutex_exit(SD_MUTEX(un)); 4881 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4882 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4883 SD_PATH_DIRECT); 4884 if (status != 0) { 4885 goto error; 4886 } 4887 4888 /* Validate the revision */ 4889 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4890 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4891 status = EINVAL; 4892 goto error; 4893 } 4894 4895 /* Calculate the checksum */ 4896 chksum = 0; 4897 ip = (uint_t *)dkdevid; 4898 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4899 i++) { 4900 chksum ^= ip[i]; 4901 } 4902 4903 /* Compare the checksums */ 4904 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4905 status = EINVAL; 4906 goto error; 4907 } 4908 4909 /* Validate the device id */ 4910 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4911 status = EINVAL; 4912 goto error; 4913 } 4914 4915 /* 4916 * Store the device id in the driver soft state 4917 */ 4918 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4919 tmpid = kmem_alloc(sz, KM_SLEEP); 4920 4921 mutex_enter(SD_MUTEX(un)); 4922 4923 un->un_devid = tmpid; 4924 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4925 4926 kmem_free(dkdevid, buffer_size); 4927 4928 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4929 4930 return (status); 4931 error: 4932 mutex_enter(SD_MUTEX(un)); 4933 kmem_free(dkdevid, buffer_size); 4934 return (status); 4935 } 4936 4937 4938 /* 4939 * Function: sd_create_devid 4940 * 4941 * Description: This routine will fabricate the device id and write it 4942 * to the disk. 
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: value of the fabricated device id
 *
 * Context: Kernel Thread
 */

static ddi_devid_t
sd_create_devid(struct sd_lun *un)
{
	ASSERT(un != NULL);

	/* Fabricate the devid */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
	    == DDI_FAILURE) {
		return (NULL);
	}

	/* Write the devid to disk */
	if (sd_write_deviceid(un) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}


/*
 * Function: sd_write_deviceid
 *
 * Description: This routine will write the device id to the disk
 *		reserved sector.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: EINVAL
 *		value returned by sd_send_scsi_cmd
 *
 * Context: Kernel Thread
 */

static int
sd_write_deviceid(struct sd_lun *un)
{
	struct dk_devid		*dkdevid;
	diskaddr_t		blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;

	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (-1);
	}


	/* Allocate the buffer */
	dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);

	kmem_free(dkdevid, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}


/*
 * Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit set
 *		and a page code of 0x00 to the device. It is used to
 *		determine which vital product data pages are available to
 *		find the devid. We are looking for pages 0x83 or 0x80. If
 *		we return -1, the device does not support the command.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 - success (supported pages recorded in un_vpd_page_mask)
 *		-1 - the device does not implement VPD pages
 *
 * Context: This routine can sleep.
 */

static int
sd_check_vpd_page_support(struct sd_lun *un)
{
	uchar_t	*page_list	= NULL;
	uchar_t	page_length	= 0xff;	/* Use max possible length */
	uchar_t	evpd		= 0x01;	/* Set the EVPD bit */
	uchar_t	page_code	= 0x00;	/* Supported VPD Pages */
	int	rval		= 0;
	int	counter;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it out
	 * with an additional call.
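	 *
	 * (Illustrative, hypothetical response: a page-0 payload beginning
	 * 00 00 00 03 00 80 83 advertises a three-byte supported-page list
	 * naming pages 0x00, 0x80 and 0x83; the list starts at byte 4,
	 * which is why the scan below begins at counter = 4.)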
5075 */ 5076 page_list = kmem_zalloc(page_length, KM_SLEEP); 5077 5078 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5079 page_code, NULL); 5080 5081 mutex_enter(SD_MUTEX(un)); 5082 5083 /* 5084 * Now we must validate that the device accepted the command, as some 5085 * drives do not support it. If the drive does support it, we will 5086 * return 0, and the supported pages will be in un_vpd_page_mask. If 5087 * not, we return -1. 5088 */ 5089 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5090 /* Loop to find one of the 2 pages we need */ 5091 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5092 5093 /* 5094 * Pages are returned in ascending order, and 0x83 is what we 5095 * are hoping for. 5096 */ 5097 while ((page_list[counter] <= 0x86) && 5098 (counter <= (page_list[VPD_PAGE_LENGTH] + 5099 VPD_HEAD_OFFSET))) { 5100 /* 5101 * Add 3 because page_list[3] is the number of 5102 * pages minus 3 5103 */ 5104 5105 switch (page_list[counter]) { 5106 case 0x00: 5107 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5108 break; 5109 case 0x80: 5110 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5111 break; 5112 case 0x81: 5113 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5114 break; 5115 case 0x82: 5116 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5117 break; 5118 case 0x83: 5119 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5120 break; 5121 case 0x86: 5122 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5123 break; 5124 } 5125 counter++; 5126 } 5127 5128 } else { 5129 rval = -1; 5130 5131 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5132 "sd_check_vpd_page_support: This drive does not implement " 5133 "VPD pages.\n"); 5134 } 5135 5136 kmem_free(page_list, page_length); 5137 5138 return (rval); 5139 } 5140 5141 5142 /* 5143 * Function: sd_setup_pm 5144 * 5145 * Description: Initialize Power Management on the device 5146 * 5147 * Context: Kernel Thread 5148 */ 5149 5150 static void 5151 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5152 { 5153 uint_t log_page_size; 5154 uchar_t *log_page_data; 5155 int rval; 5156 5157 /* 5158 * Since we are called from attach, holding a mutex for 5159 * un is unnecessary. Because some of the routines called 5160 * from here require SD_MUTEX to not be held, assert this 5161 * right up front. 5162 */ 5163 ASSERT(!mutex_owned(SD_MUTEX(un))); 5164 /* 5165 * Since the sd device does not have the 'reg' property, 5166 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5167 * The following code is to tell cpr that this device 5168 * DOES need to be suspended and resumed. 5169 */ 5170 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5171 "pm-hardware-state", "needs-suspend-resume"); 5172 5173 /* 5174 * This complies with the new power management framework 5175 * for certain desktop machines. Create the pm_components 5176 * property as a string array property. 5177 */ 5178 if (un->un_f_pm_supported) { 5179 /* 5180 * not all devices have a motor, try it first. 5181 * some devices may return ILLEGAL REQUEST, some 5182 * will hang 5183 * The following START_STOP_UNIT is used to check if target 5184 * device has a motor. 
5185 */ 5186 un->un_f_start_stop_supported = TRUE; 5187 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5188 SD_PATH_DIRECT) != 0) { 5189 un->un_f_start_stop_supported = FALSE; 5190 } 5191 5192 /* 5193 * create pm properties anyways otherwise the parent can't 5194 * go to sleep 5195 */ 5196 (void) sd_create_pm_components(devi, un); 5197 un->un_f_pm_is_enabled = TRUE; 5198 return; 5199 } 5200 5201 if (!un->un_f_log_sense_supported) { 5202 un->un_power_level = SD_SPINDLE_ON; 5203 un->un_f_pm_is_enabled = FALSE; 5204 return; 5205 } 5206 5207 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5208 5209 #ifdef SDDEBUG 5210 if (sd_force_pm_supported) { 5211 /* Force a successful result */ 5212 rval = 1; 5213 } 5214 #endif 5215 5216 /* 5217 * If the start-stop cycle counter log page is not supported 5218 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5219 * then we should not create the pm_components property. 5220 */ 5221 if (rval == -1) { 5222 /* 5223 * Error. 5224 * Reading log sense failed, most likely this is 5225 * an older drive that does not support log sense. 5226 * If this fails auto-pm is not supported. 5227 */ 5228 un->un_power_level = SD_SPINDLE_ON; 5229 un->un_f_pm_is_enabled = FALSE; 5230 5231 } else if (rval == 0) { 5232 /* 5233 * Page not found. 5234 * The start stop cycle counter is implemented as page 5235 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5236 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5237 */ 5238 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5239 /* 5240 * Page found, use this one. 5241 */ 5242 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5243 un->un_f_pm_is_enabled = TRUE; 5244 } else { 5245 /* 5246 * Error or page not found. 5247 * auto-pm is not supported for this device. 5248 */ 5249 un->un_power_level = SD_SPINDLE_ON; 5250 un->un_f_pm_is_enabled = FALSE; 5251 } 5252 } else { 5253 /* 5254 * Page found, use it. 5255 */ 5256 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5257 un->un_f_pm_is_enabled = TRUE; 5258 } 5259 5260 5261 if (un->un_f_pm_is_enabled == TRUE) { 5262 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5263 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5264 5265 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5266 log_page_size, un->un_start_stop_cycle_page, 5267 0x01, 0, SD_PATH_DIRECT); 5268 #ifdef SDDEBUG 5269 if (sd_force_pm_supported) { 5270 /* Force a successful result */ 5271 rval = 0; 5272 } 5273 #endif 5274 5275 /* 5276 * If the Log sense for Page( Start/stop cycle counter page) 5277 * succeeds, then power managment is supported and we can 5278 * enable auto-pm. 5279 */ 5280 if (rval == 0) { 5281 (void) sd_create_pm_components(devi, un); 5282 } else { 5283 un->un_power_level = SD_SPINDLE_ON; 5284 un->un_f_pm_is_enabled = FALSE; 5285 } 5286 5287 kmem_free(log_page_data, log_page_size); 5288 } 5289 } 5290 5291 5292 /* 5293 * Function: sd_create_pm_components 5294 * 5295 * Description: Initialize PM property. 5296 * 5297 * Context: Kernel thread context 5298 */ 5299 5300 static void 5301 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5302 { 5303 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5304 5305 ASSERT(!mutex_owned(SD_MUTEX(un))); 5306 5307 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5308 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5309 /* 5310 * When components are initially created they are idle, 5311 * power up any non-removables. 
5312 * Note: the return value of pm_raise_power can't be used 5313 * for determining if PM should be enabled for this device. 5314 * Even if you check the return values and remove this 5315 * property created above, the PM framework will not honor the 5316 * change after the first call to pm_raise_power. Hence, 5317 * removal of that property does not help if pm_raise_power 5318 * fails. In the case of removable media, the start/stop 5319 * will fail if the media is not present. 5320 */ 5321 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5322 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5323 mutex_enter(SD_MUTEX(un)); 5324 un->un_power_level = SD_SPINDLE_ON; 5325 mutex_enter(&un->un_pm_mutex); 5326 /* Set to on and not busy. */ 5327 un->un_pm_count = 0; 5328 } else { 5329 mutex_enter(SD_MUTEX(un)); 5330 un->un_power_level = SD_SPINDLE_OFF; 5331 mutex_enter(&un->un_pm_mutex); 5332 /* Set to off. */ 5333 un->un_pm_count = -1; 5334 } 5335 mutex_exit(&un->un_pm_mutex); 5336 mutex_exit(SD_MUTEX(un)); 5337 } else { 5338 un->un_power_level = SD_SPINDLE_ON; 5339 un->un_f_pm_is_enabled = FALSE; 5340 } 5341 } 5342 5343 5344 /* 5345 * Function: sd_ddi_suspend 5346 * 5347 * Description: Performs system power-down operations. This includes 5348 * setting the drive state to indicate its suspended so 5349 * that no new commands will be accepted. Also, wait for 5350 * all commands that are in transport or queued to a timer 5351 * for retry to complete. All timeout threads are cancelled. 5352 * 5353 * Return Code: DDI_FAILURE or DDI_SUCCESS 5354 * 5355 * Context: Kernel thread context 5356 */ 5357 5358 static int 5359 sd_ddi_suspend(dev_info_t *devi) 5360 { 5361 struct sd_lun *un; 5362 clock_t wait_cmds_complete; 5363 5364 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5365 if (un == NULL) { 5366 return (DDI_FAILURE); 5367 } 5368 5369 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5370 5371 mutex_enter(SD_MUTEX(un)); 5372 5373 /* Return success if the device is already suspended. */ 5374 if (un->un_state == SD_STATE_SUSPENDED) { 5375 mutex_exit(SD_MUTEX(un)); 5376 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5377 "device already suspended, exiting\n"); 5378 return (DDI_SUCCESS); 5379 } 5380 5381 /* Return failure if the device is being used by HA */ 5382 if (un->un_resvd_status & 5383 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5384 mutex_exit(SD_MUTEX(un)); 5385 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5386 "device in use by HA, exiting\n"); 5387 return (DDI_FAILURE); 5388 } 5389 5390 /* 5391 * Return failure if the device is in a resource wait 5392 * or power changing state. 5393 */ 5394 if ((un->un_state == SD_STATE_RWAIT) || 5395 (un->un_state == SD_STATE_PM_CHANGING)) { 5396 mutex_exit(SD_MUTEX(un)); 5397 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5398 "device in resource wait state, exiting\n"); 5399 return (DDI_FAILURE); 5400 } 5401 5402 5403 un->un_save_state = un->un_last_state; 5404 New_state(un, SD_STATE_SUSPENDED); 5405 5406 /* 5407 * Wait for all commands that are in transport or queued to a timer 5408 * for retry to complete. 5409 * 5410 * While waiting, no new commands will be accepted or sent because of 5411 * the new state we set above. 5412 * 5413 * Wait till current operation has completed. If we are in the resource 5414 * wait state (with an intr outstanding) then we need to wait till the 5415 * intr completes and starts the next cmd. We want to wait for 5416 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 
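	 *
	 * [Editorial sketch] The wait below uses the classic absolute-deadline
	 * cv_timedwait(9F) idiom; in generic form (all names hypothetical):
	 *
	 *	clock_t deadline = ddi_get_lbolt() +
	 *	    (timeout_secs * drv_usectohz(1000000));
	 *	while (work_pending != 0) {
	 *		if (cv_timedwait(&cv, &mu, deadline) == -1)
	 *			break;	 	/* deadline expired */
	 *	}
	 *
	 * cv_timedwait(9F) takes an absolute lbolt value, so the deadline is
	 * computed once and shared by every pass through the loop.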
5417 */ 5418 wait_cmds_complete = ddi_get_lbolt() + 5419 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5420 5421 while (un->un_ncmds_in_transport != 0) { 5422 /* 5423 * Fail if commands do not finish in the specified time. 5424 */ 5425 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5426 wait_cmds_complete) == -1) { 5427 /* 5428 * Undo the state changes made above. Everything 5429 * must go back to it's original value. 5430 */ 5431 Restore_state(un); 5432 un->un_last_state = un->un_save_state; 5433 /* Wake up any threads that might be waiting. */ 5434 cv_broadcast(&un->un_suspend_cv); 5435 mutex_exit(SD_MUTEX(un)); 5436 SD_ERROR(SD_LOG_IO_PM, un, 5437 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5438 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5439 return (DDI_FAILURE); 5440 } 5441 } 5442 5443 /* 5444 * Cancel SCSI watch thread and timeouts, if any are active 5445 */ 5446 5447 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5448 opaque_t temp_token = un->un_swr_token; 5449 mutex_exit(SD_MUTEX(un)); 5450 scsi_watch_suspend(temp_token); 5451 mutex_enter(SD_MUTEX(un)); 5452 } 5453 5454 if (un->un_reset_throttle_timeid != NULL) { 5455 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5456 un->un_reset_throttle_timeid = NULL; 5457 mutex_exit(SD_MUTEX(un)); 5458 (void) untimeout(temp_id); 5459 mutex_enter(SD_MUTEX(un)); 5460 } 5461 5462 if (un->un_dcvb_timeid != NULL) { 5463 timeout_id_t temp_id = un->un_dcvb_timeid; 5464 un->un_dcvb_timeid = NULL; 5465 mutex_exit(SD_MUTEX(un)); 5466 (void) untimeout(temp_id); 5467 mutex_enter(SD_MUTEX(un)); 5468 } 5469 5470 mutex_enter(&un->un_pm_mutex); 5471 if (un->un_pm_timeid != NULL) { 5472 timeout_id_t temp_id = un->un_pm_timeid; 5473 un->un_pm_timeid = NULL; 5474 mutex_exit(&un->un_pm_mutex); 5475 mutex_exit(SD_MUTEX(un)); 5476 (void) untimeout(temp_id); 5477 mutex_enter(SD_MUTEX(un)); 5478 } else { 5479 mutex_exit(&un->un_pm_mutex); 5480 } 5481 5482 if (un->un_retry_timeid != NULL) { 5483 timeout_id_t temp_id = un->un_retry_timeid; 5484 un->un_retry_timeid = NULL; 5485 mutex_exit(SD_MUTEX(un)); 5486 (void) untimeout(temp_id); 5487 mutex_enter(SD_MUTEX(un)); 5488 5489 if (un->un_retry_bp != NULL) { 5490 un->un_retry_bp->av_forw = un->un_waitq_headp; 5491 un->un_waitq_headp = un->un_retry_bp; 5492 if (un->un_waitq_tailp == NULL) { 5493 un->un_waitq_tailp = un->un_retry_bp; 5494 } 5495 un->un_retry_bp = NULL; 5496 un->un_retry_statp = NULL; 5497 } 5498 } 5499 5500 if (un->un_direct_priority_timeid != NULL) { 5501 timeout_id_t temp_id = un->un_direct_priority_timeid; 5502 un->un_direct_priority_timeid = NULL; 5503 mutex_exit(SD_MUTEX(un)); 5504 (void) untimeout(temp_id); 5505 mutex_enter(SD_MUTEX(un)); 5506 } 5507 5508 if (un->un_f_is_fibre == TRUE) { 5509 /* 5510 * Remove callbacks for insert and remove events 5511 */ 5512 if (un->un_insert_event != NULL) { 5513 mutex_exit(SD_MUTEX(un)); 5514 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5515 mutex_enter(SD_MUTEX(un)); 5516 un->un_insert_event = NULL; 5517 } 5518 5519 if (un->un_remove_event != NULL) { 5520 mutex_exit(SD_MUTEX(un)); 5521 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5522 mutex_enter(SD_MUTEX(un)); 5523 un->un_remove_event = NULL; 5524 } 5525 } 5526 5527 mutex_exit(SD_MUTEX(un)); 5528 5529 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5530 5531 return (DDI_SUCCESS); 5532 } 5533 5534 5535 /* 5536 * Function: sd_ddi_pm_suspend 5537 * 5538 * Description: Set the drive state to low power. 
5539 * Someone else is required to actually change the drive 5540 * power level. 5541 * 5542 * Arguments: un - driver soft state (unit) structure 5543 * 5544 * Return Code: DDI_FAILURE or DDI_SUCCESS 5545 * 5546 * Context: Kernel thread context 5547 */ 5548 5549 static int 5550 sd_ddi_pm_suspend(struct sd_lun *un) 5551 { 5552 ASSERT(un != NULL); 5553 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5554 5555 ASSERT(!mutex_owned(SD_MUTEX(un))); 5556 mutex_enter(SD_MUTEX(un)); 5557 5558 /* 5559 * Exit if power management is not enabled for this device, or if 5560 * the device is being used by HA. 5561 */ 5562 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5563 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5564 mutex_exit(SD_MUTEX(un)); 5565 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5566 return (DDI_SUCCESS); 5567 } 5568 5569 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5570 un->un_ncmds_in_driver); 5571 5572 /* 5573 * See if the device is not busy, ie.: 5574 * - we have no commands in the driver for this device 5575 * - not waiting for resources 5576 */ 5577 if ((un->un_ncmds_in_driver == 0) && 5578 (un->un_state != SD_STATE_RWAIT)) { 5579 /* 5580 * The device is not busy, so it is OK to go to low power state. 5581 * Indicate low power, but rely on someone else to actually 5582 * change it. 5583 */ 5584 mutex_enter(&un->un_pm_mutex); 5585 un->un_pm_count = -1; 5586 mutex_exit(&un->un_pm_mutex); 5587 un->un_power_level = SD_SPINDLE_OFF; 5588 } 5589 5590 mutex_exit(SD_MUTEX(un)); 5591 5592 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5593 5594 return (DDI_SUCCESS); 5595 } 5596 5597 5598 /* 5599 * Function: sd_ddi_resume 5600 * 5601 * Description: Performs system power-up operations.. 5602 * 5603 * Return Code: DDI_SUCCESS 5604 * DDI_FAILURE 5605 * 5606 * Context: Kernel thread context 5607 */ 5608 5609 static int 5610 sd_ddi_resume(dev_info_t *devi) 5611 { 5612 struct sd_lun *un; 5613 5614 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5615 if (un == NULL) { 5616 return (DDI_FAILURE); 5617 } 5618 5619 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 5620 5621 mutex_enter(SD_MUTEX(un)); 5622 Restore_state(un); 5623 5624 /* 5625 * Restore the state which was saved to give the 5626 * the right state in un_last_state 5627 */ 5628 un->un_last_state = un->un_save_state; 5629 /* 5630 * Note: throttle comes back at full. 5631 * Also note: this MUST be done before calling pm_raise_power 5632 * otherwise the system can get hung in biowait. The scenario where 5633 * this'll happen is under cpr suspend. Writing of the system 5634 * state goes through sddump, which writes 0 to un_throttle. If 5635 * writing the system state then fails, example if the partition is 5636 * too small, then cpr attempts a resume. If throttle isn't restored 5637 * from the saved value until after calling pm_raise_power then 5638 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 5639 * in biowait. 5640 */ 5641 un->un_throttle = un->un_saved_throttle; 5642 5643 /* 5644 * The chance of failure is very rare as the only command done in power 5645 * entry point is START command when you transition from 0->1 or 5646 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 5647 * which suspend was done. Ignore the return value as the resume should 5648 * not be failed. 
In the case of removable media the media need not be 5649 * inserted and hence there is a chance that raise power will fail with 5650 * media not present. 5651 */ 5652 if (un->un_f_attach_spinup) { 5653 mutex_exit(SD_MUTEX(un)); 5654 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5655 mutex_enter(SD_MUTEX(un)); 5656 } 5657 5658 /* 5659 * Don't broadcast to the suspend cv and therefore possibly 5660 * start I/O until after power has been restored. 5661 */ 5662 cv_broadcast(&un->un_suspend_cv); 5663 cv_broadcast(&un->un_state_cv); 5664 5665 /* restart thread */ 5666 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5667 scsi_watch_resume(un->un_swr_token); 5668 } 5669 5670 #if (defined(__fibre)) 5671 if (un->un_f_is_fibre == TRUE) { 5672 /* 5673 * Add callbacks for insert and remove events 5674 */ 5675 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5676 sd_init_event_callbacks(un); 5677 } 5678 } 5679 #endif 5680 5681 /* 5682 * Transport any pending commands to the target. 5683 * 5684 * If this is a low-activity device commands in queue will have to wait 5685 * until new commands come in, which may take awhile. Also, we 5686 * specifically don't check un_ncmds_in_transport because we know that 5687 * there really are no commands in progress after the unit was 5688 * suspended and we could have reached the throttle level, been 5689 * suspended, and have no new commands coming in for awhile. Highly 5690 * unlikely, but so is the low-activity disk scenario. 5691 */ 5692 ddi_xbuf_dispatch(un->un_xbuf_attr); 5693 5694 sd_start_cmds(un, NULL); 5695 mutex_exit(SD_MUTEX(un)); 5696 5697 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5698 5699 return (DDI_SUCCESS); 5700 } 5701 5702 5703 /* 5704 * Function: sd_ddi_pm_resume 5705 * 5706 * Description: Set the drive state to powered on. 5707 * Someone else is required to actually change the drive 5708 * power level. 5709 * 5710 * Arguments: un - driver soft state (unit) structure 5711 * 5712 * Return Code: DDI_SUCCESS 5713 * 5714 * Context: Kernel thread context 5715 */ 5716 5717 static int 5718 sd_ddi_pm_resume(struct sd_lun *un) 5719 { 5720 ASSERT(un != NULL); 5721 5722 ASSERT(!mutex_owned(SD_MUTEX(un))); 5723 mutex_enter(SD_MUTEX(un)); 5724 un->un_power_level = SD_SPINDLE_ON; 5725 5726 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5727 mutex_enter(&un->un_pm_mutex); 5728 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5729 un->un_pm_count++; 5730 ASSERT(un->un_pm_count == 0); 5731 /* 5732 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5733 * un_suspend_cv is for a system resume, not a power management 5734 * device resume. (4297749) 5735 * cv_broadcast(&un->un_suspend_cv); 5736 */ 5737 } 5738 mutex_exit(&un->un_pm_mutex); 5739 mutex_exit(SD_MUTEX(un)); 5740 5741 return (DDI_SUCCESS); 5742 } 5743 5744 5745 /* 5746 * Function: sd_pm_idletimeout_handler 5747 * 5748 * Description: A timer routine that's active only while a device is busy. 5749 * The purpose is to extend slightly the pm framework's busy 5750 * view of the device to prevent busy/idle thrashing for 5751 * back-to-back commands. Do this by comparing the current time 5752 * to the time at which the last command completed and when the 5753 * difference is greater than sd_pm_idletime, call 5754 * pm_idle_component. In addition to indicating idle to the pm 5755 * framework, update the chain type to again use the internal pm 5756 * layers of the driver. 
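 *
 * [Editorial sketch] This complements the usual DDI power management
 * busy/idle pairing, which in generic form looks like the following
 * (illustrative only, not a quote of this driver's I/O path):
 *
 *	(void) pm_busy_component(dip, 0);	- hold full power
 *	... issue the command ...
 *	(void) pm_idle_component(dip, 0);	- allow power savings
 *
 * The handler below simply defers the pm_idle_component() call until
 * the device has been quiet for sd_pm_idletime seconds.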
5757 * 5758 * Arguments: arg - driver soft state (unit) structure 5759 * 5760 * Context: Executes in a timeout(9F) thread context 5761 */ 5762 5763 static void 5764 sd_pm_idletimeout_handler(void *arg) 5765 { 5766 struct sd_lun *un = arg; 5767 5768 time_t now; 5769 5770 mutex_enter(&sd_detach_mutex); 5771 if (un->un_detach_count != 0) { 5772 /* Abort if the instance is detaching */ 5773 mutex_exit(&sd_detach_mutex); 5774 return; 5775 } 5776 mutex_exit(&sd_detach_mutex); 5777 5778 now = ddi_get_time(); 5779 /* 5780 * Grab both mutexes, in the proper order, since we're accessing 5781 * both PM and softstate variables. 5782 */ 5783 mutex_enter(SD_MUTEX(un)); 5784 mutex_enter(&un->un_pm_mutex); 5785 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5786 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5787 /* 5788 * Update the chain types. 5789 * This takes affect on the next new command received. 5790 */ 5791 if (un->un_f_non_devbsize_supported) { 5792 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5793 } else { 5794 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5795 } 5796 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 5797 5798 SD_TRACE(SD_LOG_IO_PM, un, 5799 "sd_pm_idletimeout_handler: idling device\n"); 5800 (void) pm_idle_component(SD_DEVINFO(un), 0); 5801 un->un_pm_idle_timeid = NULL; 5802 } else { 5803 un->un_pm_idle_timeid = 5804 timeout(sd_pm_idletimeout_handler, un, 5805 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 5806 } 5807 mutex_exit(&un->un_pm_mutex); 5808 mutex_exit(SD_MUTEX(un)); 5809 } 5810 5811 5812 /* 5813 * Function: sd_pm_timeout_handler 5814 * 5815 * Description: Callback to tell framework we are idle. 5816 * 5817 * Context: timeout(9f) thread context. 5818 */ 5819 5820 static void 5821 sd_pm_timeout_handler(void *arg) 5822 { 5823 struct sd_lun *un = arg; 5824 5825 (void) pm_idle_component(SD_DEVINFO(un), 0); 5826 mutex_enter(&un->un_pm_mutex); 5827 un->un_pm_timeid = NULL; 5828 mutex_exit(&un->un_pm_mutex); 5829 } 5830 5831 5832 /* 5833 * Function: sdpower 5834 * 5835 * Description: PM entry point. 5836 * 5837 * Return Code: DDI_SUCCESS 5838 * DDI_FAILURE 5839 * 5840 * Context: Kernel thread context 5841 */ 5842 5843 static int 5844 sdpower(dev_info_t *devi, int component, int level) 5845 { 5846 struct sd_lun *un; 5847 int instance; 5848 int rval = DDI_SUCCESS; 5849 uint_t i, log_page_size, maxcycles, ncycles; 5850 uchar_t *log_page_data; 5851 int log_sense_page; 5852 int medium_present; 5853 time_t intvlp; 5854 dev_t dev; 5855 struct pm_trans_data sd_pm_tran_data; 5856 uchar_t save_state; 5857 int sval; 5858 uchar_t state_before_pm; 5859 int got_semaphore_here; 5860 5861 instance = ddi_get_instance(devi); 5862 5863 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 5864 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 5865 component != 0) { 5866 return (DDI_FAILURE); 5867 } 5868 5869 dev = sd_make_device(SD_DEVINFO(un)); 5870 5871 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 5872 5873 /* 5874 * Must synchronize power down with close. 5875 * Attempt to decrement/acquire the open/close semaphore, 5876 * but do NOT wait on it. If it's not greater than zero, 5877 * ie. it can't be decremented without waiting, then 5878 * someone else, either open or close, already has it 5879 * and the try returns 0. Use that knowledge here to determine 5880 * if it's OK to change the device power level. 5881 * Also, only increment it on exit if it was decremented, ie. gotten, 5882 * here. 
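	 *
	 * [Editorial sketch] The try-then-release pattern in generic form
	 * (names hypothetical):
	 *
	 *	int got = sema_tryp(&sem);	- nonzero iff acquired
	 *	...
	 *	if (got != 0)
	 *		sema_v(&sem);		- release only if acquired
	 *
	 * sema_tryp(9F) never blocks, which is what lets this entry point
	 * refuse a power-down instead of stalling behind open/close.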
	 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver; if the semaphore was
	 * not gotten here it indicates an open or close is being processed.
	 * At the same time somebody is requesting to go low power, which
	 * can't happen, therefore we need to return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");
		return (DDI_FAILURE);
	}

	/*
	 * If the device is OFFLINE the disk is effectively dead; in our
	 * case a power change requires sending commands to the device,
	 * which would fail anyway, so just return here.
	 *
	 * Power changes to a device that's OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");
		return (DDI_FAILURE);
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the "pm-capable" property is set to TRUE by HBA drivers,
	 * bypass the following checking; otherwise, check the log
	 * sense information for this device.
	 */
	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to understand whether the
		 * power-cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");
			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
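			 *
			 * [Editorial sketch] This restore/broadcast epilogue
			 * recurs throughout sdpower(); a hypothetical local
			 * helper could factor it out:
			 *
			 *	static void
			 *	sd_pm_state_restore(struct sd_lun *un,
			 *	    uchar_t state)
			 *	{
			 *		mutex_enter(SD_MUTEX(un));
			 *		un->un_state = state;
			 *		cv_broadcast(&un->un_suspend_cv);
			 *		mutex_exit(SD_MUTEX(un));
			 *	}
			 *
			 * Each exit path would then be a single call.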
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");
			return (DDI_FAILURE);
		}

		/*
		 * From the page data, convert the essential information to
		 * pm_trans_data.
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1E] << 8)  | log_page_data[0x1F];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8+i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call the pm_trans_check routine to get the Ok from
		 * the global policy.
		 */

		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not OK to power cycle, or an error occurred in the
			 * parameters passed. pm_trans_check() returns (in
			 * intvlp) the advised time to wait before the next
			 * power cycle. Based on that interval we are supposed
			 * to pretend we are busy so that the pm framework
			 * will never call our power entry point. To do so,
			 * install a timeout handler and wait for the
			 * recommended time to elapse, after which power
			 * management can be effective again.
			 *
			 * To effect this behavior, call pm_busy_component to
			 * indicate to the framework this device is busy.
			 * By not adjusting un_pm_count the rest of PM in
			 * the driver will function normally, independent
			 * of this; but because the framework is told the
			 * device is busy it won't attempt powering down
			 * until it gets a matching idle. The timeout handler
			 * sends that idle.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within
			 * sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");
			return (DDI_FAILURE);

		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");
			return (DDI_FAILURE);
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring.
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any commands getting processed
		 * in the driver when we get here. Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now suspend the device completely before the spindle
		 * is turned off.
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine; it uses its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (level == SD_SPINDLE_ON) {
		(void) sd_send_scsi_TEST_UNIT_READY(un,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

	sval = sd_send_scsi_START_STOP_UNIT(un,
	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
	    SD_PATH_DIRECT);
	/* Command failed, check for media present. */
	if ((sval == ENXIO) && un->un_f_has_removable_media) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *   if a spindle off with media present fails,
	 *	then restore the state and return an error.
	 *   else if a spindle on fails,
	 *	then return an error (there's no state to restore).
	 * In all other cases we setup for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present. Put the level back by calling
			 * sd_ddi_pm_resume() and set the state back to
			 * its previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (un->un_f_monitor_media_state) {
			/*
			 * Terminate watch thread in case of removable media
			 * devices going into low power state.
This is as per 6199 * the requirements of pm framework, otherwise commands 6200 * will be generated for the device (through watch 6201 * thread), even when the device is in low power state. 6202 */ 6203 mutex_enter(SD_MUTEX(un)); 6204 un->un_f_watcht_stopped = FALSE; 6205 if (un->un_swr_token != NULL) { 6206 opaque_t temp_token = un->un_swr_token; 6207 un->un_f_watcht_stopped = TRUE; 6208 un->un_swr_token = NULL; 6209 mutex_exit(SD_MUTEX(un)); 6210 (void) scsi_watch_request_terminate(temp_token, 6211 SCSI_WATCH_TERMINATE_WAIT); 6212 } else { 6213 mutex_exit(SD_MUTEX(un)); 6214 } 6215 } 6216 break; 6217 6218 default: /* The level requested is spindle on... */ 6219 /* 6220 * Legacy behavior: return success on a failed spinup 6221 * if there is no media in the drive. 6222 * Do this by looking at medium_present here. 6223 */ 6224 if ((sval != 0) && medium_present) { 6225 /* The start command from above failed */ 6226 rval = DDI_FAILURE; 6227 break; 6228 } 6229 /* 6230 * The start command from above succeeded 6231 * Resume the devices now that we have 6232 * started the disks 6233 */ 6234 (void) sd_ddi_pm_resume(un); 6235 6236 /* 6237 * Resume the watch thread since it was suspended 6238 * when the device went into low power mode. 6239 */ 6240 if (un->un_f_monitor_media_state) { 6241 mutex_enter(SD_MUTEX(un)); 6242 if (un->un_f_watcht_stopped == TRUE) { 6243 opaque_t temp_token; 6244 6245 un->un_f_watcht_stopped = FALSE; 6246 mutex_exit(SD_MUTEX(un)); 6247 temp_token = scsi_watch_request_submit( 6248 SD_SCSI_DEVP(un), 6249 sd_check_media_time, 6250 SENSE_LENGTH, sd_media_watch_cb, 6251 (caddr_t)dev); 6252 mutex_enter(SD_MUTEX(un)); 6253 un->un_swr_token = temp_token; 6254 } 6255 mutex_exit(SD_MUTEX(un)); 6256 } 6257 } 6258 if (got_semaphore_here != 0) { 6259 sema_v(&un->un_semoclose); 6260 } 6261 /* 6262 * On exit put the state back to it's original value 6263 * and broadcast to anyone waiting for the power 6264 * change completion. 6265 */ 6266 mutex_enter(SD_MUTEX(un)); 6267 un->un_state = state_before_pm; 6268 cv_broadcast(&un->un_suspend_cv); 6269 mutex_exit(SD_MUTEX(un)); 6270 6271 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6272 6273 return (rval); 6274 } 6275 6276 6277 6278 /* 6279 * Function: sdattach 6280 * 6281 * Description: Driver's attach(9e) entry point function. 6282 * 6283 * Arguments: devi - opaque device info handle 6284 * cmd - attach type 6285 * 6286 * Return Code: DDI_SUCCESS 6287 * DDI_FAILURE 6288 * 6289 * Context: Kernel thread context 6290 */ 6291 6292 static int 6293 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6294 { 6295 switch (cmd) { 6296 case DDI_ATTACH: 6297 return (sd_unit_attach(devi)); 6298 case DDI_RESUME: 6299 return (sd_ddi_resume(devi)); 6300 default: 6301 break; 6302 } 6303 return (DDI_FAILURE); 6304 } 6305 6306 6307 /* 6308 * Function: sddetach 6309 * 6310 * Description: Driver's detach(9E) entry point function. 
6311 * 6312 * Arguments: devi - opaque device info handle 6313 * cmd - detach type 6314 * 6315 * Return Code: DDI_SUCCESS 6316 * DDI_FAILURE 6317 * 6318 * Context: Kernel thread context 6319 */ 6320 6321 static int 6322 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6323 { 6324 switch (cmd) { 6325 case DDI_DETACH: 6326 return (sd_unit_detach(devi)); 6327 case DDI_SUSPEND: 6328 return (sd_ddi_suspend(devi)); 6329 default: 6330 break; 6331 } 6332 return (DDI_FAILURE); 6333 } 6334 6335 6336 /* 6337 * Function: sd_sync_with_callback 6338 * 6339 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6340 * state while the callback routine is active. 6341 * 6342 * Arguments: un: softstate structure for the instance 6343 * 6344 * Context: Kernel thread context 6345 */ 6346 6347 static void 6348 sd_sync_with_callback(struct sd_lun *un) 6349 { 6350 ASSERT(un != NULL); 6351 6352 mutex_enter(SD_MUTEX(un)); 6353 6354 ASSERT(un->un_in_callback >= 0); 6355 6356 while (un->un_in_callback > 0) { 6357 mutex_exit(SD_MUTEX(un)); 6358 delay(2); 6359 mutex_enter(SD_MUTEX(un)); 6360 } 6361 6362 mutex_exit(SD_MUTEX(un)); 6363 } 6364 6365 /* 6366 * Function: sd_unit_attach 6367 * 6368 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6369 * the soft state structure for the device and performs 6370 * all necessary structure and device initializations. 6371 * 6372 * Arguments: devi: the system's dev_info_t for the device. 6373 * 6374 * Return Code: DDI_SUCCESS if attach is successful. 6375 * DDI_FAILURE if any part of the attach fails. 6376 * 6377 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6378 * Kernel thread context only. Can sleep. 6379 */ 6380 6381 static int 6382 sd_unit_attach(dev_info_t *devi) 6383 { 6384 struct scsi_device *devp; 6385 struct sd_lun *un; 6386 char *variantp; 6387 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6388 int instance; 6389 int rval; 6390 int wc_enabled; 6391 int tgt; 6392 uint64_t capacity; 6393 uint_t lbasize = 0; 6394 dev_info_t *pdip = ddi_get_parent(devi); 6395 int offbyone = 0; 6396 int geom_label_valid = 0; 6397 #if defined(__sparc) 6398 int max_xfer_size; 6399 #endif 6400 6401 /* 6402 * Retrieve the target driver's private data area. This was set 6403 * up by the HBA. 6404 */ 6405 devp = ddi_get_driver_private(devi); 6406 6407 /* 6408 * Retrieve the target ID of the device. 6409 */ 6410 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6411 SCSI_ADDR_PROP_TARGET, -1); 6412 6413 /* 6414 * Since we have no idea what state things were left in by the last 6415 * user of the device, set up some 'default' settings, ie. turn 'em 6416 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6417 * Do this before the scsi_probe, which sends an inquiry. 6418 * This is a fix for bug (4430280). 6419 * Of special importance is wide-xfer. The drive could have been left 6420 * in wide transfer mode by the last driver to communicate with it, 6421 * this includes us. If that's the case, and if the following is not 6422 * setup properly or we don't re-negotiate with the drive prior to 6423 * transferring data to/from the drive, it causes bus parity errors, 6424 * data overruns, and unexpected interrupts. This first occurred when 6425 * the fix for bug (4378686) was made. 
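	 *
	 * [Editorial note] In the scsi_ifsetcap(9F) calls below, the third
	 * argument is the new capability value (0 = disable) and the fourth
	 * is "whom" (1 = apply to this target only). For example:
	 *
	 *	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
	 *
	 * turns wide transfers off for just this device until they are
	 * renegotiated.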
6426 */ 6427 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6428 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6429 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6430 6431 /* 6432 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6433 * on a target. Setting it per lun instance actually sets the 6434 * capability of this target, which affects those luns already 6435 * attached on the same target. So during attach, we can only disable 6436 * this capability only when no other lun has been attached on this 6437 * target. By doing this, we assume a target has the same tagged-qing 6438 * capability for every lun. The condition can be removed when HBA 6439 * is changed to support per lun based tagged-qing capability. 6440 */ 6441 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6442 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6443 } 6444 6445 /* 6446 * Use scsi_probe() to issue an INQUIRY command to the device. 6447 * This call will allocate and fill in the scsi_inquiry structure 6448 * and point the sd_inq member of the scsi_device structure to it. 6449 * If the attach succeeds, then this memory will not be de-allocated 6450 * (via scsi_unprobe()) until the instance is detached. 6451 */ 6452 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6453 goto probe_failed; 6454 } 6455 6456 /* 6457 * Check the device type as specified in the inquiry data and 6458 * claim it if it is of a type that we support. 6459 */ 6460 switch (devp->sd_inq->inq_dtype) { 6461 case DTYPE_DIRECT: 6462 break; 6463 case DTYPE_RODIRECT: 6464 break; 6465 case DTYPE_OPTICAL: 6466 break; 6467 case DTYPE_NOTPRESENT: 6468 default: 6469 /* Unsupported device type; fail the attach. */ 6470 goto probe_failed; 6471 } 6472 6473 /* 6474 * Allocate the soft state structure for this unit. 6475 * 6476 * We rely upon this memory being set to all zeroes by 6477 * ddi_soft_state_zalloc(). We assume that any member of the 6478 * soft state structure that is not explicitly initialized by 6479 * this routine will have a value of zero. 6480 */ 6481 instance = ddi_get_instance(devp->sd_dev); 6482 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6483 goto probe_failed; 6484 } 6485 6486 /* 6487 * Retrieve a pointer to the newly-allocated soft state. 6488 * 6489 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6490 * was successful, unless something has gone horribly wrong and the 6491 * ddi's soft state internals are corrupt (in which case it is 6492 * probably better to halt here than just fail the attach....) 6493 */ 6494 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6495 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6496 instance); 6497 /*NOTREACHED*/ 6498 } 6499 6500 /* 6501 * Link the back ptr of the driver soft state to the scsi_device 6502 * struct for this lun. 6503 * Save a pointer to the softstate in the driver-private area of 6504 * the scsi_device struct. 6505 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6506 * we first set un->un_sd below. 6507 */ 6508 un->un_sd = devp; 6509 devp->sd_private = (opaque_t)un; 6510 6511 /* 6512 * The following must be after devp is stored in the soft state struct. 6513 */ 6514 #ifdef SDDEBUG 6515 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6516 "%s_unit_attach: un:0x%p instance:%d\n", 6517 ddi_driver_name(devi), un, instance); 6518 #endif 6519 6520 /* 6521 * Set up the device type and node type (for the minor nodes). 
	 * By default we assume that the device can at least support the
	 * Common Command Set. Call it a CD-ROM if it reports itself
	 * as a RODIRECT device.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_RODIRECT:
		un->un_node_type = DDI_NT_CD_CHAN;
		un->un_ctype = CTYPE_CDROM;
		break;
	case DTYPE_OPTICAL:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype = CTYPE_ROD;
		break;
	default:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype = CTYPE_CCS;
		break;
	}

	/*
	 * Try to read the interconnect type from the HBA.
	 *
	 * Note: This driver is currently compiled as two binaries, a parallel
	 * scsi version (sd) and a fibre channel version (ssd). All functional
	 * differences are determined at compile time. In the future a single
	 * binary will be provided and the interconnect type will be used to
	 * differentiate between fibre and parallel scsi behaviors. At that
	 * time it will be necessary for all fibre channel HBAs to support
	 * this property.
	 *
	 * Set un_f_is_fibre to TRUE (default to fibre).
	 */
	un->un_f_is_fibre = TRUE;
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_SSA:
		un->un_interconnect_type = SD_INTERCONNECT_SSA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
		break;
	case INTERCONNECT_PARALLEL:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
		break;
	case INTERCONNECT_SATA:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_SATA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
		break;
	case INTERCONNECT_FIBRE:
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
		break;
	case INTERCONNECT_FABRIC:
		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
		un->un_node_type = DDI_NT_BLOCK_FABRIC;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
		break;
	default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
		/*
		 * The HBA does not support the "interconnect-type" property
		 * (or did not provide a recognized type).
		 *
		 * Note: This will be obsoleted when a single fibre channel
		 * and parallel scsi driver is delivered. In the meantime the
		 * interconnect type will be set to the platform default. If
		 * that type is not parallel SCSI, it means that we should be
		 * assuming "ssd" semantics. However, here this also means
		 * that the FC HBA is not supporting the "interconnect-type"
		 * property like we expect it to, so log this occurrence.
		 */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		if (!SD_IS_PARALLEL_SCSI(un)) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_FIBRE\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_PARALLEL\n", un);
			un->un_f_is_fibre = FALSE;
		}
#else
		/*
		 * Note: This source will be implemented when a single fibre
		 * channel and parallel scsi driver is delivered.
		 * The default will be to assume that if a device does not
		 * support the "interconnect-type" property it is a parallel
		 * SCSI HBA, and we will set the interconnect type for
		 * parallel scsi.
		 */
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		un->un_f_is_fibre = FALSE;
#endif
		break;
	}

	if (un->un_f_is_fibre == TRUE) {
		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
		    SCSI_VERSION_3) {
			switch (un->un_interconnect_type) {
			case SD_INTERCONNECT_FIBRE:
			case SD_INTERCONNECT_SSA:
				un->un_node_type = DDI_NT_BLOCK_WWN;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Initialize the Request Sense command for the target.
	 */
	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
		goto alloc_rqs_failed;
	}

	/*
	 * Set un_retry_count with SD_RETRY_COUNT; this is OK for SPARC,
	 * which has separate binaries for sd and ssd.
	 *
	 * x86 has one binary, and un_retry_count is set based on the
	 * interconnect type. The hardcoded values will go away when SPARC
	 * uses one binary for sd and ssd. These hardcoded values must match
	 * SD_RETRY_COUNT in sddef.h.
	 * The value used is based on interconnect type:
	 * fibre = 3, parallel = 5.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
#else
	un->un_retry_count = SD_RETRY_COUNT;
#endif

	/*
	 * Set the per disk retry count to the default number of retries
	 * for disks and CDROMs. This value can be overridden by the
	 * disk property list or an entry in sd.conf.
	 */
	un->un_notready_retry_count =
	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
	    : DISK_NOT_READY_RETRY_COUNT(un);

	/*
	 * Set the busy retry count to the default value of un_retry_count.
	 * This can be overridden by entries in sd.conf or the device
	 * config table.
	 */
	un->un_busy_retry_count = un->un_retry_count;

	/*
	 * Init the reset threshold for retries. This number determines
	 * how many retries must be performed before a reset can be issued
	 * (for certain error conditions). This can be overridden by entries
	 * in sd.conf or the device config table.
	 */
	un->un_reset_retry_count = (un->un_retry_count / 2);

	/*
	 * Set the victim_retry_count to the default un_retry_count.
	 */
	un->un_victim_retry_count = (2 * un->un_retry_count);

	/*
	 * Set the reservation release timeout to the default value of
	 * 5 seconds. This can be overridden by entries in ssd.conf or the
	 * device config table.
	 */
	un->un_reserve_release_time = 5;

	/*
	 * Set up the default maximum transfer size. Note that this may
	 * get updated later in the attach, when setting up default wide
	 * operations for disks.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
	un->un_partial_dma_supported = 1;
#else
	un->un_max_xfer_size = (uint_t)maxphys;
#endif

	/*
	 * Get "allow bus device reset" property (defaults to "enabled" if
	 * the property was not defined). This is to disable bus resets for
	 * certain kinds of error recovery. Note: In the future when a
	 * run-time fibre check is available the soft state flag should
	 * default to enabled.
6715 */ 6716 if (un->un_f_is_fibre == TRUE) { 6717 un->un_f_allow_bus_device_reset = TRUE; 6718 } else { 6719 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6720 "allow-bus-device-reset", 1) != 0) { 6721 un->un_f_allow_bus_device_reset = TRUE; 6722 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6723 "sd_unit_attach: un:0x%p Bus device reset " 6724 "enabled\n", un); 6725 } else { 6726 un->un_f_allow_bus_device_reset = FALSE; 6727 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6728 "sd_unit_attach: un:0x%p Bus device reset " 6729 "disabled\n", un); 6730 } 6731 } 6732 6733 /* 6734 * Check if this is an ATAPI device. ATAPI devices use Group 1 6735 * Read/Write commands and Group 2 Mode Sense/Select commands. 6736 * 6737 * Note: The "obsolete" way of doing this is to check for the "atapi" 6738 * property. The new "variant" property with a value of "atapi" has been 6739 * introduced so that future 'variants' of standard SCSI behavior (like 6740 * atapi) could be specified by the underlying HBA drivers by supplying 6741 * a new value for the "variant" property, instead of having to define a 6742 * new property. 6743 */ 6744 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6745 un->un_f_cfg_is_atapi = TRUE; 6746 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6747 "sd_unit_attach: un:0x%p Atapi device\n", un); 6748 } 6749 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6750 &variantp) == DDI_PROP_SUCCESS) { 6751 if (strcmp(variantp, "atapi") == 0) { 6752 un->un_f_cfg_is_atapi = TRUE; 6753 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6754 "sd_unit_attach: un:0x%p Atapi device\n", un); 6755 } 6756 ddi_prop_free(variantp); 6757 } 6758 6759 un->un_cmd_timeout = SD_IO_TIME; 6760 6761 /* Info on current states, statuses, etc. (Updated frequently) */ 6762 un->un_state = SD_STATE_NORMAL; 6763 un->un_last_state = SD_STATE_NORMAL; 6764 6765 /* Control & status info for command throttling */ 6766 un->un_throttle = sd_max_throttle; 6767 un->un_saved_throttle = sd_max_throttle; 6768 un->un_min_throttle = sd_min_throttle; 6769 6770 if (un->un_f_is_fibre == TRUE) { 6771 un->un_f_use_adaptive_throttle = TRUE; 6772 } else { 6773 un->un_f_use_adaptive_throttle = FALSE; 6774 } 6775 6776 /* Removable media support. */ 6777 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6778 un->un_mediastate = DKIO_NONE; 6779 un->un_specified_mediastate = DKIO_NONE; 6780 6781 /* CVs for suspend/resume (PM or DR) */ 6782 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6783 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6784 6785 /* Power management support. */ 6786 un->un_power_level = SD_SPINDLE_UNINIT; 6787 6788 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6789 un->un_f_wcc_inprog = 0; 6790 6791 /* 6792 * The open/close semaphore is used to serialize threads executing 6793 * in the driver's open & close entry point routines for a given 6794 * instance. 6795 */ 6796 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 6797 6798 /* 6799 * The conf file entry and softstate variable is a forceful override, 6800 * meaning a non-zero value must be entered to change the default. 6801 */ 6802 un->un_f_disksort_disabled = FALSE; 6803 6804 /* 6805 * Retrieve the properties from the static driver table or the driver 6806 * configuration file (.conf) for this unit and update the soft state 6807 * for the device as needed for the indicated properties. 
	 * Note: the property configuration needs to occur here as some of the
	 * following routines may have dependencies on soft state flags set
	 * as part of the driver property configuration.
	 */
	sd_read_unit_properties(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);

	/*
	 * A device is treated as hotpluggable only if it has the
	 * "hotpluggable" property; otherwise it is regarded as
	 * non-hotpluggable.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
	    -1) != -1) {
		un->un_f_is_hotpluggable = TRUE;
	}

	/*
	 * Set the unit's attributes (flags) according to "hotpluggable" and
	 * the RMB bit in the INQUIRY data.
	 */
	sd_set_unit_attributes(un, devi);

	/*
	 * By default, we mark the capacity, lbasize, and geometry
	 * as invalid. Only if we successfully read a valid capacity
	 * will we update the un_blockcount and un_tgt_blocksize with the
	 * valid values (the geometry will be validated later).
	 */
	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;

	/*
	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
	 * otherwise.
	 */
	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
	un->un_blockcount = 0;

	/*
	 * Set up the per-instance info needed to determine the correct
	 * CDBs and other info for issuing commands to the target.
	 */
	sd_init_cdb_limits(un);

	/*
	 * Set up the IO chains to use, based upon the target type.
	 */
	if (un->un_f_non_devbsize_supported) {
		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
	} else {
		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
	}
	un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;

	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);


	if (ISCD(un)) {
		un->un_additional_codes = sd_additional_codes;
	} else {
		un->un_additional_codes = NULL;
	}

	/*
	 * Create the kstats here so they can be available for attach-time
	 * routines that send commands to the unit (either polled or via
	 * sd_send_scsi_cmd).
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats here, before any routines using the
	 *	   iopath (i.e. sd_send_scsi_cmd).
	 *	2) Instantiate and initialize the partition stats
	 *	   (sd_set_pstats).
	 *	3) Initialize the error stats (sd_set_errstats), following
	 *	   sd_validate_geometry(), sd_register_devid(),
	 *	   and sd_cache_control().
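	 *
	 * [Editorial note] Once installed, these kstats are visible from
	 * userland; for example (illustrative only, instance number
	 * hypothetical):
	 *
	 *	$ kstat -m sd -i 0 -c disk
	 *
	 * reports the KSTAT_TYPE_IO counters (read/write operation and byte
	 * counts plus wait and run queue times) that the I/O path updates
	 * for this unit.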
6891 */ 6892 6893 un->un_stats = kstat_create(sd_label, instance, 6894 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 6895 if (un->un_stats != NULL) { 6896 un->un_stats->ks_lock = SD_MUTEX(un); 6897 kstat_install(un->un_stats); 6898 } 6899 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6900 "sd_unit_attach: un:0x%p un_stats created\n", un); 6901 6902 sd_create_errstats(un, instance); 6903 if (un->un_errstats == NULL) { 6904 goto create_errstats_failed; 6905 } 6906 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6907 "sd_unit_attach: un:0x%p errstats created\n", un); 6908 6909 /* 6910 * The following if/else code was relocated here from below as part 6911 * of the fix for bug (4430280). However with the default setup added 6912 * on entry to this routine, it's no longer absolutely necessary for 6913 * this to be before the call to sd_spin_up_unit. 6914 */ 6915 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 6916 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 6917 (devp->sd_inq->inq_ansi == 5)) && 6918 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 6919 6920 /* 6921 * If tagged queueing is supported by the target 6922 * and by the host adapter then we will enable it 6923 */ 6924 un->un_tagflags = 0; 6925 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 6926 (un->un_f_arq_enabled == TRUE)) { 6927 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 6928 1, 1) == 1) { 6929 un->un_tagflags = FLAG_STAG; 6930 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6931 "sd_unit_attach: un:0x%p tag queueing " 6932 "enabled\n", un); 6933 } else if (scsi_ifgetcap(SD_ADDRESS(un), 6934 "untagged-qing", 0) == 1) { 6935 un->un_f_opt_queueing = TRUE; 6936 un->un_saved_throttle = un->un_throttle = 6937 min(un->un_throttle, 3); 6938 } else { 6939 un->un_f_opt_queueing = FALSE; 6940 un->un_saved_throttle = un->un_throttle = 1; 6941 } 6942 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 6943 == 1) && (un->un_f_arq_enabled == TRUE)) { 6944 /* The Host Adapter supports internal queueing. */ 6945 un->un_f_opt_queueing = TRUE; 6946 un->un_saved_throttle = un->un_throttle = 6947 min(un->un_throttle, 3); 6948 } else { 6949 un->un_f_opt_queueing = FALSE; 6950 un->un_saved_throttle = un->un_throttle = 1; 6951 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6952 "sd_unit_attach: un:0x%p no tag queueing\n", un); 6953 } 6954 6955 /* 6956 * Enable large transfers for SATA/SAS drives 6957 */ 6958 if (SD_IS_SERIAL(un)) { 6959 un->un_max_xfer_size = 6960 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6961 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6962 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6963 "sd_unit_attach: un:0x%p max transfer " 6964 "size=0x%x\n", un, un->un_max_xfer_size); 6965 6966 } 6967 6968 /* Setup or tear down default wide operations for disks */ 6969 6970 /* 6971 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 6972 * and "ssd_max_xfer_size" to exist simultaneously on the same 6973 * system and be set to different values. In the future this 6974 * code may need to be updated when the ssd module is 6975 * obsoleted and removed from the system. 
(4299588) 6976 */ 6977 if (SD_IS_PARALLEL_SCSI(un) && 6978 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6979 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6980 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6981 1, 1) == 1) { 6982 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6983 "sd_unit_attach: un:0x%p Wide Transfer " 6984 "enabled\n", un); 6985 } 6986 6987 /* 6988 * If tagged queuing has also been enabled, then 6989 * enable large xfers 6990 */ 6991 if (un->un_saved_throttle == sd_max_throttle) { 6992 un->un_max_xfer_size = 6993 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6994 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6995 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6996 "sd_unit_attach: un:0x%p max transfer " 6997 "size=0x%x\n", un, un->un_max_xfer_size); 6998 } 6999 } else { 7000 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7001 0, 1) == 1) { 7002 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7003 "sd_unit_attach: un:0x%p " 7004 "Wide Transfer disabled\n", un); 7005 } 7006 } 7007 } else { 7008 un->un_tagflags = FLAG_STAG; 7009 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7010 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7011 } 7012 7013 /* 7014 * If this target supports LUN reset, try to enable it. 7015 */ 7016 if (un->un_f_lun_reset_enabled) { 7017 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7018 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7019 "un:0x%p lun_reset capability set\n", un); 7020 } else { 7021 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7022 "un:0x%p lun-reset capability not set\n", un); 7023 } 7024 } 7025 7026 /* 7027 * Adjust the maximum transfer size. This is to fix 7028 * the problem of partial DMA support on SPARC. Some 7029 * HBA driver, like aac, has very small dma_attr_maxxfer 7030 * size, which requires partial DMA support on SPARC. 7031 * In the future the SPARC pci nexus driver may solve 7032 * the problem instead of this fix. 7033 */ 7034 #if defined(__sparc) 7035 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7036 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7037 un->un_max_xfer_size = max_xfer_size; 7038 un->un_partial_dma_supported = 1; 7039 } 7040 #endif 7041 7042 /* 7043 * Set PKT_DMA_PARTIAL flag. 7044 */ 7045 if (un->un_partial_dma_supported == 1) { 7046 un->un_pkt_flags = PKT_DMA_PARTIAL; 7047 } else { 7048 un->un_pkt_flags = 0; 7049 } 7050 7051 /* 7052 * At this point in the attach, we have enough info in the 7053 * soft state to be able to issue commands to the target. 7054 * 7055 * All command paths used below MUST issue their commands as 7056 * SD_PATH_DIRECT. This is important as intermediate layers 7057 * are not all initialized yet (such as PM). 7058 */ 7059 7060 /* 7061 * Send a TEST UNIT READY command to the device. This should clear 7062 * any outstanding UNIT ATTENTION that may be present. 7063 * 7064 * Note: Don't check for success, just track if there is a reservation, 7065 * this is a throw away command to clear any unit attentions. 7066 * 7067 * Note: This MUST be the first command issued to the target during 7068 * attach to ensure power on UNIT ATTENTIONS are cleared. 7069 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7070 * with attempts at spinning up a device with no media. 
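	 *
	 * (A reservation conflict (EACCES) on this command is not treated
	 * as an error here; it is only recorded in reservation_flag, as
	 * the check immediately below shows.)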
7071 */ 7072 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 7073 reservation_flag = SD_TARGET_IS_RESERVED; 7074 } 7075 7076 /* 7077 * If the device is NOT a removable media device, attempt to spin 7078 * it up (using the START_STOP_UNIT command) and read its capacity 7079 * (using the READ CAPACITY command). Note, however, that either 7080 * of these could fail and in some cases we would continue with 7081 * the attach despite the failure (see below). 7082 */ 7083 if (un->un_f_descr_format_supported) { 7084 switch (sd_spin_up_unit(un)) { 7085 case 0: 7086 /* 7087 * Spin-up was successful; now try to read the 7088 * capacity. If successful then save the results 7089 * and mark the capacity & lbasize as valid. 7090 */ 7091 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7092 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7093 7094 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7095 &lbasize, SD_PATH_DIRECT)) { 7096 case 0: { 7097 if (capacity > DK_MAX_BLOCKS) { 7098 #ifdef _LP64 7099 if (capacity + 1 > 7100 SD_GROUP1_MAX_ADDRESS) { 7101 /* 7102 * Enable descriptor format 7103 * sense data so that we can 7104 * get 64 bit sense data 7105 * fields. 7106 */ 7107 sd_enable_descr_sense(un); 7108 } 7109 #else 7110 /* 32-bit kernels can't handle this */ 7111 scsi_log(SD_DEVINFO(un), 7112 sd_label, CE_WARN, 7113 "disk has %llu blocks, which " 7114 "is too large for a 32-bit " 7115 "kernel", capacity); 7116 7117 #if defined(__i386) || defined(__amd64) 7118 /* 7119 * 1TB disk was treated as (1T - 512)B 7120 * in the past, so that it might have 7121 * valid VTOC and solaris partitions, 7122 * we have to allow it to continue to 7123 * work. 7124 */ 7125 if (capacity -1 > DK_MAX_BLOCKS) 7126 #endif 7127 goto spinup_failed; 7128 #endif 7129 } 7130 7131 /* 7132 * Here it's not necessary to check the case: 7133 * the capacity of the device is bigger than 7134 * what the max hba cdb can support. Because 7135 * sd_send_scsi_READ_CAPACITY will retrieve 7136 * the capacity by sending USCSI command, which 7137 * is constrained by the max hba cdb. Actually, 7138 * sd_send_scsi_READ_CAPACITY will return 7139 * EINVAL when using bigger cdb than required 7140 * cdb length. Will handle this case in 7141 * "case EINVAL". 7142 */ 7143 7144 /* 7145 * The following relies on 7146 * sd_send_scsi_READ_CAPACITY never 7147 * returning 0 for capacity and/or lbasize. 7148 */ 7149 sd_update_block_info(un, lbasize, capacity); 7150 7151 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7152 "sd_unit_attach: un:0x%p capacity = %ld " 7153 "blocks; lbasize= %ld.\n", un, 7154 un->un_blockcount, un->un_tgt_blocksize); 7155 7156 break; 7157 } 7158 case EINVAL: 7159 /* 7160 * In the case where the max-cdb-length property 7161 * is smaller than the required CDB length for 7162 * a SCSI device, a target driver can fail to 7163 * attach to that device. 7164 */ 7165 scsi_log(SD_DEVINFO(un), 7166 sd_label, CE_WARN, 7167 "disk capacity is too large " 7168 "for current cdb length"); 7169 goto spinup_failed; 7170 case EACCES: 7171 /* 7172 * Should never get here if the spin-up 7173 * succeeded, but code it in anyway. 7174 * From here, just continue with the attach... 7175 */ 7176 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7177 "sd_unit_attach: un:0x%p " 7178 "sd_send_scsi_READ_CAPACITY " 7179 "returned reservation conflict\n", un); 7180 reservation_flag = SD_TARGET_IS_RESERVED; 7181 break; 7182 default: 7183 /* 7184 * Likewise, should never get here if the 7185 * spin-up succeeded. Just continue with 7186 * the attach... 
7187			 */
7188				break;
7189			}
7190			break;
7191		case EACCES:
7192			/*
7193			 * Device is reserved by another host. In this case
7194			 * we could not spin it up or read the capacity, but
7195			 * we continue with the attach anyway.
7196			 */
7197			SD_INFO(SD_LOG_ATTACH_DETACH, un,
7198			    "sd_unit_attach: un:0x%p spin-up reservation "
7199			    "conflict.\n", un);
7200			reservation_flag = SD_TARGET_IS_RESERVED;
7201			break;
7202		default:
7203			/* Fail the attach if the spin-up failed. */
7204			SD_INFO(SD_LOG_ATTACH_DETACH, un,
7205			    "sd_unit_attach: un:0x%p spin-up failed.", un);
7206			goto spinup_failed;
7207		}
7208	}
7209
7210	/*
7211	 * Check to see if this is an MMC drive.
7212	 */
7213	if (ISCD(un)) {
7214		sd_set_mmc_caps(un);
7215	}
7216
7217
7218	/*
7219	 * Add a zero-length attribute to tell the world we support
7220	 * kernel ioctls (for layered drivers).
7221	 */
7222	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
7223	    DDI_KERNEL_IOCTL, NULL, 0);
7224
7225	/*
7226	 * Add a boolean property to tell the world we support
7227	 * the B_FAILFAST flag (for layered drivers).
7228	 */
7229	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
7230	    "ddi-failfast-supported", NULL, 0);
7231
7232	/*
7233	 * Initialize power management.
7234	 */
7235	mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
7236	cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
7237	sd_setup_pm(un, devi);
7238	if (un->un_f_pm_is_enabled == FALSE) {
7239		/*
7240		 * For performance, point to a jump table that does
7241		 * not include pm.
7242		 * The direct and priority chains don't change with PM.
7243		 *
7244		 * Note: this is currently done based on individual device
7245		 * capabilities. When an interface for determining system
7246		 * power enabled state becomes available, or when additional
7247		 * layers are added to the command chain, these values will
7248		 * have to be re-evaluated for correctness.
7249		 */
7250		if (un->un_f_non_devbsize_supported) {
7251			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
7252		} else {
7253			un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
7254		}
7255		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
7256	}
7257
7258	/*
7259	 * This property is set to 0 by HA software to avoid retries
7260	 * on a reserved disk. (The preferred property name is
7261	 * "retry-on-reservation-conflict".) (1189689)
7262	 *
7263	 * Note: The use of a global here can have unintended consequences. A
7264	 * per-instance variable is preferable to match the capabilities of
7265	 * different underlying HBAs. (4402600)
7266	 */
7267	sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
7268	    DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
7269	    sd_retry_on_reservation_conflict);
7270	if (sd_retry_on_reservation_conflict != 0) {
7271		sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
7272		    devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
7273		    sd_retry_on_reservation_conflict);
7274	}
7275
7276	/* Set up options for QFULL handling. */
7277	if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7278	    "qfull-retries", -1)) != -1) {
7279		(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
7280		    rval, 1);
7281	}
7282	if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7283	    "qfull-retry-interval", -1)) != -1) {
7284		(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
7285		    rval, 1);
7286	}
7287
7288	/*
7289	 * This just prints a message that announces the existence of the
7290	 * device.
The message is always printed in the system logfile, but
7291	 * only appears on the console if the system is booted with the
7292	 * -v (verbose) argument.
7293	 */
7294	ddi_report_dev(devi);
7295
7296	un->un_mediastate = DKIO_NONE;
7297
7298	cmlb_alloc_handle(&un->un_cmlbhandle);
7299
7300	#if defined(__i386) || defined(__amd64)
7301	/*
7302	 * On x86, compensate for the off-by-1 legacy error.
7303	 */
7304	if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
7305	    (lbasize == un->un_sys_blocksize))
7306		offbyone = CMLB_OFF_BY_ONE;
7307	#endif
7308
7309	if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
7310	    un->un_f_has_removable_media, un->un_f_is_hotpluggable,
7311	    un->un_node_type, offbyone, un->un_cmlbhandle,
7312	    (void *)SD_PATH_DIRECT) != 0) {
7313		goto cmlb_attach_failed;
7314	}
7315
7316
7317	/*
7318	 * Read and validate the device's geometry (i.e., the disk label).
7319	 * A new unformatted drive will not have a valid geometry, but
7320	 * the driver needs to successfully attach to this device so
7321	 * the drive can be formatted via ioctls.
7322	 */
7323	geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
7324	    (void *)SD_PATH_DIRECT) == 0) ? 1 : 0;
7325
7326	mutex_enter(SD_MUTEX(un));
7327
7328	/*
7329	 * Read and initialize the devid for the unit.
7330	 */
7331	if (un->un_f_devid_supported) {
7332		sd_register_devid(un, devi, reservation_flag);
7333	}
7334	mutex_exit(SD_MUTEX(un));
7335
7336	#if (defined(__fibre))
7337	/*
7338	 * Register callbacks for fibre only. You can't do this solely
7339	 * on the basis of the devid_type because this is HBA specific.
7340	 * We need to query our HBA capabilities to find out whether to
7341	 * register or not.
7342	 */
7343	if (un->un_f_is_fibre) {
7344		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
7345			sd_init_event_callbacks(un);
7346			SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7347			    "sd_unit_attach: un:0x%p event callbacks inserted",
7348			    un);
7349		}
7350	}
7351	#endif
7352
7353	if (un->un_f_opt_disable_cache == TRUE) {
7354		/*
7355		 * Disable both read cache and write cache. This is
7356		 * the historic behavior of the keywords in the config file.
7357		 */
7358		if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
7359		    0) {
7360			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
7361			    "sd_unit_attach: un:0x%p Could not disable "
7362			    "caching", un);
7363			goto devid_failed;
7364		}
7365	}
7366
7367	/*
7368	 * Check the value of the WCE bit now and
7369	 * set un_f_write_cache_enabled accordingly.
7370	 */
7371	(void) sd_get_write_cache_enabled(un, &wc_enabled);
7372	mutex_enter(SD_MUTEX(un));
7373	un->un_f_write_cache_enabled = (wc_enabled != 0);
7374	mutex_exit(SD_MUTEX(un));
7375
7376	/*
7377	 * Check the value of the NV_SUP bit and set
7378	 * un_f_suppress_cache_flush accordingly.
7379	 */
7380	sd_get_nv_sup(un);
7381
7382	/*
7383	 * Find out what type of reservation this disk supports.
7384	 */
7385	switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) {
7386	case 0:
7387		/*
7388		 * SCSI-3 reservations are supported.
7389		 */
7390		un->un_reservation_type = SD_SCSI3_RESERVATION;
7391		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7392		    "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
7393		break;
7394	case ENOTSUP:
7395		/*
7396		 * The PERSISTENT RESERVE IN command would not be recognized by
7397		 * a SCSI-2 device, so assume the reservation type is SCSI-2.
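		 * (Such a device typically rejects the command as an illegal
		 * request, which is reported back here as ENOTSUP.)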
7398		 */
7399		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7400		    "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
7401		un->un_reservation_type = SD_SCSI2_RESERVATION;
7402		break;
7403	default:
7404		/*
7405		 * Default to SCSI-3 reservations.
7406		 */
7407		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7408		    "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un);
7409		un->un_reservation_type = SD_SCSI3_RESERVATION;
7410		break;
7411	}
7412
7413	/*
7414	 * Set the pstat and error stat values here, so that data obtained
7415	 * during the preceding attach-time routines is available.
7416	 *
7417	 * Note: This is a critical sequence that needs to be maintained:
7418	 *	1) Instantiate the kstats before any routines using the iopath
7419	 *	   (i.e. sd_send_scsi_cmd).
7420	 *	2) Initialize the error stats (sd_set_errstats) and partition
7421	 *	   stats (sd_set_pstats) here, following
7422	 *	   cmlb_validate_geometry(), sd_register_devid(), and
7423	 *	   sd_cache_control().
7424	 */
7425
7426	if (un->un_f_pkstats_enabled && geom_label_valid) {
7427		sd_set_pstats(un);
7428		SD_TRACE(SD_LOG_IO_PARTITION, un,
7429		    "sd_unit_attach: un:0x%p pstats created and set\n", un);
7430	}
7431
7432	sd_set_errstats(un);
7433	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7434	    "sd_unit_attach: un:0x%p errstats set\n", un);
7435
7436
7437	/*
7438	 * After successfully attaching an instance, we record how many luns
7439	 * have been attached to this target and controller for parallel
7440	 * SCSI. This information is used when sd tries to set the tagged
7441	 * queuing capability in the HBA.
7442	 */
7443	if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
7444		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
7445	}
7446
7447	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7448	    "sd_unit_attach: un:0x%p exit success\n", un);
7449
7450	return (DDI_SUCCESS);
7451
7452	/*
7453	 * An error occurred during the attach; clean up & return failure.
7454	 */
7455
7456	devid_failed:
7457
7458	setup_pm_failed:
7459		ddi_remove_minor_node(devi, NULL);
7460
7461	cmlb_attach_failed:
7462		/*
7463		 * Cleanup from the scsi_ifsetcap() calls (437868).
7464		 */
7465		(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
7466		(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
7467
7468		/*
7469		 * Refer to the comments on setting tagged-qing at the beginning
7470		 * of sd_unit_attach. We can only disable tagged queuing when
7471		 * there is no lun attached on the target.
7472		 */
7473		if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
7474			(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
7475		}
7476
7477		if (un->un_f_is_fibre == FALSE) {
7478			(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
7479		}
7480
7481	spinup_failed:
7482
7483		mutex_enter(SD_MUTEX(un));
7484
7485		/* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 7486 if (un->un_direct_priority_timeid != NULL) { 7487 timeout_id_t temp_id = un->un_direct_priority_timeid; 7488 un->un_direct_priority_timeid = NULL; 7489 mutex_exit(SD_MUTEX(un)); 7490 (void) untimeout(temp_id); 7491 mutex_enter(SD_MUTEX(un)); 7492 } 7493 7494 /* Cancel any pending start/stop timeouts */ 7495 if (un->un_startstop_timeid != NULL) { 7496 timeout_id_t temp_id = un->un_startstop_timeid; 7497 un->un_startstop_timeid = NULL; 7498 mutex_exit(SD_MUTEX(un)); 7499 (void) untimeout(temp_id); 7500 mutex_enter(SD_MUTEX(un)); 7501 } 7502 7503 /* Cancel any pending reset-throttle timeouts */ 7504 if (un->un_reset_throttle_timeid != NULL) { 7505 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7506 un->un_reset_throttle_timeid = NULL; 7507 mutex_exit(SD_MUTEX(un)); 7508 (void) untimeout(temp_id); 7509 mutex_enter(SD_MUTEX(un)); 7510 } 7511 7512 /* Cancel any pending retry timeouts */ 7513 if (un->un_retry_timeid != NULL) { 7514 timeout_id_t temp_id = un->un_retry_timeid; 7515 un->un_retry_timeid = NULL; 7516 mutex_exit(SD_MUTEX(un)); 7517 (void) untimeout(temp_id); 7518 mutex_enter(SD_MUTEX(un)); 7519 } 7520 7521 /* Cancel any pending delayed cv broadcast timeouts */ 7522 if (un->un_dcvb_timeid != NULL) { 7523 timeout_id_t temp_id = un->un_dcvb_timeid; 7524 un->un_dcvb_timeid = NULL; 7525 mutex_exit(SD_MUTEX(un)); 7526 (void) untimeout(temp_id); 7527 mutex_enter(SD_MUTEX(un)); 7528 } 7529 7530 mutex_exit(SD_MUTEX(un)); 7531 7532 /* There should not be any in-progress I/O so ASSERT this check */ 7533 ASSERT(un->un_ncmds_in_transport == 0); 7534 ASSERT(un->un_ncmds_in_driver == 0); 7535 7536 /* Do not free the softstate if the callback routine is active */ 7537 sd_sync_with_callback(un); 7538 7539 /* 7540 * Partition stats apparently are not used with removables. These would 7541 * not have been created during attach, so no need to clean them up... 7542 */ 7543 if (un->un_errstats != NULL) { 7544 kstat_delete(un->un_errstats); 7545 un->un_errstats = NULL; 7546 } 7547 7548 create_errstats_failed: 7549 7550 if (un->un_stats != NULL) { 7551 kstat_delete(un->un_stats); 7552 un->un_stats = NULL; 7553 } 7554 7555 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7556 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7557 7558 ddi_prop_remove_all(devi); 7559 sema_destroy(&un->un_semoclose); 7560 cv_destroy(&un->un_state_cv); 7561 7562 getrbuf_failed: 7563 7564 sd_free_rqs(un); 7565 7566 alloc_rqs_failed: 7567 7568 devp->sd_private = NULL; 7569 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7570 7571 get_softstate_failed: 7572 /* 7573 * Note: the man pages are unclear as to whether or not doing a 7574 * ddi_soft_state_free(sd_state, instance) is the right way to 7575 * clean up after the ddi_soft_state_zalloc() if the subsequent 7576 * ddi_get_soft_state() fails. The implication seems to be 7577 * that the get_soft_state cannot fail if the zalloc succeeds. 7578 */ 7579 ddi_soft_state_free(sd_state, instance); 7580 7581 probe_failed: 7582 scsi_unprobe(devp); 7583 7584 return (DDI_FAILURE); 7585 } 7586 7587 7588 /* 7589 * Function: sd_unit_detach 7590 * 7591 * Description: Performs DDI_DETACH processing for sddetach(). 
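 *		The detach is failed (DDI_FAILURE) if the instance is
 *		still open, has commands in flight, or is being detached
 *		or opened by another thread; see the checks at the top
 *		of the function.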
7592 * 7593 * Return Code: DDI_SUCCESS 7594 * DDI_FAILURE 7595 * 7596 * Context: Kernel thread context 7597 */ 7598 7599 static int 7600 sd_unit_detach(dev_info_t *devi) 7601 { 7602 struct scsi_device *devp; 7603 struct sd_lun *un; 7604 int i; 7605 int tgt; 7606 dev_t dev; 7607 dev_info_t *pdip = ddi_get_parent(devi); 7608 int instance = ddi_get_instance(devi); 7609 7610 mutex_enter(&sd_detach_mutex); 7611 7612 /* 7613 * Fail the detach for any of the following: 7614 * - Unable to get the sd_lun struct for the instance 7615 * - A layered driver has an outstanding open on the instance 7616 * - Another thread is already detaching this instance 7617 * - Another thread is currently performing an open 7618 */ 7619 devp = ddi_get_driver_private(devi); 7620 if ((devp == NULL) || 7621 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7622 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7623 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7624 mutex_exit(&sd_detach_mutex); 7625 return (DDI_FAILURE); 7626 } 7627 7628 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7629 7630 /* 7631 * Mark this instance as currently in a detach, to inhibit any 7632 * opens from a layered driver. 7633 */ 7634 un->un_detach_count++; 7635 mutex_exit(&sd_detach_mutex); 7636 7637 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7638 SCSI_ADDR_PROP_TARGET, -1); 7639 7640 dev = sd_make_device(SD_DEVINFO(un)); 7641 7642 #ifndef lint 7643 _NOTE(COMPETING_THREADS_NOW); 7644 #endif 7645 7646 mutex_enter(SD_MUTEX(un)); 7647 7648 /* 7649 * Fail the detach if there are any outstanding layered 7650 * opens on this device. 7651 */ 7652 for (i = 0; i < NDKMAP; i++) { 7653 if (un->un_ocmap.lyropen[i] != 0) { 7654 goto err_notclosed; 7655 } 7656 } 7657 7658 /* 7659 * Verify there are NO outstanding commands issued to this device. 7660 * ie, un_ncmds_in_transport == 0. 7661 * It's possible to have outstanding commands through the physio 7662 * code path, even though everything's closed. 7663 */ 7664 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7665 (un->un_direct_priority_timeid != NULL) || 7666 (un->un_state == SD_STATE_RWAIT)) { 7667 mutex_exit(SD_MUTEX(un)); 7668 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7669 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7670 goto err_stillbusy; 7671 } 7672 7673 /* 7674 * If we have the device reserved, release the reservation. 7675 */ 7676 if ((un->un_resvd_status & SD_RESERVE) && 7677 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7678 mutex_exit(SD_MUTEX(un)); 7679 /* 7680 * Note: sd_reserve_release sends a command to the device 7681 * via the sd_ioctlcmd() path, and can sleep. 7682 */ 7683 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7684 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7685 "sd_dr_detach: Cannot release reservation \n"); 7686 } 7687 } else { 7688 mutex_exit(SD_MUTEX(un)); 7689 } 7690 7691 /* 7692 * Untimeout any reserve recover, throttle reset, restart unit 7693 * and delayed broadcast timeout threads. Protect the timeout pointer 7694 * from getting nulled by their callback functions. 
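 *
 * (The pattern used below - copy the id, NULL the field, drop
 * SD_MUTEX, call untimeout(9F), re-enter SD_MUTEX - is needed
 * because untimeout() may wait for a callback that is itself
 * blocked on SD_MUTEX; calling it with the mutex held could
 * deadlock.)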
7695 */ 7696 mutex_enter(SD_MUTEX(un)); 7697 if (un->un_resvd_timeid != NULL) { 7698 timeout_id_t temp_id = un->un_resvd_timeid; 7699 un->un_resvd_timeid = NULL; 7700 mutex_exit(SD_MUTEX(un)); 7701 (void) untimeout(temp_id); 7702 mutex_enter(SD_MUTEX(un)); 7703 } 7704 7705 if (un->un_reset_throttle_timeid != NULL) { 7706 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7707 un->un_reset_throttle_timeid = NULL; 7708 mutex_exit(SD_MUTEX(un)); 7709 (void) untimeout(temp_id); 7710 mutex_enter(SD_MUTEX(un)); 7711 } 7712 7713 if (un->un_startstop_timeid != NULL) { 7714 timeout_id_t temp_id = un->un_startstop_timeid; 7715 un->un_startstop_timeid = NULL; 7716 mutex_exit(SD_MUTEX(un)); 7717 (void) untimeout(temp_id); 7718 mutex_enter(SD_MUTEX(un)); 7719 } 7720 7721 if (un->un_dcvb_timeid != NULL) { 7722 timeout_id_t temp_id = un->un_dcvb_timeid; 7723 un->un_dcvb_timeid = NULL; 7724 mutex_exit(SD_MUTEX(un)); 7725 (void) untimeout(temp_id); 7726 } else { 7727 mutex_exit(SD_MUTEX(un)); 7728 } 7729 7730 /* Remove any pending reservation reclaim requests for this device */ 7731 sd_rmv_resv_reclaim_req(dev); 7732 7733 mutex_enter(SD_MUTEX(un)); 7734 7735 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7736 if (un->un_direct_priority_timeid != NULL) { 7737 timeout_id_t temp_id = un->un_direct_priority_timeid; 7738 un->un_direct_priority_timeid = NULL; 7739 mutex_exit(SD_MUTEX(un)); 7740 (void) untimeout(temp_id); 7741 mutex_enter(SD_MUTEX(un)); 7742 } 7743 7744 /* Cancel any active multi-host disk watch thread requests */ 7745 if (un->un_mhd_token != NULL) { 7746 mutex_exit(SD_MUTEX(un)); 7747 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7748 if (scsi_watch_request_terminate(un->un_mhd_token, 7749 SCSI_WATCH_TERMINATE_NOWAIT)) { 7750 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7751 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7752 /* 7753 * Note: We are returning here after having removed 7754 * some driver timeouts above. This is consistent with 7755 * the legacy implementation but perhaps the watch 7756 * terminate call should be made with the wait flag set. 7757 */ 7758 goto err_stillbusy; 7759 } 7760 mutex_enter(SD_MUTEX(un)); 7761 un->un_mhd_token = NULL; 7762 } 7763 7764 if (un->un_swr_token != NULL) { 7765 mutex_exit(SD_MUTEX(un)); 7766 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7767 if (scsi_watch_request_terminate(un->un_swr_token, 7768 SCSI_WATCH_TERMINATE_NOWAIT)) { 7769 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7770 "sd_dr_detach: Cannot cancel swr watch request\n"); 7771 /* 7772 * Note: We are returning here after having removed 7773 * some driver timeouts above. This is consistent with 7774 * the legacy implementation but perhaps the watch 7775 * terminate call should be made with the wait flag set. 7776 */ 7777 goto err_stillbusy; 7778 } 7779 mutex_enter(SD_MUTEX(un)); 7780 un->un_swr_token = NULL; 7781 } 7782 7783 mutex_exit(SD_MUTEX(un)); 7784 7785 /* 7786 * Clear any scsi_reset_notifies. We clear the reset notifies 7787 * if we have not registered one. 7788 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7789 */ 7790 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7791 sd_mhd_reset_notify_cb, (caddr_t)un); 7792 7793 /* 7794 * protect the timeout pointers from getting nulled by 7795 * their callback functions during the cancellation process. 7796 * In such a scenario untimeout can be invoked with a null value. 
7797	 */
7798	_NOTE(NO_COMPETING_THREADS_NOW);
7799
7800	mutex_enter(&un->un_pm_mutex);
7801	if (un->un_pm_idle_timeid != NULL) {
7802		timeout_id_t temp_id = un->un_pm_idle_timeid;
7803		un->un_pm_idle_timeid = NULL;
7804		mutex_exit(&un->un_pm_mutex);
7805
7806		/*
7807		 * Timeout is active; cancel it.
7808		 * Note that it'll never be active on a device
7809		 * that does not support PM; therefore we don't
7810		 * have to check before calling pm_idle_component.
7811		 */
7812		(void) untimeout(temp_id);
7813		(void) pm_idle_component(SD_DEVINFO(un), 0);
7814		mutex_enter(&un->un_pm_mutex);
7815	}
7816
7817	/*
7818	 * Check whether there is already a timeout scheduled for power
7819	 * management. If yes, then don't lower the power here; that's
7820	 * the timeout handler's job.
7821	 */
7822	if (un->un_pm_timeid != NULL) {
7823		timeout_id_t temp_id = un->un_pm_timeid;
7824		un->un_pm_timeid = NULL;
7825		mutex_exit(&un->un_pm_mutex);
7826		/*
7827		 * Timeout is active; cancel it.
7828		 * Note that it'll never be active on a device
7829		 * that does not support PM; therefore we don't
7830		 * have to check before calling pm_idle_component.
7831		 */
7832		(void) untimeout(temp_id);
7833		(void) pm_idle_component(SD_DEVINFO(un), 0);
7834
7835	} else {
7836		mutex_exit(&un->un_pm_mutex);
7837		if ((un->un_f_pm_is_enabled == TRUE) &&
7838		    (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
7839		    DDI_SUCCESS)) {
7840			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
7841			    "sd_dr_detach: Lower power request failed, ignoring.\n");
7842			/*
7843			 * Fix for bug: 4297749, item # 13
7844			 * The above test now includes a check to see if PM is
7845			 * supported by this device before calling
7846			 * pm_lower_power().
7847			 * Note, the following is not dead code. The call to
7848			 * pm_lower_power above will generate a call back into
7849			 * our sdpower routine which might result in a timeout
7850			 * handler getting activated. Therefore the following
7851			 * code is valid and necessary.
7852			 */
7853			mutex_enter(&un->un_pm_mutex);
7854			if (un->un_pm_timeid != NULL) {
7855				timeout_id_t temp_id = un->un_pm_timeid;
7856				un->un_pm_timeid = NULL;
7857				mutex_exit(&un->un_pm_mutex);
7858				(void) untimeout(temp_id);
7859				(void) pm_idle_component(SD_DEVINFO(un), 0);
7860			} else {
7861				mutex_exit(&un->un_pm_mutex);
7862			}
7863		}
7864	}
7865
7866	/*
7867	 * Cleanup from the scsi_ifsetcap() calls (437868).
7868	 * Relocated here from above to be after the call to
7869	 * pm_lower_power, which was getting errors.
7870	 */
7871	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
7872	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
7873
7874	/*
7875	 * Currently, the HBA supports tagged queuing on a per-target basis.
7876	 * Setting it for one lun instance actually sets the capability of
7877	 * the whole target in the HBA, which affects those luns already
7878	 * attached on the same target. So during detach, we can only disable
7879	 * this capability when this is the only lun left on the target. By
7880	 * doing this, we assume a target has the same tagged queuing
7881	 * capability for every lun. The condition can be removed when HBAs
7882	 * are changed to support per-lun tagged queuing capability.
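	 *
	 * For example, clearing "tagged-qing" here while another lun on
	 * this target remains attached would silently change that lun's
	 * queuing behavior; the lun count check below prevents exactly
	 * that.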
7883 */ 7884 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7885 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7886 } 7887 7888 if (un->un_f_is_fibre == FALSE) { 7889 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7890 } 7891 7892 /* 7893 * Remove any event callbacks, fibre only 7894 */ 7895 if (un->un_f_is_fibre == TRUE) { 7896 if ((un->un_insert_event != NULL) && 7897 (ddi_remove_event_handler(un->un_insert_cb_id) != 7898 DDI_SUCCESS)) { 7899 /* 7900 * Note: We are returning here after having done 7901 * substantial cleanup above. This is consistent 7902 * with the legacy implementation but this may not 7903 * be the right thing to do. 7904 */ 7905 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7906 "sd_dr_detach: Cannot cancel insert event\n"); 7907 goto err_remove_event; 7908 } 7909 un->un_insert_event = NULL; 7910 7911 if ((un->un_remove_event != NULL) && 7912 (ddi_remove_event_handler(un->un_remove_cb_id) != 7913 DDI_SUCCESS)) { 7914 /* 7915 * Note: We are returning here after having done 7916 * substantial cleanup above. This is consistent 7917 * with the legacy implementation but this may not 7918 * be the right thing to do. 7919 */ 7920 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7921 "sd_dr_detach: Cannot cancel remove event\n"); 7922 goto err_remove_event; 7923 } 7924 un->un_remove_event = NULL; 7925 } 7926 7927 /* Do not free the softstate if the callback routine is active */ 7928 sd_sync_with_callback(un); 7929 7930 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7931 cmlb_free_handle(&un->un_cmlbhandle); 7932 7933 /* 7934 * Hold the detach mutex here, to make sure that no other threads ever 7935 * can access a (partially) freed soft state structure. 7936 */ 7937 mutex_enter(&sd_detach_mutex); 7938 7939 /* 7940 * Clean up the soft state struct. 7941 * Cleanup is done in reverse order of allocs/inits. 7942 * At this point there should be no competing threads anymore. 7943 */ 7944 7945 /* Unregister and free device id. */ 7946 ddi_devid_unregister(devi); 7947 if (un->un_devid) { 7948 ddi_devid_free(un->un_devid); 7949 un->un_devid = NULL; 7950 } 7951 7952 /* 7953 * Destroy wmap cache if it exists. 7954 */ 7955 if (un->un_wm_cache != NULL) { 7956 kmem_cache_destroy(un->un_wm_cache); 7957 un->un_wm_cache = NULL; 7958 } 7959 7960 /* 7961 * kstat cleanup is done in detach for all device types (4363169). 7962 * We do not want to fail detach if the device kstats are not deleted 7963 * since there is a confusion about the devo_refcnt for the device. 7964 * We just delete the kstats and let detach complete successfully. 7965 */ 7966 if (un->un_stats != NULL) { 7967 kstat_delete(un->un_stats); 7968 un->un_stats = NULL; 7969 } 7970 if (un->un_errstats != NULL) { 7971 kstat_delete(un->un_errstats); 7972 un->un_errstats = NULL; 7973 } 7974 7975 /* Remove partition stats */ 7976 if (un->un_f_pkstats_enabled) { 7977 for (i = 0; i < NSDMAP; i++) { 7978 if (un->un_pstats[i] != NULL) { 7979 kstat_delete(un->un_pstats[i]); 7980 un->un_pstats[i] = NULL; 7981 } 7982 } 7983 } 7984 7985 /* Remove xbuf registration */ 7986 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7987 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7988 7989 /* Remove driver properties */ 7990 ddi_prop_remove_all(devi); 7991 7992 mutex_destroy(&un->un_pm_mutex); 7993 cv_destroy(&un->un_pm_busy_cv); 7994 7995 cv_destroy(&un->un_wcc_cv); 7996 7997 /* Open/close semaphore */ 7998 sema_destroy(&un->un_semoclose); 7999 8000 /* Removable media condvar. 
*/ 8001 cv_destroy(&un->un_state_cv); 8002 8003 /* Suspend/resume condvar. */ 8004 cv_destroy(&un->un_suspend_cv); 8005 cv_destroy(&un->un_disk_busy_cv); 8006 8007 sd_free_rqs(un); 8008 8009 /* Free up soft state */ 8010 devp->sd_private = NULL; 8011 8012 bzero(un, sizeof (struct sd_lun)); 8013 ddi_soft_state_free(sd_state, instance); 8014 8015 mutex_exit(&sd_detach_mutex); 8016 8017 /* This frees up the INQUIRY data associated with the device. */ 8018 scsi_unprobe(devp); 8019 8020 /* 8021 * After successfully detaching an instance, we update the information 8022 * of how many luns have been attached in the relative target and 8023 * controller for parallel SCSI. This information is used when sd tries 8024 * to set the tagged queuing capability in HBA. 8025 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8026 * check if the device is parallel SCSI. However, we don't need to 8027 * check here because we've already checked during attach. No device 8028 * that is not parallel SCSI is in the chain. 8029 */ 8030 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8031 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8032 } 8033 8034 return (DDI_SUCCESS); 8035 8036 err_notclosed: 8037 mutex_exit(SD_MUTEX(un)); 8038 8039 err_stillbusy: 8040 _NOTE(NO_COMPETING_THREADS_NOW); 8041 8042 err_remove_event: 8043 mutex_enter(&sd_detach_mutex); 8044 un->un_detach_count--; 8045 mutex_exit(&sd_detach_mutex); 8046 8047 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8048 return (DDI_FAILURE); 8049 } 8050 8051 8052 /* 8053 * Function: sd_create_errstats 8054 * 8055 * Description: This routine instantiates the device error stats. 8056 * 8057 * Note: During attach the stats are instantiated first so they are 8058 * available for attach-time routines that utilize the driver 8059 * iopath to send commands to the device. The stats are initialized 8060 * separately so data obtained during some attach-time routines is 8061 * available. 
(4362483) 8062 * 8063 * Arguments: un - driver soft state (unit) structure 8064 * instance - driver instance 8065 * 8066 * Context: Kernel thread context 8067 */ 8068 8069 static void 8070 sd_create_errstats(struct sd_lun *un, int instance) 8071 { 8072 struct sd_errstats *stp; 8073 char kstatmodule_err[KSTAT_STRLEN]; 8074 char kstatname[KSTAT_STRLEN]; 8075 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8076 8077 ASSERT(un != NULL); 8078 8079 if (un->un_errstats != NULL) { 8080 return; 8081 } 8082 8083 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8084 "%serr", sd_label); 8085 (void) snprintf(kstatname, sizeof (kstatname), 8086 "%s%d,err", sd_label, instance); 8087 8088 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8089 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8090 8091 if (un->un_errstats == NULL) { 8092 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8093 "sd_create_errstats: Failed kstat_create\n"); 8094 return; 8095 } 8096 8097 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8098 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8099 KSTAT_DATA_UINT32); 8100 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8101 KSTAT_DATA_UINT32); 8102 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8103 KSTAT_DATA_UINT32); 8104 kstat_named_init(&stp->sd_vid, "Vendor", 8105 KSTAT_DATA_CHAR); 8106 kstat_named_init(&stp->sd_pid, "Product", 8107 KSTAT_DATA_CHAR); 8108 kstat_named_init(&stp->sd_revision, "Revision", 8109 KSTAT_DATA_CHAR); 8110 kstat_named_init(&stp->sd_serial, "Serial No", 8111 KSTAT_DATA_CHAR); 8112 kstat_named_init(&stp->sd_capacity, "Size", 8113 KSTAT_DATA_ULONGLONG); 8114 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8115 KSTAT_DATA_UINT32); 8116 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8117 KSTAT_DATA_UINT32); 8118 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8119 KSTAT_DATA_UINT32); 8120 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8121 KSTAT_DATA_UINT32); 8122 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8123 KSTAT_DATA_UINT32); 8124 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8125 KSTAT_DATA_UINT32); 8126 8127 un->un_errstats->ks_private = un; 8128 un->un_errstats->ks_update = nulldev; 8129 8130 kstat_install(un->un_errstats); 8131 } 8132 8133 8134 /* 8135 * Function: sd_set_errstats 8136 * 8137 * Description: This routine sets the value of the vendor id, product id, 8138 * revision, serial number, and capacity device error stats. 8139 * 8140 * Note: During attach the stats are instantiated first so they are 8141 * available for attach-time routines that utilize the driver 8142 * iopath to send commands to the device. The stats are initialized 8143 * separately so data obtained during some attach-time routines is 8144 * available. 
(4362483) 8145 * 8146 * Arguments: un - driver soft state (unit) structure 8147 * 8148 * Context: Kernel thread context 8149 */ 8150 8151 static void 8152 sd_set_errstats(struct sd_lun *un) 8153 { 8154 struct sd_errstats *stp; 8155 8156 ASSERT(un != NULL); 8157 ASSERT(un->un_errstats != NULL); 8158 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8159 ASSERT(stp != NULL); 8160 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8161 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8162 (void) strncpy(stp->sd_revision.value.c, 8163 un->un_sd->sd_inq->inq_revision, 4); 8164 8165 /* 8166 * All the errstats are persistent across detach/attach, 8167 * so reset all the errstats here in case of the hot 8168 * replacement of disk drives, except for not changed 8169 * Sun qualified drives. 8170 */ 8171 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8172 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8173 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8174 stp->sd_softerrs.value.ui32 = 0; 8175 stp->sd_harderrs.value.ui32 = 0; 8176 stp->sd_transerrs.value.ui32 = 0; 8177 stp->sd_rq_media_err.value.ui32 = 0; 8178 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8179 stp->sd_rq_nodev_err.value.ui32 = 0; 8180 stp->sd_rq_recov_err.value.ui32 = 0; 8181 stp->sd_rq_illrq_err.value.ui32 = 0; 8182 stp->sd_rq_pfa_err.value.ui32 = 0; 8183 } 8184 8185 /* 8186 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8187 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8188 * (4376302)) 8189 */ 8190 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8191 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8192 sizeof (SD_INQUIRY(un)->inq_serial)); 8193 } 8194 8195 if (un->un_f_blockcount_is_valid != TRUE) { 8196 /* 8197 * Set capacity error stat to 0 for no media. This ensures 8198 * a valid capacity is displayed in response to 'iostat -E' 8199 * when no media is present in the device. 8200 */ 8201 stp->sd_capacity.value.ui64 = 0; 8202 } else { 8203 /* 8204 * Multiply un_blockcount by un->un_sys_blocksize to get 8205 * capacity. 8206 * 8207 * Note: for non-512 blocksize devices "un_blockcount" has been 8208 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8209 * (un_tgt_blocksize / un->un_sys_blocksize). 8210 */ 8211 stp->sd_capacity.value.ui64 = (uint64_t) 8212 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8213 } 8214 } 8215 8216 8217 /* 8218 * Function: sd_set_pstats 8219 * 8220 * Description: This routine instantiates and initializes the partition 8221 * stats for each partition with more than zero blocks. 8222 * (4363169) 8223 * 8224 * Arguments: un - driver soft state (unit) structure 8225 * 8226 * Context: Kernel thread context 8227 */ 8228 8229 static void 8230 sd_set_pstats(struct sd_lun *un) 8231 { 8232 char kstatname[KSTAT_STRLEN]; 8233 int instance; 8234 int i; 8235 diskaddr_t nblks = 0; 8236 char *partname = NULL; 8237 8238 ASSERT(un != NULL); 8239 8240 instance = ddi_get_instance(SD_DEVINFO(un)); 8241 8242 /* Note:x86: is this a VTOC8/VTOC16 difference? 
	 */
8243	for (i = 0; i < NSDMAP; i++) {
8244
8245		if (cmlb_partinfo(un->un_cmlbhandle, i,
8246		    &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
8247			continue;
8248		mutex_enter(SD_MUTEX(un));
8249
8250		if ((un->un_pstats[i] == NULL) &&
8251		    (nblks != 0)) {
8252
8253			(void) snprintf(kstatname, sizeof (kstatname),
8254			    "%s%d,%s", sd_label, instance,
8255			    partname);
8256
8257			un->un_pstats[i] = kstat_create(sd_label,
8258			    instance, kstatname, "partition", KSTAT_TYPE_IO,
8259			    1, KSTAT_FLAG_PERSISTENT);
8260			if (un->un_pstats[i] != NULL) {
8261				un->un_pstats[i]->ks_lock = SD_MUTEX(un);
8262				kstat_install(un->un_pstats[i]);
8263			}
8264		}
8265		mutex_exit(SD_MUTEX(un));
8266	}
8267	}
8268
8269
8270	#if (defined(__fibre))
8271	/*
8272	 * Function: sd_init_event_callbacks
8273	 *
8274	 * Description: This routine initializes the insertion and removal event
8275	 *		callbacks. (fibre only)
8276	 *
8277	 * Arguments: un - driver soft state (unit) structure
8278	 *
8279	 * Context: Kernel thread context
8280	 */
8281
8282	static void
8283	sd_init_event_callbacks(struct sd_lun *un)
8284	{
8285		ASSERT(un != NULL);
8286
8287		if ((un->un_insert_event == NULL) &&
8288		    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
8289		    &un->un_insert_event) == DDI_SUCCESS)) {
8290			/*
8291			 * Add the callback for an insertion event.
8292			 */
8293			(void) ddi_add_event_handler(SD_DEVINFO(un),
8294			    un->un_insert_event, sd_event_callback, (void *)un,
8295			    &(un->un_insert_cb_id));
8296		}
8297
8298		if ((un->un_remove_event == NULL) &&
8299		    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
8300		    &un->un_remove_event) == DDI_SUCCESS)) {
8301			/*
8302			 * Add the callback for a removal event.
8303			 */
8304			(void) ddi_add_event_handler(SD_DEVINFO(un),
8305			    un->un_remove_event, sd_event_callback, (void *)un,
8306			    &(un->un_remove_cb_id));
8307		}
8308	}
8309
8310
8311	/*
8312	 * Function: sd_event_callback
8313	 *
8314	 * Description: This routine handles insert/remove events (photon). The
8315	 *		state is changed to OFFLINE, which can be used to suppress
8316	 *		error messages. (fibre only)
8317	 *
8318	 * Arguments: un - driver soft state (unit) structure
8319	 *
8320	 * Context: Callout thread context
8321	 */
8322	/* ARGSUSED */
8323	static void
8324	sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
8325	    void *bus_impldata)
8326	{
8327		struct sd_lun *un = (struct sd_lun *)arg;
8328
8329		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
8330		if (event == un->un_insert_event) {
8331			SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
8332			mutex_enter(SD_MUTEX(un));
8333			if (un->un_state == SD_STATE_OFFLINE) {
8334				if (un->un_last_state != SD_STATE_SUSPENDED) {
8335					un->un_state = un->un_last_state;
8336				} else {
8337					/*
8338					 * We have gone through SUSPEND/RESUME while
8339					 * we were offline. Restore the last state.
8340					 */
8341					un->un_state = un->un_save_state;
8342				}
8343			}
8344			mutex_exit(SD_MUTEX(un));
8345
8346			_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
8347		} else if (event == un->un_remove_event) {
8348			SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
8349			mutex_enter(SD_MUTEX(un));
8350			/*
8351			 * We need to handle an event callback that occurs during
8352			 * the suspend operation, since we don't prevent it.
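		 * (Hence the SD_STATE_SUSPENDED check below: while
		 * suspended, the OFFLINE transition is only recorded in
		 * un_last_state so that DDI_RESUME can restore it later.)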
8353			 */
8354			if (un->un_state != SD_STATE_OFFLINE) {
8355				if (un->un_state != SD_STATE_SUSPENDED) {
8356					New_state(un, SD_STATE_OFFLINE);
8357				} else {
8358					un->un_last_state = SD_STATE_OFFLINE;
8359				}
8360			}
8361			mutex_exit(SD_MUTEX(un));
8362		} else {
8363			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
8364			    "!Unknown event\n");
8365		}
8366
8367	}
8368	#endif
8369
8370	/*
8371	 * Function: sd_cache_control()
8372	 *
8373	 * Description: This routine is the driver entry point for setting
8374	 *		read and write caching by modifying the WCE (write cache
8375	 *		enable) and RCD (read cache disable) bits of mode
8376	 *		page 8 (MODEPAGE_CACHING).
8377	 *
8378	 * Arguments: un - driver soft state (unit) structure
8379	 *		rcd_flag - flag for controlling the read cache
8380	 *		wce_flag - flag for controlling the write cache
8381	 *
8382	 * Return Code: EIO
8383	 *		code returned by sd_send_scsi_MODE_SENSE and
8384	 *		sd_send_scsi_MODE_SELECT
8385	 *
8386	 * Context: Kernel Thread
8387	 */
8388
8389	static int
8390	sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag)
8391	{
8392		struct mode_caching *mode_caching_page;
8393		uchar_t *header;
8394		size_t buflen;
8395		int hdrlen;
8396		int bd_len;
8397		int rval = 0;
8398		struct mode_header_grp2 *mhp;
8399
8400		ASSERT(un != NULL);
8401
8402		/*
8403		 * Do a test unit ready, otherwise a mode sense may not work if this
8404		 * is the first command sent to the device after boot.
8405		 */
8406		(void) sd_send_scsi_TEST_UNIT_READY(un, 0);
8407
8408		if (un->un_f_cfg_is_atapi == TRUE) {
8409			hdrlen = MODE_HEADER_LENGTH_GRP2;
8410		} else {
8411			hdrlen = MODE_HEADER_LENGTH;
8412		}
8413
8414		/*
8415		 * Allocate memory for the retrieved mode page and its headers. Set
8416		 * a pointer to the page itself. Use mode_cache_scsi3 to ensure
8417		 * we get all of the mode sense data; otherwise the mode select
8418		 * will fail. mode_cache_scsi3 is a superset of mode_caching.
8419		 */
8420		buflen = hdrlen + MODE_BLK_DESC_LENGTH +
8421		    sizeof (struct mode_cache_scsi3);
8422
8423		header = kmem_zalloc(buflen, KM_SLEEP);
8424
8425		/* Get the information from the device. */
8426		if (un->un_f_cfg_is_atapi == TRUE) {
8427			rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen,
8428			    MODEPAGE_CACHING, SD_PATH_DIRECT);
8429		} else {
8430			rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
8431			    MODEPAGE_CACHING, SD_PATH_DIRECT);
8432		}
8433		if (rval != 0) {
8434			SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
8435			    "sd_cache_control: Mode Sense Failed\n");
8436			kmem_free(header, buflen);
8437			return (rval);
8438		}
8439
8440		/*
8441		 * Determine the size of the Block Descriptors in order to locate
8442		 * the mode page data. ATAPI devices return 0, SCSI devices
8443		 * should return MODE_BLK_DESC_LENGTH.
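	 *
	 * The buffer returned by MODE SENSE is laid out as:
	 *
	 *	offset 0:		mode header (hdrlen bytes)
	 *	offset hdrlen:		block descriptor(s) (bd_len bytes;
	 *				0 for ATAPI)
	 *	offset hdrlen + bd_len:	caching mode page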
8444 */ 8445 if (un->un_f_cfg_is_atapi == TRUE) { 8446 mhp = (struct mode_header_grp2 *)header; 8447 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8448 } else { 8449 bd_len = ((struct mode_header *)header)->bdesc_length; 8450 } 8451 8452 if (bd_len > MODE_BLK_DESC_LENGTH) { 8453 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8454 "sd_cache_control: Mode Sense returned invalid " 8455 "block descriptor length\n"); 8456 kmem_free(header, buflen); 8457 return (EIO); 8458 } 8459 8460 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8461 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8462 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8463 " caching page code mismatch %d\n", 8464 mode_caching_page->mode_page.code); 8465 kmem_free(header, buflen); 8466 return (EIO); 8467 } 8468 8469 /* Check the relevant bits on successful mode sense. */ 8470 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8471 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8472 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8473 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8474 8475 size_t sbuflen; 8476 uchar_t save_pg; 8477 8478 /* 8479 * Construct select buffer length based on the 8480 * length of the sense data returned. 8481 */ 8482 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8483 sizeof (struct mode_page) + 8484 (int)mode_caching_page->mode_page.length; 8485 8486 /* 8487 * Set the caching bits as requested. 8488 */ 8489 if (rcd_flag == SD_CACHE_ENABLE) 8490 mode_caching_page->rcd = 0; 8491 else if (rcd_flag == SD_CACHE_DISABLE) 8492 mode_caching_page->rcd = 1; 8493 8494 if (wce_flag == SD_CACHE_ENABLE) 8495 mode_caching_page->wce = 1; 8496 else if (wce_flag == SD_CACHE_DISABLE) 8497 mode_caching_page->wce = 0; 8498 8499 /* 8500 * Save the page if the mode sense says the 8501 * drive supports it. 8502 */ 8503 save_pg = mode_caching_page->mode_page.ps ? 8504 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8505 8506 /* Clear reserved bits before mode select. */ 8507 mode_caching_page->mode_page.ps = 0; 8508 8509 /* 8510 * Clear out mode header for mode select. 8511 * The rest of the retrieved page will be reused. 8512 */ 8513 bzero(header, hdrlen); 8514 8515 if (un->un_f_cfg_is_atapi == TRUE) { 8516 mhp = (struct mode_header_grp2 *)header; 8517 mhp->bdesc_length_hi = bd_len >> 8; 8518 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8519 } else { 8520 ((struct mode_header *)header)->bdesc_length = bd_len; 8521 } 8522 8523 /* Issue mode select to change the cache settings */ 8524 if (un->un_f_cfg_is_atapi == TRUE) { 8525 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8526 sbuflen, save_pg, SD_PATH_DIRECT); 8527 } else { 8528 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8529 sbuflen, save_pg, SD_PATH_DIRECT); 8530 } 8531 } 8532 8533 kmem_free(header, buflen); 8534 return (rval); 8535 } 8536 8537 8538 /* 8539 * Function: sd_get_write_cache_enabled() 8540 * 8541 * Description: This routine is the driver entry point for determining if 8542 * write caching is enabled. It examines the WCE (write cache 8543 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8544 * 8545 * Arguments: un - driver soft state (unit) structure 8546 * is_enabled - pointer to int where write cache enabled state 8547 * is returned (non-zero -> write cache enabled) 8548 * 8549 * 8550 * Return Code: EIO 8551 * code returned by sd_send_scsi_MODE_SENSE 8552 * 8553 * Context: Kernel Thread 8554 * 8555 * NOTE: If ioctl is added to disable write cache, this sequence should 8556 * be followed so that no locking is required for accesses to 8557 * un->un_f_write_cache_enabled: 8558 * do mode select to clear wce 8559 * do synchronize cache to flush cache 8560 * set un->un_f_write_cache_enabled = FALSE 8561 * 8562 * Conversely, an ioctl to enable the write cache should be done 8563 * in this order: 8564 * set un->un_f_write_cache_enabled = TRUE 8565 * do mode select to set wce 8566 */ 8567 8568 static int 8569 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8570 { 8571 struct mode_caching *mode_caching_page; 8572 uchar_t *header; 8573 size_t buflen; 8574 int hdrlen; 8575 int bd_len; 8576 int rval = 0; 8577 8578 ASSERT(un != NULL); 8579 ASSERT(is_enabled != NULL); 8580 8581 /* in case of error, flag as enabled */ 8582 *is_enabled = TRUE; 8583 8584 /* 8585 * Do a test unit ready, otherwise a mode sense may not work if this 8586 * is the first command sent to the device after boot. 8587 */ 8588 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8589 8590 if (un->un_f_cfg_is_atapi == TRUE) { 8591 hdrlen = MODE_HEADER_LENGTH_GRP2; 8592 } else { 8593 hdrlen = MODE_HEADER_LENGTH; 8594 } 8595 8596 /* 8597 * Allocate memory for the retrieved mode page and its headers. Set 8598 * a pointer to the page itself. 8599 */ 8600 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8601 header = kmem_zalloc(buflen, KM_SLEEP); 8602 8603 /* Get the information from the device. */ 8604 if (un->un_f_cfg_is_atapi == TRUE) { 8605 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8606 MODEPAGE_CACHING, SD_PATH_DIRECT); 8607 } else { 8608 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8609 MODEPAGE_CACHING, SD_PATH_DIRECT); 8610 } 8611 if (rval != 0) { 8612 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8613 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8614 kmem_free(header, buflen); 8615 return (rval); 8616 } 8617 8618 /* 8619 * Determine size of Block Descriptors in order to locate 8620 * the mode page data. ATAPI devices return 0, SCSI devices 8621 * should return MODE_BLK_DESC_LENGTH. 
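	 * (See the buffer layout note in sd_cache_control() above.)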
8622		 */
8623		if (un->un_f_cfg_is_atapi == TRUE) {
8624			struct mode_header_grp2 *mhp;
8625			mhp = (struct mode_header_grp2 *)header;
8626			bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
8627		} else {
8628			bd_len = ((struct mode_header *)header)->bdesc_length;
8629		}
8630
8631		if (bd_len > MODE_BLK_DESC_LENGTH) {
8632			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
8633			    "sd_get_write_cache_enabled: Mode Sense returned invalid "
8634			    "block descriptor length\n");
8635			kmem_free(header, buflen);
8636			return (EIO);
8637		}
8638
8639		mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
8640		if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
8641			SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: "
8642			    "Mode Sense caching page code mismatch %d\n",
8643			    mode_caching_page->mode_page.code);
8644			kmem_free(header, buflen);
8645			return (EIO);
8646		}
8647		*is_enabled = mode_caching_page->wce;
8648
8649		kmem_free(header, buflen);
8650		return (0);
8651	}
8652
8653	/*
8654	 * Function: sd_get_nv_sup()
8655	 *
8656	 * Description: This routine is the driver entry point for
8657	 *		determining whether a non-volatile cache is supported. The
8658	 *		determination process works as follows:
8659	 *
8660	 *		1. sd first queries sd.conf to see whether the
8661	 *		suppress_cache_flush bit is set for this device.
8662	 *
8663	 *		2. If not there, sd then queries the internal disk table.
8664	 *
8665	 *		3. If either sd.conf or the internal disk table specifies
8666	 *		that cache flushes be suppressed, we don't bother checking
8667	 *		the NV_SUP bit.
8668	 *
8669	 *		If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
8670	 *		the optional INQUIRY VPD page 0x86. If the device
8671	 *		supports VPD page 0x86, sd examines the NV_SUP
8672	 *		(non-volatile cache support) bit in the INQUIRY VPD page
8673	 *		0x86:
8674	 *		o If the NV_SUP bit is set, sd assumes the device has a
8675	 *		non-volatile cache and sets
8676	 *		un_f_sync_nv_supported to TRUE.
8677	 *		o Otherwise the cache is not non-volatile, and
8678	 *		un_f_sync_nv_supported is set to FALSE.
8679	 *
8680	 * Arguments: un - driver soft state (unit) structure
8681	 *
8682	 * Return Code: none
8683	 *
8684	 * Context: Kernel Thread
8685	 */
8686
8687	static void
8688	sd_get_nv_sup(struct sd_lun *un)
8689	{
8690		int		rval = 0;
8691		uchar_t		*inq86 = NULL;
8692		size_t		inq86_len = MAX_INQUIRY_SIZE;
8693		size_t		inq86_resid = 0;
8694		struct		dk_callback *dkc;
8695
8696		ASSERT(un != NULL);
8697
8698		mutex_enter(SD_MUTEX(un));
8699
8700		/*
8701		 * Be conservative on the device's support of the
8702		 * SYNC_NV bit: un_f_sync_nv_supported is
8703		 * initialized to FALSE.
8704		 */
8705		un->un_f_sync_nv_supported = FALSE;
8706
8707		/*
8708		 * If either sd.conf or the internal disk table
8709		 * specifies that cache flushes be suppressed, then
8710		 * we don't bother checking the NV_SUP bit.
8711		 */
8712		if (un->un_f_suppress_cache_flush == TRUE) {
8713			mutex_exit(SD_MUTEX(un));
8714			return;
8715		}
8716
8717		if (sd_check_vpd_page_support(un) == 0 &&
8718		    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
8719			mutex_exit(SD_MUTEX(un));
8720			/* collect page 86 data if available */
8721			inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
8722			rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len,
8723			    0x01, 0x86, &inq86_resid);
8724
8725			if (rval == 0 && (inq86_len - inq86_resid > 6)) {
8726				SD_TRACE(SD_LOG_COMMON, un,
8727				    "sd_get_nv_sup: successfully got VPD "
8728				    "page: %x "
8729				    "PAGE LENGTH: %x BYTE 6: %x\n",
8730				    inq86[1], inq86[3], inq86[6]);
8731
8732				mutex_enter(SD_MUTEX(un));
8733				/*
8734				 * Check the value of the NV_SUP bit:
8735				 * un_f_sync_nv_supported is set to TRUE only
8736				 * if the device reports the NV_SUP bit as 1.
8737				 */
8738				if (inq86[6] & SD_VPD_NV_SUP) {
8739					un->un_f_sync_nv_supported = TRUE;
8740				}
8741				mutex_exit(SD_MUTEX(un));
8742			}
8743			kmem_free(inq86, inq86_len);
8744		} else {
8745			mutex_exit(SD_MUTEX(un));
8746		}
8747
8748		/*
8749		 * Send a SYNC CACHE command to check whether the
8750		 * SYNC_NV bit is supported. By this point
8751		 * un_f_sync_nv_supported should hold its correct value.
8752		 */
8753		mutex_enter(SD_MUTEX(un));
8754		if (un->un_f_sync_nv_supported) {
8755			mutex_exit(SD_MUTEX(un));
8756			dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
8757			dkc->dkc_flag = FLUSH_VOLATILE;
8758			(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
8759
8760			/*
8761			 * Send a TEST UNIT READY command to the device. This should
8762			 * clear any outstanding UNIT ATTENTION that may be present.
8763			 */
8764			(void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR);
8765
8766			kmem_free(dkc, sizeof (struct dk_callback));
8767		} else {
8768			mutex_exit(SD_MUTEX(un));
8769		}
8770
8771		SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: "
8772		    "un_f_suppress_cache_flush is set to %d\n",
8773		    un->un_f_suppress_cache_flush);
8774	}
8775
8776	/*
8777	 * Function: sd_make_device
8778	 *
8779	 * Description: Utility routine to return the Solaris device number from
8780	 *		the data in the device's dev_info structure.
8781	 *
8782	 * Return Code: The Solaris device number
8783	 *
8784	 * Context: Any
8785	 */
8786
8787	static dev_t
8788	sd_make_device(dev_info_t *devi)
8789	{
8790		return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
8791		    ddi_get_instance(devi) << SDUNIT_SHIFT));
8792	}
8793
8794
8795	/*
8796	 * Function: sd_pm_entry
8797	 *
8798	 * Description: Called at the start of a new command to manage power
8799	 *		and busy status of a device. This includes determining whether
8800	 *		the current power state of the device is sufficient for
8801	 *		performing the command or whether it must be changed.
8802	 *		The PM framework is notified appropriately.
8803	 *		Only with a return status of DDI_SUCCESS will the
8804	 *		component be marked busy to the framework.
8805	 *
8806	 *		All callers of sd_pm_entry must check the return status
8807	 *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
8808	 *		of DDI_FAILURE indicates the device failed to power up.
8809	 *		In this case un_pm_count has been adjusted so the result
8810	 *		on exit is still powered down, i.e. count is less than 0.
8811	 *		Calling sd_pm_exit with this count value hits an ASSERT.
8812	 *
8813	 * Return Code: DDI_SUCCESS or DDI_FAILURE
8814	 *
8815	 * Context: Kernel thread context.
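 *
 *		Example of the accounting: a powered-down idle device
 *		has un_pm_count < 0. The first command powers it up
 *		(sdpower brings the count to 0) and marks it busy
 *		(count becomes 1); each additional in-flight command
 *		increments the count, and each sd_pm_exit decrements
 *		it, idling the device again when it reaches 0.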
8816 */ 8817 8818 static int 8819 sd_pm_entry(struct sd_lun *un) 8820 { 8821 int return_status = DDI_SUCCESS; 8822 8823 ASSERT(!mutex_owned(SD_MUTEX(un))); 8824 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8825 8826 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8827 8828 if (un->un_f_pm_is_enabled == FALSE) { 8829 SD_TRACE(SD_LOG_IO_PM, un, 8830 "sd_pm_entry: exiting, PM not enabled\n"); 8831 return (return_status); 8832 } 8833 8834 /* 8835 * Just increment a counter if PM is enabled. On the transition from 8836 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8837 * the count with each IO and mark the device as idle when the count 8838 * hits 0. 8839 * 8840 * If the count is less than 0 the device is powered down. If a powered 8841 * down device is successfully powered up then the count must be 8842 * incremented to reflect the power up. Note that it'll get incremented 8843 * a second time to become busy. 8844 * 8845 * Because the following has the potential to change the device state 8846 * and must release the un_pm_mutex to do so, only one thread can be 8847 * allowed through at a time. 8848 */ 8849 8850 mutex_enter(&un->un_pm_mutex); 8851 while (un->un_pm_busy == TRUE) { 8852 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8853 } 8854 un->un_pm_busy = TRUE; 8855 8856 if (un->un_pm_count < 1) { 8857 8858 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8859 8860 /* 8861 * Indicate we are now busy so the framework won't attempt to 8862 * power down the device. This call will only fail if either 8863 * we passed a bad component number or the device has no 8864 * components. Neither of these should ever happen. 8865 */ 8866 mutex_exit(&un->un_pm_mutex); 8867 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8868 ASSERT(return_status == DDI_SUCCESS); 8869 8870 mutex_enter(&un->un_pm_mutex); 8871 8872 if (un->un_pm_count < 0) { 8873 mutex_exit(&un->un_pm_mutex); 8874 8875 SD_TRACE(SD_LOG_IO_PM, un, 8876 "sd_pm_entry: power up component\n"); 8877 8878 /* 8879 * pm_raise_power will cause sdpower to be called, 8880 * which brings the device power level to the 8881 * desired state, ON in this case. If successful, 8882 * un_pm_count and un_power_level will be updated 8883 * appropriately. 8884 */ 8885 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8886 SD_SPINDLE_ON); 8887 8888 mutex_enter(&un->un_pm_mutex); 8889 8890 if (return_status != DDI_SUCCESS) { 8891 /* 8892 * Power up failed. 8893 * Idle the device and adjust the count 8894 * so the result on exit is that we're 8895 * still powered down, i.e. count is less than 0. 8896 */ 8897 SD_TRACE(SD_LOG_IO_PM, un, 8898 "sd_pm_entry: power up failed," 8899 " idle the component\n"); 8900 8901 (void) pm_idle_component(SD_DEVINFO(un), 0); 8902 un->un_pm_count--; 8903 } else { 8904 /* 8905 * Device is powered up, verify the 8906 * count is non-negative. 8907 * This is debug only. 8908 */ 8909 ASSERT(un->un_pm_count == 0); 8910 } 8911 } 8912 8913 if (return_status == DDI_SUCCESS) { 8914 /* 8915 * For performance, now that the device has been tagged 8916 * as busy, and it's known to be powered up, update the 8917 * chain types to use jump tables that do not include 8918 * pm. This significantly lowers the overhead and 8919 * therefore improves performance.
8920 */ 8921 8922 mutex_exit(&un->un_pm_mutex); 8923 mutex_enter(SD_MUTEX(un)); 8924 SD_TRACE(SD_LOG_IO_PM, un, 8925 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8926 un->un_uscsi_chain_type); 8927 8928 if (un->un_f_non_devbsize_supported) { 8929 un->un_buf_chain_type = 8930 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8931 } else { 8932 un->un_buf_chain_type = 8933 SD_CHAIN_INFO_DISK_NO_PM; 8934 } 8935 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8936 8937 SD_TRACE(SD_LOG_IO_PM, un, 8938 " changed uscsi_chain_type to %d\n", 8939 un->un_uscsi_chain_type); 8940 mutex_exit(SD_MUTEX(un)); 8941 mutex_enter(&un->un_pm_mutex); 8942 8943 if (un->un_pm_idle_timeid == NULL) { 8944 /* 300 ms. */ 8945 un->un_pm_idle_timeid = 8946 timeout(sd_pm_idletimeout_handler, un, 8947 (drv_usectohz((clock_t)300000))); 8948 /* 8949 * Include an extra call to busy which keeps the 8950 * device busy with respect to the PM layer 8951 * until the timer fires, at which time it'll 8952 * get the extra idle call. 8953 */ 8954 (void) pm_busy_component(SD_DEVINFO(un), 0); 8955 } 8956 } 8957 un->un_pm_busy = FALSE; 8958 /* Next... */ 8959 8960 cv_signal(&un->un_pm_busy_cv); 8961 8962 un->un_pm_count++; 8963 8964 SD_TRACE(SD_LOG_IO_PM, un, 8965 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 8966 8967 mutex_exit(&un->un_pm_mutex); 8968 8969 return (return_status); 8970 } 8971 8972 8973 /* 8974 * Function: sd_pm_exit 8975 * 8976 * Description: Called at the completion of a command to manage busy 8977 * status for the device. If the device becomes idle the 8978 * PM framework is notified. 8979 * 8980 * Context: Kernel thread context 8981 */ 8982 8983 static void 8984 sd_pm_exit(struct sd_lun *un) 8985 { 8986 ASSERT(!mutex_owned(SD_MUTEX(un))); 8987 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8988 8989 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 8990 8991 /* 8992 * After attach the following flag is only read, so don't 8993 * take the penalty of acquiring a mutex for it. 8994 */ 8995 if (un->un_f_pm_is_enabled == TRUE) { 8996 8997 mutex_enter(&un->un_pm_mutex); 8998 un->un_pm_count--; 8999 9000 SD_TRACE(SD_LOG_IO_PM, un, 9001 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9002 9003 ASSERT(un->un_pm_count >= 0); 9004 if (un->un_pm_count == 0) { 9005 mutex_exit(&un->un_pm_mutex); 9006 9007 SD_TRACE(SD_LOG_IO_PM, un, 9008 "sd_pm_exit: idle component\n"); 9009 9010 (void) pm_idle_component(SD_DEVINFO(un), 0); 9011 9012 } else { 9013 mutex_exit(&un->un_pm_mutex); 9014 } 9015 } 9016 9017 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9018 } 9019 9020 9021 /* 9022 * Function: sdopen 9023 * 9024 * Description: Driver's open(9e) entry point function.
9025 * 9026 * Arguments: dev_p - pointer to device number 9027 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9028 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9029 * cred_p - user credential pointer 9030 * 9031 * Return Code: EINVAL 9032 * ENXIO 9033 * EIO 9034 * EROFS 9035 * EBUSY 9036 * 9037 * Context: Kernel thread context 9038 */ 9039 /* ARGSUSED */ 9040 static int 9041 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9042 { 9043 struct sd_lun *un; 9044 int nodelay; 9045 int part; 9046 uint64_t partmask; 9047 int instance; 9048 dev_t dev; 9049 int rval = EIO; 9050 diskaddr_t nblks = 0; 9051 diskaddr_t label_cap; 9052 9053 /* Validate the open type */ 9054 if (otyp >= OTYPCNT) { 9055 return (EINVAL); 9056 } 9057 9058 dev = *dev_p; 9059 instance = SDUNIT(dev); 9060 mutex_enter(&sd_detach_mutex); 9061 9062 /* 9063 * Fail the open if there is no softstate for the instance, or 9064 * if another thread somewhere is trying to detach the instance. 9065 */ 9066 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9067 (un->un_detach_count != 0)) { 9068 mutex_exit(&sd_detach_mutex); 9069 /* 9070 * The probe cache only needs to be cleared when open(9e) fails 9071 * with ENXIO (4238046). 9072 */ 9073 /* 9074 * Unconditionally clearing the probe cache is OK with 9075 * separate sd/ssd binaries; on the x86 platform, where both 9076 * parallel and fibre support are in one binary, 9077 * it can be an issue. 9078 */ 9079 sd_scsi_clear_probe_cache(); 9080 return (ENXIO); 9081 } 9082 9083 /* 9084 * The un_layer_count is to prevent another thread in specfs from 9085 * trying to detach the instance, which can happen when we are 9086 * called from a higher-layer driver instead of thru specfs. 9087 * This will not be needed when DDI provides a layered driver 9088 * interface that allows specfs to know that an instance is in 9089 * use by a layered driver & should not be detached. 9090 * 9091 * Note: the semantics for layered driver opens are exactly one 9092 * close for every open. 9093 */ 9094 if (otyp == OTYP_LYR) { 9095 un->un_layer_count++; 9096 } 9097 9098 /* 9099 * Keep a count of the current # of opens in progress. This is because 9100 * some layered drivers try to call us as a regular open. This can 9101 * cause problems that we cannot prevent; however, by keeping this count 9102 * we can at least keep our open and detach routines from racing against 9103 * each other under such conditions. 9104 */ 9105 un->un_opens_in_progress++; 9106 mutex_exit(&sd_detach_mutex); 9107 9108 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9109 part = SDPART(dev); 9110 partmask = 1 << part; 9111 9112 /* 9113 * We use a semaphore here in order to serialize 9114 * open and close requests on the device. 9115 */ 9116 sema_p(&un->un_semoclose); 9117 9118 mutex_enter(SD_MUTEX(un)); 9119 9120 /* 9121 * All device accesses go thru sdstrategy() where we check 9122 * on suspend status, but there could be a scsi_poll command, 9123 * which bypasses sdstrategy(), so we need to check pm 9124 * status.
9125 */ 9126 9127 if (!nodelay) { 9128 while ((un->un_state == SD_STATE_SUSPENDED) || 9129 (un->un_state == SD_STATE_PM_CHANGING)) { 9130 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9131 } 9132 9133 mutex_exit(SD_MUTEX(un)); 9134 if (sd_pm_entry(un) != DDI_SUCCESS) { 9135 rval = EIO; 9136 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9137 "sdopen: sd_pm_entry failed\n"); 9138 goto open_failed_with_pm; 9139 } 9140 mutex_enter(SD_MUTEX(un)); 9141 } 9142 9143 /* check for previous exclusive open */ 9144 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9145 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9146 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9147 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9148 9149 if (un->un_exclopen & (partmask)) { 9150 goto excl_open_fail; 9151 } 9152 9153 if (flag & FEXCL) { 9154 int i; 9155 if (un->un_ocmap.lyropen[part]) { 9156 goto excl_open_fail; 9157 } 9158 for (i = 0; i < (OTYPCNT - 1); i++) { 9159 if (un->un_ocmap.regopen[i] & (partmask)) { 9160 goto excl_open_fail; 9161 } 9162 } 9163 } 9164 9165 /* 9166 * Check the write permission if this is a removable media device, 9167 * NDELAY has not been set, and writable permission is requested. 9168 * 9169 * Note: If NDELAY was set and this is write-protected media, the WRITE 9170 * attempt will fail with EIO as part of the I/O processing. This is a 9171 * more permissive implementation that allows the open to succeed and 9172 * WRITE attempts to fail when appropriate. 9173 */ 9174 if (un->un_f_chk_wp_open) { 9175 if ((flag & FWRITE) && (!nodelay)) { 9176 mutex_exit(SD_MUTEX(un)); 9177 /* 9178 * Defer the check for write permission on a writable 9179 * DVD drive till sdstrategy; do not fail the open even 9180 * if FWRITE is set, as the device can be writable 9181 * depending upon the media, and the media can change 9182 * after the call to open(). 9183 */ 9184 if (un->un_f_dvdram_writable_device == FALSE) { 9185 if (ISCD(un) || sr_check_wp(dev)) { 9186 rval = EROFS; 9187 mutex_enter(SD_MUTEX(un)); 9188 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9189 "write to cd or write-protected media\n"); 9190 goto open_fail; 9191 } 9192 } 9193 mutex_enter(SD_MUTEX(un)); 9194 } 9195 } 9196 9197 /* 9198 * If opening in NDELAY/NONBLOCK mode, just return. 9199 * Check if disk is ready and has a valid geometry later. 9200 */ 9201 if (!nodelay) { 9202 mutex_exit(SD_MUTEX(un)); 9203 rval = sd_ready_and_valid(un); 9204 mutex_enter(SD_MUTEX(un)); 9205 /* 9206 * Fail if device is not ready or if the number of disk 9207 * blocks is zero or negative for non-CD devices. 9208 */ 9209 9210 nblks = 0; 9211 9212 if (rval == SD_READY_VALID && (!ISCD(un))) { 9213 /* if cmlb_partinfo fails, nblks remains 0 */ 9214 mutex_exit(SD_MUTEX(un)); 9215 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9216 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9217 mutex_enter(SD_MUTEX(un)); 9218 } 9219 9220 if ((rval != SD_READY_VALID) || 9221 (!ISCD(un) && nblks <= 0)) { 9222 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9223 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9224 "device not ready or invalid disk block value\n"); 9225 goto open_fail; 9226 } 9227 #if defined(__i386) || defined(__amd64) 9228 } else { 9229 uchar_t *cp; 9230 /* 9231 * x86 requires special nodelay handling, so that p0 is 9232 * always defined and accessible. 9233 * Invalidate geometry only if device is not already open.
9234 */ 9235 cp = &un->un_ocmap.chkd[0]; 9236 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9237 if (*cp != (uchar_t)0) { 9238 break; 9239 } 9240 cp++; 9241 } 9242 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9243 mutex_exit(SD_MUTEX(un)); 9244 cmlb_invalidate(un->un_cmlbhandle, 9245 (void *)SD_PATH_DIRECT); 9246 mutex_enter(SD_MUTEX(un)); 9247 } 9248 9249 #endif 9250 } 9251 9252 if (otyp == OTYP_LYR) { 9253 un->un_ocmap.lyropen[part]++; 9254 } else { 9255 un->un_ocmap.regopen[otyp] |= partmask; 9256 } 9257 9258 /* Set up open and exclusive open flags */ 9259 if (flag & FEXCL) { 9260 un->un_exclopen |= (partmask); 9261 } 9262 9263 /* 9264 * If the lun is EFI labeled and lun capacity is greater than the 9265 * capacity contained in the label, log a sys-event to notify 9266 * interested modules. 9267 * To avoid an infinite loop of sys-event logging, we only log the 9268 * event when the lun is not opened in NDELAY mode. The event handler 9269 * should open the lun in NDELAY mode. 9270 */ 9271 if (!(flag & FNDELAY)) { 9272 mutex_exit(SD_MUTEX(un)); 9273 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 9274 (void*)SD_PATH_DIRECT) == 0) { 9275 mutex_enter(SD_MUTEX(un)); 9276 if (un->un_f_blockcount_is_valid && 9277 un->un_blockcount > label_cap) { 9278 mutex_exit(SD_MUTEX(un)); 9279 sd_log_lun_expansion_event(un, 9280 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 9281 mutex_enter(SD_MUTEX(un)); 9282 } 9283 } else { 9284 mutex_enter(SD_MUTEX(un)); 9285 } 9286 } 9287 9288 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9289 "open of part %d type %d\n", part, otyp); 9290 9291 mutex_exit(SD_MUTEX(un)); 9292 if (!nodelay) { 9293 sd_pm_exit(un); 9294 } 9295 9296 sema_v(&un->un_semoclose); 9297 9298 mutex_enter(&sd_detach_mutex); 9299 un->un_opens_in_progress--; 9300 mutex_exit(&sd_detach_mutex); 9301 9302 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9303 return (DDI_SUCCESS); 9304 9305 excl_open_fail: 9306 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9307 rval = EBUSY; 9308 9309 open_fail: 9310 mutex_exit(SD_MUTEX(un)); 9311 9312 /* 9313 * On a failed open we must exit the pm management. 9314 */ 9315 if (!nodelay) { 9316 sd_pm_exit(un); 9317 } 9318 open_failed_with_pm: 9319 sema_v(&un->un_semoclose); 9320 9321 mutex_enter(&sd_detach_mutex); 9322 un->un_opens_in_progress--; 9323 if (otyp == OTYP_LYR) { 9324 un->un_layer_count--; 9325 } 9326 mutex_exit(&sd_detach_mutex); 9327 9328 return (rval); 9329 } 9330 9331 9332 /* 9333 * Function: sdclose 9334 * 9335 * Description: Driver's close(9e) entry point function. 9336 * 9337 * Arguments: dev - device number 9338 * flag - file status flag, informational only 9339 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9340 * cred_p - user credential pointer 9341 * 9342 * Return Code: 0, EIO, or ENXIO 9343 * 9344 * Context: Kernel thread context 9345 */ 9346 /* ARGSUSED */ 9347 static int 9348 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9349 { 9350 struct sd_lun *un; 9351 uchar_t *cp; 9352 int part; 9353 int nodelay; 9354 int rval = 0; 9355 9356 /* Validate the open type */ 9357 if (otyp >= OTYPCNT) { 9358 return (ENXIO); 9359 } 9360 9361 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9362 return (ENXIO); 9363 } 9364 9365 part = SDPART(dev); 9366 nodelay = flag & (FNDELAY | FNONBLOCK); 9367 9368 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9369 "sdclose: close of part %d type %d\n", part, otyp); 9370 9371 /* 9372 * We use a semaphore here in order to serialize 9373 * open and close requests on the device.
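 *
 * Every path out of the open/close code must balance the sema_p()
 * below with a sema_v(), including the error exits; sdopen() above
 * releases the semaphore on both its success and failure paths:
 *
 *	sema_p(&un->un_semoclose);
 *	(open or close processing)
 *	sema_v(&un->un_semoclose);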
9374 */ 9375 sema_p(&un->un_semoclose); 9376 9377 mutex_enter(SD_MUTEX(un)); 9378 9379 /* Don't proceed if power is being changed. */ 9380 while (un->un_state == SD_STATE_PM_CHANGING) { 9381 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9382 } 9383 9384 if (un->un_exclopen & (1 << part)) { 9385 un->un_exclopen &= ~(1 << part); 9386 } 9387 9388 /* Update the open partition map */ 9389 if (otyp == OTYP_LYR) { 9390 un->un_ocmap.lyropen[part] -= 1; 9391 } else { 9392 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9393 } 9394 9395 cp = &un->un_ocmap.chkd[0]; 9396 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9397 if (*cp != 0) { 9398 break; 9399 } 9400 cp++; 9401 } 9402 9403 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9404 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9405 9406 /* 9407 * We avoid persistence upon the last close, and set 9408 * the throttle back to the maximum. 9409 */ 9410 un->un_throttle = un->un_saved_throttle; 9411 9412 if (un->un_state == SD_STATE_OFFLINE) { 9413 if (un->un_f_is_fibre == FALSE) { 9414 scsi_log(SD_DEVINFO(un), sd_label, 9415 CE_WARN, "offline\n"); 9416 } 9417 mutex_exit(SD_MUTEX(un)); 9418 cmlb_invalidate(un->un_cmlbhandle, 9419 (void *)SD_PATH_DIRECT); 9420 mutex_enter(SD_MUTEX(un)); 9421 9422 } else { 9423 /* 9424 * Flush any outstanding writes in NVRAM cache. 9425 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9426 * cmd; it may not work for non-Pluto devices. 9427 * SYNCHRONIZE CACHE is not required for removables, 9428 * except DVD-RAM drives. 9429 * 9430 * Also note: because SYNCHRONIZE CACHE is currently 9431 * the only command issued here that requires the 9432 * drive be powered up, only do the power up before 9433 * sending the Sync Cache command. If additional 9434 * commands are added which require a powered-up 9435 * drive, the following sequence may have to change. 9436 * 9437 * And finally, note that parallel SCSI on SPARC 9438 * only issues a Sync Cache to DVD-RAM, a newly 9439 * supported device. 9440 */ 9441 #if defined(__i386) || defined(__amd64) 9442 if (un->un_f_sync_cache_supported || 9443 un->un_f_dvdram_writable_device == TRUE) { 9444 #else 9445 if (un->un_f_dvdram_writable_device == TRUE) { 9446 #endif 9447 mutex_exit(SD_MUTEX(un)); 9448 if (sd_pm_entry(un) == DDI_SUCCESS) { 9449 rval = 9450 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9451 NULL); 9452 /* ignore error if not supported */ 9453 if (rval == ENOTSUP) { 9454 rval = 0; 9455 } else if (rval != 0) { 9456 rval = EIO; 9457 } 9458 sd_pm_exit(un); 9459 } else { 9460 rval = EIO; 9461 } 9462 mutex_enter(SD_MUTEX(un)); 9463 } 9464 9465 /* 9466 * For devices which support DOOR_LOCK, send an ALLOW 9467 * MEDIA REMOVAL command, but don't get upset if it 9468 * fails. We need to raise the power of the drive before 9469 * we can call sd_send_scsi_DOORLOCK(). 9470 */ 9471 if (un->un_f_doorlock_supported) { 9472 mutex_exit(SD_MUTEX(un)); 9473 if (sd_pm_entry(un) == DDI_SUCCESS) { 9474 rval = sd_send_scsi_DOORLOCK(un, 9475 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9476 9477 sd_pm_exit(un); 9478 if (ISCD(un) && (rval != 0) && 9479 (nodelay != 0)) { 9480 rval = ENXIO; 9481 } 9482 } else { 9483 rval = EIO; 9484 } 9485 mutex_enter(SD_MUTEX(un)); 9486 } 9487 9488 /* 9489 * If a device has removable media, invalidate all 9490 * parameters related to media, such as geometry, 9491 * blocksize, and blockcount.
9492 */ 9493 if (un->un_f_has_removable_media) { 9494 sr_ejected(un); 9495 } 9496 9497 /* 9498 * Destroy the cache (if it exists) which was 9499 * allocated for the write maps since this is 9500 * the last close for this media. 9501 */ 9502 if (un->un_wm_cache) { 9503 /* 9504 * Check if there are pending commands; 9505 * if there are, give a warning and 9506 * do not destroy the cache. 9507 */ 9508 if (un->un_ncmds_in_driver > 0) { 9509 scsi_log(SD_DEVINFO(un), 9510 sd_label, CE_WARN, 9511 "Unable to clean up memory " 9512 "because of pending I/O\n"); 9513 } else { 9514 kmem_cache_destroy( 9515 un->un_wm_cache); 9516 un->un_wm_cache = NULL; 9517 } 9518 } 9519 } 9520 } 9521 9522 mutex_exit(SD_MUTEX(un)); 9523 sema_v(&un->un_semoclose); 9524 9525 if (otyp == OTYP_LYR) { 9526 mutex_enter(&sd_detach_mutex); 9527 /* 9528 * The detach routine may run when the layer count 9529 * drops to zero. 9530 */ 9531 un->un_layer_count--; 9532 mutex_exit(&sd_detach_mutex); 9533 } 9534 9535 return (rval); 9536 } 9537 9538 9539 /* 9540 * Function: sd_ready_and_valid 9541 * 9542 * Description: Test if device is ready and has a valid geometry. 9543 * 9544 * Arguments: un - driver soft state (unit) structure 9545 * 9546 * 9547 * Return Code: SD_READY_VALID ready and valid label 9548 * SD_NOT_READY_VALID not ready, no label 9549 * SD_RESERVED_BY_OTHERS reservation conflict 9550 * 9551 * Context: Never called at interrupt context. 9552 */ 9553 9554 static int 9555 sd_ready_and_valid(struct sd_lun *un) 9556 { 9557 struct sd_errstats *stp; 9558 uint64_t capacity; 9559 uint_t lbasize; 9560 int rval = SD_READY_VALID; 9561 char name_str[48]; 9562 int is_valid; 9563 9564 ASSERT(un != NULL); 9565 ASSERT(!mutex_owned(SD_MUTEX(un))); 9566 9567 mutex_enter(SD_MUTEX(un)); 9568 /* 9569 * If a device has removable media, we must check if media is 9570 * ready when checking if this device is ready and valid. 9571 */ 9572 if (un->un_f_has_removable_media) { 9573 mutex_exit(SD_MUTEX(un)); 9574 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9575 rval = SD_NOT_READY_VALID; 9576 mutex_enter(SD_MUTEX(un)); 9577 goto done; 9578 } 9579 9580 is_valid = SD_IS_VALID_LABEL(un); 9581 mutex_enter(SD_MUTEX(un)); 9582 if (!is_valid || 9583 (un->un_f_blockcount_is_valid == FALSE) || 9584 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9585 9586 /* capacity has to be read at every open. */ 9587 mutex_exit(SD_MUTEX(un)); 9588 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9589 &lbasize, SD_PATH_DIRECT) != 0) { 9590 cmlb_invalidate(un->un_cmlbhandle, 9591 (void *)SD_PATH_DIRECT); 9592 mutex_enter(SD_MUTEX(un)); 9593 rval = SD_NOT_READY_VALID; 9594 goto done; 9595 } else { 9596 mutex_enter(SD_MUTEX(un)); 9597 sd_update_block_info(un, lbasize, capacity); 9598 } 9599 } 9600 9601 /* 9602 * Check if the media in the device is writable or not. 9603 */ 9604 if (!is_valid && ISCD(un)) { 9605 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9606 } 9607 9608 } else { 9609 /* 9610 * Do a test unit ready to clear any unit attention from non-CD 9611 * devices. 9612 */ 9613 mutex_exit(SD_MUTEX(un)); 9614 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9615 mutex_enter(SD_MUTEX(un)); 9616 } 9617 9618 9619 /* 9620 * If this is a non-512 block device, allocate space for 9621 * the wmap cache. This is being done here since every time 9622 * media is changed this routine will be called and the 9623 * block size is a function of media rather than device.
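 *
 * For example, instance 2 of the sd driver would create a cache
 * named "sd2_cache" via the snprintf() below.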
9624 */ 9625 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9626 if (!(un->un_wm_cache)) { 9627 (void) snprintf(name_str, sizeof (name_str), 9628 "%s%d_cache", 9629 ddi_driver_name(SD_DEVINFO(un)), 9630 ddi_get_instance(SD_DEVINFO(un))); 9631 un->un_wm_cache = kmem_cache_create( 9632 name_str, sizeof (struct sd_w_map), 9633 8, sd_wm_cache_constructor, 9634 sd_wm_cache_destructor, NULL, 9635 (void *)un, NULL, 0); 9636 if (!(un->un_wm_cache)) { 9637 rval = ENOMEM; 9638 goto done; 9639 } 9640 } 9641 } 9642 9643 if (un->un_state == SD_STATE_NORMAL) { 9644 /* 9645 * If the target is not yet ready here (defined by a TUR 9646 * failure), invalidate the geometry and print an 'offline' 9647 * message. This is a legacy message, as the state of the 9648 * target is not actually changed to SD_STATE_OFFLINE. 9649 * 9650 * If the TUR fails for EACCES (Reservation Conflict), 9651 * SD_RESERVED_BY_OTHERS will be returned to indicate 9652 * reservation conflict. If the TUR fails for other 9653 * reasons, SD_NOT_READY_VALID will be returned. 9654 */ 9655 int err; 9656 9657 mutex_exit(SD_MUTEX(un)); 9658 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9659 mutex_enter(SD_MUTEX(un)); 9660 9661 if (err != 0) { 9662 mutex_exit(SD_MUTEX(un)); 9663 cmlb_invalidate(un->un_cmlbhandle, 9664 (void *)SD_PATH_DIRECT); 9665 mutex_enter(SD_MUTEX(un)); 9666 if (err == EACCES) { 9667 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9668 "reservation conflict\n"); 9669 rval = SD_RESERVED_BY_OTHERS; 9670 } else { 9671 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9672 "drive offline\n"); 9673 rval = SD_NOT_READY_VALID; 9674 } 9675 goto done; 9676 } 9677 } 9678 9679 if (un->un_f_format_in_progress == FALSE) { 9680 mutex_exit(SD_MUTEX(un)); 9681 if (cmlb_validate(un->un_cmlbhandle, 0, 9682 (void *)SD_PATH_DIRECT) != 0) { 9683 rval = SD_NOT_READY_VALID; 9684 mutex_enter(SD_MUTEX(un)); 9685 goto done; 9686 } 9687 if (un->un_f_pkstats_enabled) { 9688 sd_set_pstats(un); 9689 SD_TRACE(SD_LOG_IO_PARTITION, un, 9690 "sd_ready_and_valid: un:0x%p pstats created and " 9691 "set\n", un); 9692 } 9693 mutex_enter(SD_MUTEX(un)); 9694 } 9695 9696 /* 9697 * If this device supports the DOOR_LOCK command, try to send 9698 * it to PREVENT MEDIA REMOVAL, but don't get upset 9699 * if it fails. For a CD, however, it is an error. 9700 */ 9701 if (un->un_f_doorlock_supported) { 9702 mutex_exit(SD_MUTEX(un)); 9703 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9704 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9705 rval = SD_NOT_READY_VALID; 9706 mutex_enter(SD_MUTEX(un)); 9707 goto done; 9708 } 9709 mutex_enter(SD_MUTEX(un)); 9710 } 9711 9712 /* The state has changed, inform the media watch routines */ 9713 un->un_mediastate = DKIO_INSERTED; 9714 cv_broadcast(&un->un_state_cv); 9715 rval = SD_READY_VALID; 9716 9717 done: 9718 9719 /* 9720 * Initialize the capacity kstat value, if no media previously 9721 * (capacity kstat is 0) and a media has been inserted 9722 * (un_blockcount > 0). 9723 */ 9724 if (un->un_errstats != NULL) { 9725 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9726 if ((stp->sd_capacity.value.ui64 == 0) && 9727 (un->un_f_blockcount_is_valid == TRUE)) { 9728 stp->sd_capacity.value.ui64 = 9729 (uint64_t)((uint64_t)un->un_blockcount * 9730 un->un_sys_blocksize); 9731 } 9732 } 9733 9734 mutex_exit(SD_MUTEX(un)); 9735 return (rval); 9736 } 9737 9738 9739 /* 9740 * Function: sdmin 9741 * 9742 * Description: Routine to limit the size of a data transfer. Used in 9743 * conjunction with physio(9F).
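 *
 * For example (hypothetical numbers, for illustration only): with
 * un_max_xfer_size of 256 KB, a 1 MB request is issued by physio(9F)
 * as four transfers of 256 KB each, sdmin trimming b_bcount on each
 * pass.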
9744 * 9745 * Arguments: bp - pointer to the indicated buf(9S) struct. 9746 * 9747 * Context: Kernel thread context. 9748 */ 9749 9750 static void 9751 sdmin(struct buf *bp) 9752 { 9753 struct sd_lun *un; 9754 int instance; 9755 9756 instance = SDUNIT(bp->b_edev); 9757 9758 un = ddi_get_soft_state(sd_state, instance); 9759 ASSERT(un != NULL); 9760 9761 if (bp->b_bcount > un->un_max_xfer_size) { 9762 bp->b_bcount = un->un_max_xfer_size; 9763 } 9764 } 9765 9766 9767 /* 9768 * Function: sdread 9769 * 9770 * Description: Driver's read(9e) entry point function. 9771 * 9772 * Arguments: dev - device number 9773 * uio - structure pointer describing where data is to be stored 9774 * in user's space 9775 * cred_p - user credential pointer 9776 * 9777 * Return Code: ENXIO 9778 * EIO 9779 * EINVAL 9780 * value returned by physio 9781 * 9782 * Context: Kernel thread context. 9783 */ 9784 /* ARGSUSED */ 9785 static int 9786 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9787 { 9788 struct sd_lun *un = NULL; 9789 int secmask; 9790 int err; 9791 9792 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9793 return (ENXIO); 9794 } 9795 9796 ASSERT(!mutex_owned(SD_MUTEX(un))); 9797 9798 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9799 mutex_enter(SD_MUTEX(un)); 9800 /* 9801 * Because the call to sd_ready_and_valid will issue I/O, we 9802 * must wait here if either the device is suspended or 9803 * its power level is changing. 9804 */ 9805 while ((un->un_state == SD_STATE_SUSPENDED) || 9806 (un->un_state == SD_STATE_PM_CHANGING)) { 9807 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9808 } 9809 un->un_ncmds_in_driver++; 9810 mutex_exit(SD_MUTEX(un)); 9811 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9812 mutex_enter(SD_MUTEX(un)); 9813 un->un_ncmds_in_driver--; 9814 ASSERT(un->un_ncmds_in_driver >= 0); 9815 mutex_exit(SD_MUTEX(un)); 9816 return (EIO); 9817 } 9818 mutex_enter(SD_MUTEX(un)); 9819 un->un_ncmds_in_driver--; 9820 ASSERT(un->un_ncmds_in_driver >= 0); 9821 mutex_exit(SD_MUTEX(un)); 9822 } 9823 9824 /* 9825 * Read requests are restricted to multiples of the system block size. 9826 */ 9827 secmask = un->un_sys_blocksize - 1; 9828 9829 if (uio->uio_loffset & ((offset_t)(secmask))) { 9830 SD_ERROR(SD_LOG_READ_WRITE, un, 9831 "sdread: file offset not modulo %d\n", 9832 un->un_sys_blocksize); 9833 err = EINVAL; 9834 } else if (uio->uio_iov->iov_len & (secmask)) { 9835 SD_ERROR(SD_LOG_READ_WRITE, un, 9836 "sdread: transfer length not modulo %d\n", 9837 un->un_sys_blocksize); 9838 err = EINVAL; 9839 } else { 9840 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9841 } 9842 return (err); 9843 } 9844 9845 9846 /* 9847 * Function: sdwrite 9848 * 9849 * Description: Driver's write(9e) entry point function. 9850 * 9851 * Arguments: dev - device number 9852 * uio - structure pointer describing where data is stored in 9853 * user's space 9854 * cred_p - user credential pointer 9855 * 9856 * Return Code: ENXIO 9857 * EIO 9858 * EINVAL 9859 * value returned by physio 9860 * 9861 * Context: Kernel thread context.
9862 */ 9863 /* ARGSUSED */ 9864 static int 9865 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9866 { 9867 struct sd_lun *un = NULL; 9868 int secmask; 9869 int err; 9870 9871 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9872 return (ENXIO); 9873 } 9874 9875 ASSERT(!mutex_owned(SD_MUTEX(un))); 9876 9877 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9878 mutex_enter(SD_MUTEX(un)); 9879 /* 9880 * Because the call to sd_ready_and_valid will issue I/O, we 9881 * must wait here if either the device is suspended or 9882 * its power level is changing. 9883 */ 9884 while ((un->un_state == SD_STATE_SUSPENDED) || 9885 (un->un_state == SD_STATE_PM_CHANGING)) { 9886 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9887 } 9888 un->un_ncmds_in_driver++; 9889 mutex_exit(SD_MUTEX(un)); 9890 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9891 mutex_enter(SD_MUTEX(un)); 9892 un->un_ncmds_in_driver--; 9893 ASSERT(un->un_ncmds_in_driver >= 0); 9894 mutex_exit(SD_MUTEX(un)); 9895 return (EIO); 9896 } 9897 mutex_enter(SD_MUTEX(un)); 9898 un->un_ncmds_in_driver--; 9899 ASSERT(un->un_ncmds_in_driver >= 0); 9900 mutex_exit(SD_MUTEX(un)); 9901 } 9902 9903 /* 9904 * Write requests are restricted to multiples of the system block size. 9905 */ 9906 secmask = un->un_sys_blocksize - 1; 9907 9908 if (uio->uio_loffset & ((offset_t)(secmask))) { 9909 SD_ERROR(SD_LOG_READ_WRITE, un, 9910 "sdwrite: file offset not modulo %d\n", 9911 un->un_sys_blocksize); 9912 err = EINVAL; 9913 } else if (uio->uio_iov->iov_len & (secmask)) { 9914 SD_ERROR(SD_LOG_READ_WRITE, un, 9915 "sdwrite: transfer length not modulo %d\n", 9916 un->un_sys_blocksize); 9917 err = EINVAL; 9918 } else { 9919 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9920 } 9921 return (err); 9922 } 9923 9924 9925 /* 9926 * Function: sdaread 9927 * 9928 * Description: Driver's aread(9e) entry point function. 9929 * 9930 * Arguments: dev - device number 9931 * aio - structure pointer describing where data is to be stored 9932 * cred_p - user credential pointer 9933 * 9934 * Return Code: ENXIO 9935 * EIO 9936 * EINVAL 9937 * value returned by aphysio 9938 * 9939 * Context: Kernel thread context. 9940 */ 9941 /* ARGSUSED */ 9942 static int 9943 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9944 { 9945 struct sd_lun *un = NULL; 9946 struct uio *uio = aio->aio_uio; 9947 int secmask; 9948 int err; 9949 9950 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9951 return (ENXIO); 9952 } 9953 9954 ASSERT(!mutex_owned(SD_MUTEX(un))); 9955 9956 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9957 mutex_enter(SD_MUTEX(un)); 9958 /* 9959 * Because the call to sd_ready_and_valid will issue I/O, we 9960 * must wait here if either the device is suspended or 9961 * its power level is changing. 9962 */ 9963 while ((un->un_state == SD_STATE_SUSPENDED) || 9964 (un->un_state == SD_STATE_PM_CHANGING)) { 9965 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9966 } 9967 un->un_ncmds_in_driver++; 9968 mutex_exit(SD_MUTEX(un)); 9969 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9970 mutex_enter(SD_MUTEX(un)); 9971 un->un_ncmds_in_driver--; 9972 ASSERT(un->un_ncmds_in_driver >= 0); 9973 mutex_exit(SD_MUTEX(un)); 9974 return (EIO); 9975 } 9976 mutex_enter(SD_MUTEX(un)); 9977 un->un_ncmds_in_driver--; 9978 ASSERT(un->un_ncmds_in_driver >= 0); 9979 mutex_exit(SD_MUTEX(un)); 9980 } 9981 9982 /* 9983 * Read requests are restricted to multiples of the system block size.
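 *
 * For example, with un_sys_blocksize of 512, secmask below is 0x1FF;
 * an offset or length with any of those low 9 bits set (e.g. 1000)
 * fails with EINVAL, while multiples of 512 (e.g. 1536) pass.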
9984 */ 9985 secmask = un->un_sys_blocksize - 1; 9986 9987 if (uio->uio_loffset & ((offset_t)(secmask))) { 9988 SD_ERROR(SD_LOG_READ_WRITE, un, 9989 "sdaread: file offset not modulo %d\n", 9990 un->un_sys_blocksize); 9991 err = EINVAL; 9992 } else if (uio->uio_iov->iov_len & (secmask)) { 9993 SD_ERROR(SD_LOG_READ_WRITE, un, 9994 "sdaread: transfer length not modulo %d\n", 9995 un->un_sys_blocksize); 9996 err = EINVAL; 9997 } else { 9998 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 9999 } 10000 return (err); 10001 } 10002 10003 10004 /* 10005 * Function: sdawrite 10006 * 10007 * Description: Driver's awrite(9e) entry point function. 10008 * 10009 * Arguments: dev - device number 10010 * aio - structure pointer describing where data is stored 10011 * cred_p - user credential pointer 10012 * 10013 * Return Code: ENXIO 10014 * EIO 10015 * EINVAL 10016 * value returned by aphysio 10017 * 10018 * Context: Kernel thread context. 10019 */ 10020 /* ARGSUSED */ 10021 static int 10022 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10023 { 10024 struct sd_lun *un = NULL; 10025 struct uio *uio = aio->aio_uio; 10026 int secmask; 10027 int err; 10028 10029 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10030 return (ENXIO); 10031 } 10032 10033 ASSERT(!mutex_owned(SD_MUTEX(un))); 10034 10035 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10036 mutex_enter(SD_MUTEX(un)); 10037 /* 10038 * Because the call to sd_ready_and_valid will issue I/O, we 10039 * must wait here if either the device is suspended or 10040 * its power level is changing. 10041 */ 10042 while ((un->un_state == SD_STATE_SUSPENDED) || 10043 (un->un_state == SD_STATE_PM_CHANGING)) { 10044 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10045 } 10046 un->un_ncmds_in_driver++; 10047 mutex_exit(SD_MUTEX(un)); 10048 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10049 mutex_enter(SD_MUTEX(un)); 10050 un->un_ncmds_in_driver--; 10051 ASSERT(un->un_ncmds_in_driver >= 0); 10052 mutex_exit(SD_MUTEX(un)); 10053 return (EIO); 10054 } 10055 mutex_enter(SD_MUTEX(un)); 10056 un->un_ncmds_in_driver--; 10057 ASSERT(un->un_ncmds_in_driver >= 0); 10058 mutex_exit(SD_MUTEX(un)); 10059 } 10060 10061 /* 10062 * Write requests are restricted to multiples of the system block size.
10063 */ 10064 secmask = un->un_sys_blocksize - 1; 10065 10066 if (uio->uio_loffset & ((offset_t)(secmask))) { 10067 SD_ERROR(SD_LOG_READ_WRITE, un, 10068 "sdawrite: file offset not modulo %d\n", 10069 un->un_sys_blocksize); 10070 err = EINVAL; 10071 } else if (uio->uio_iov->iov_len & (secmask)) { 10072 SD_ERROR(SD_LOG_READ_WRITE, un, 10073 "sdawrite: transfer length not modulo %d\n", 10074 un->un_sys_blocksize); 10075 err = EINVAL; 10076 } else { 10077 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10078 } 10079 return (err); 10080 } 10081 10082 10083 10084 10085 10086 /* 10087 * Driver IO processing follows the following sequence: 10088 * 10089 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10090 * | | ^ 10091 * v v | 10092 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10093 * | | | | 10094 * v | | | 10095 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10096 * | | ^ ^ 10097 * v v | | 10098 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10099 * | | | | 10100 * +---+ | +------------+ +-------+ 10101 * | | | | 10102 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10103 * | v | | 10104 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10105 * | | ^ | 10106 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10107 * | v | | 10108 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10109 * | | ^ | 10110 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10111 * | v | | 10112 * | sd_checksum_iostart() sd_checksum_iodone() | 10113 * | | ^ | 10114 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10115 * | v | | 10116 * | sd_pm_iostart() sd_pm_iodone() | 10117 * | | ^ | 10118 * | | | | 10119 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10120 * | ^ 10121 * v | 10122 * sd_core_iostart() | 10123 * | | 10124 * | +------>(*destroypkt)() 10125 * +-> sd_start_cmds() <-+ | | 10126 * | | | v 10127 * | | | scsi_destroy_pkt(9F) 10128 * | | | 10129 * +->(*initpkt)() +- sdintr() 10130 * | | | | 10131 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10132 * | +-> scsi_setup_cdb(9F) | 10133 * | | 10134 * +--> scsi_transport(9F) | 10135 * | | 10136 * +----> SCSA ---->+ 10137 * 10138 * 10139 * This code is based upon the following presumptions: 10140 * 10141 * - iostart and iodone functions operate on buf(9S) structures. These 10142 * functions perform the necessary operations on the buf(9S) and pass 10143 * them along to the next function in the chain by using the macros 10144 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10145 * (for iodone side functions). 10146 * 10147 * - The iostart side functions may sleep. The iodone side functions 10148 * are called under interrupt context and may NOT sleep. Therefore 10149 * iodone side functions also may not call iostart side functions. 10150 * (NOTE: iostart side functions should NOT sleep for memory, as 10151 * this could result in deadlock.) 10152 * 10153 * - An iostart side function may call its corresponding iodone side 10154 * function directly (if necessary). 10155 * 10156 * - In the event of an error, an iostart side function can return a buf(9S) 10157 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10158 * b_error in the usual way of course). 10159 * 10160 * - The taskq mechanism may be used by the iodone side functions to dispatch 10161 * requests to the iostart side functions. The iostart side functions in 10162 * this case would be called under the context of a taskq thread, so it's 10163 * OK for them to block/sleep/spin in this case. 
10164 * 10165 * - iostart side functions may allocate "shadow" buf(9S) structs and 10166 * pass them along to the next function in the chain. The corresponding 10167 * iodone side functions must coalesce the "shadow" bufs and return 10168 * the "original" buf to the next higher layer. 10169 * 10170 * - The b_private field of the buf(9S) struct holds a pointer to 10171 * an sd_xbuf struct, which contains information needed to 10172 * construct the scsi_pkt for the command. 10173 * 10174 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10175 * layer must acquire & release the SD_MUTEX(un) as needed. 10176 */ 10177 10178 10179 /* 10180 * Create taskq for all targets in the system. This is created at 10181 * _init(9E) and destroyed at _fini(9E). 10182 * 10183 * Note: here we set the minalloc to a reasonably high number to ensure that 10184 * we will have an adequate supply of task entries available at interrupt time. 10185 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10186 * sd_taskq_create(). Since we do not want to sleep for allocations at 10187 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10188 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10189 * requests at any one instant in time. 10190 */ 10191 #define SD_TASKQ_NUMTHREADS 8 10192 #define SD_TASKQ_MINALLOC 256 10193 #define SD_TASKQ_MAXALLOC 256 10194 10195 static taskq_t *sd_tq = NULL; 10196 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10197 10198 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10199 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10200 10201 /* 10202 * The following task queue is created for the write part of 10203 * read-modify-write on non-512 block size devices. 10204 * The number of threads is limited to 1 for now; this was chosen 10205 * because the queue currently applies only to DVD-RAM and MO drives, 10206 * for which performance is not the main criterion at this stage. 10207 * Note: it remains to be explored whether a single taskq can be used in future. 10208 */ 10209 #define SD_WMR_TASKQ_NUMTHREADS 1 10210 static taskq_t *sd_wmr_tq = NULL; 10211 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10212 10213 /* 10214 * Function: sd_taskq_create 10215 * 10216 * Description: Create taskq thread(s) and preallocate task entries 10217 * 10218 * Return Code: none; the allocated taskqs are stored in sd_tq and sd_wmr_tq. 10219 * 10220 * Context: Can sleep. Requires blockable context. 10221 * 10222 * Notes: - The taskq() facility currently is NOT part of the DDI. 10223 * (definitely NOT recommended for 3rd-party drivers!) :-) 10224 * - taskq_create() will block for memory; it will also panic 10225 * if it cannot create the requested number of threads. 10226 * - Currently taskq_create() creates threads that cannot be 10227 * swapped.
10228 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10229 * supply of taskq entries at interrupt time (i.e., so that we 10230 * do not have to sleep for memory) 10231 */ 10232 10233 static void 10234 sd_taskq_create(void) 10235 { 10236 char taskq_name[TASKQ_NAMELEN]; 10237 10238 ASSERT(sd_tq == NULL); 10239 ASSERT(sd_wmr_tq == NULL); 10240 10241 (void) snprintf(taskq_name, sizeof (taskq_name), 10242 "%s_drv_taskq", sd_label); 10243 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10244 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10245 TASKQ_PREPOPULATE)); 10246 10247 (void) snprintf(taskq_name, sizeof (taskq_name), 10248 "%s_rmw_taskq", sd_label); 10249 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10250 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10251 TASKQ_PREPOPULATE)); 10252 } 10253 10254 10255 /* 10256 * Function: sd_taskq_delete 10257 * 10258 * Description: Complementary cleanup routine for sd_taskq_create(). 10259 * 10260 * Context: Kernel thread context. 10261 */ 10262 10263 static void 10264 sd_taskq_delete(void) 10265 { 10266 ASSERT(sd_tq != NULL); 10267 ASSERT(sd_wmr_tq != NULL); 10268 taskq_destroy(sd_tq); 10269 taskq_destroy(sd_wmr_tq); 10270 sd_tq = NULL; 10271 sd_wmr_tq = NULL; 10272 } 10273 10274 10275 /* 10276 * Function: sdstrategy 10277 * 10278 * Description: Driver's strategy (9E) entry point function. 10279 * 10280 * Arguments: bp - pointer to buf(9S) 10281 * 10282 * Return Code: Always returns zero 10283 * 10284 * Context: Kernel thread context. 10285 */ 10286 10287 static int 10288 sdstrategy(struct buf *bp) 10289 { 10290 struct sd_lun *un; 10291 10292 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10293 if (un == NULL) { 10294 bioerror(bp, EIO); 10295 bp->b_resid = bp->b_bcount; 10296 biodone(bp); 10297 return (0); 10298 } 10299 /* As was done in the past, fail new cmds if state is dumping. */ 10300 if (un->un_state == SD_STATE_DUMPING) { 10301 bioerror(bp, ENXIO); 10302 bp->b_resid = bp->b_bcount; 10303 biodone(bp); 10304 return (0); 10305 } 10306 10307 ASSERT(!mutex_owned(SD_MUTEX(un))); 10308 10309 /* 10310 * Commands may have snuck in while we released the mutex in 10311 * DDI_SUSPEND; we should block new commands. However, old 10312 * commands that are still in the driver at this point should 10313 * still be allowed to drain. 10314 */ 10315 mutex_enter(SD_MUTEX(un)); 10316 /* 10317 * Must wait here if either the device is suspended or 10318 * its power level is changing. 10319 */ 10320 while ((un->un_state == SD_STATE_SUSPENDED) || 10321 (un->un_state == SD_STATE_PM_CHANGING)) { 10322 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10323 } 10324 10325 un->un_ncmds_in_driver++; 10326 10327 /* 10328 * atapi: Since we are running the CD for now in PIO mode, we need to 10329 * call bp_mapin here to avoid bp_mapin being called in interrupt 10330 * context under the HBA's init_pkt routine. 10331 */ 10332 if (un->un_f_cfg_is_atapi == TRUE) { 10333 mutex_exit(SD_MUTEX(un)); 10334 bp_mapin(bp); 10335 mutex_enter(SD_MUTEX(un)); 10336 } 10337 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10338 un->un_ncmds_in_driver); 10339 10340 mutex_exit(SD_MUTEX(un)); 10341 10342 /* 10343 * This will (eventually) allocate the sd_xbuf area and 10344 * call sd_xbuf_strategy(). We just want to return the 10345 * result of ddi_xbuf_qstrategy so that we have an optimized 10346 * tail call which saves us a stack frame.
10347 */ 10348 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10349 } 10350 10351 10352 /* 10353 * Function: sd_xbuf_strategy 10354 * 10355 * Description: Function for initiating IO operations via the 10356 * ddi_xbuf_qstrategy() mechanism. 10357 * 10358 * Context: Kernel thread context. 10359 */ 10360 10361 static void 10362 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10363 { 10364 struct sd_lun *un = arg; 10365 10366 ASSERT(bp != NULL); 10367 ASSERT(xp != NULL); 10368 ASSERT(un != NULL); 10369 ASSERT(!mutex_owned(SD_MUTEX(un))); 10370 10371 /* 10372 * Initialize the fields in the xbuf and save a pointer to the 10373 * xbuf in bp->b_private. 10374 */ 10375 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10376 10377 /* Send the buf down the iostart chain */ 10378 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10379 } 10380 10381 10382 /* 10383 * Function: sd_xbuf_init 10384 * 10385 * Description: Prepare the given sd_xbuf struct for use. 10386 * 10387 * Arguments: un - ptr to softstate 10388 * bp - ptr to associated buf(9S) 10389 * xp - ptr to associated sd_xbuf 10390 * chain_type - IO chain type to use: 10391 * SD_CHAIN_NULL 10392 * SD_CHAIN_BUFIO 10393 * SD_CHAIN_USCSI 10394 * SD_CHAIN_DIRECT 10395 * SD_CHAIN_DIRECT_PRIORITY 10396 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10397 * initialization; may be NULL if none. 10398 * 10399 * Context: Kernel thread context 10400 */ 10401 10402 static void 10403 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10404 uchar_t chain_type, void *pktinfop) 10405 { 10406 int index; 10407 10408 ASSERT(un != NULL); 10409 ASSERT(bp != NULL); 10410 ASSERT(xp != NULL); 10411 10412 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10413 bp, chain_type); 10414 10415 xp->xb_un = un; 10416 xp->xb_pktp = NULL; 10417 xp->xb_pktinfo = pktinfop; 10418 xp->xb_private = bp->b_private; 10419 xp->xb_blkno = (daddr_t)bp->b_blkno; 10420 10421 /* 10422 * Set up the iostart and iodone chain indexes in the xbuf, based 10423 * upon the specified chain type to use. 10424 */ 10425 switch (chain_type) { 10426 case SD_CHAIN_NULL: 10427 /* 10428 * Fall thru to just use the values for the buf type, even 10429 * though for the NULL chain these values will never be used. 10430 */ 10431 /* FALLTHRU */ 10432 case SD_CHAIN_BUFIO: 10433 index = un->un_buf_chain_type; 10434 break; 10435 case SD_CHAIN_USCSI: 10436 index = un->un_uscsi_chain_type; 10437 break; 10438 case SD_CHAIN_DIRECT: 10439 index = un->un_direct_chain_type; 10440 break; 10441 case SD_CHAIN_DIRECT_PRIORITY: 10442 index = un->un_priority_chain_type; 10443 break; 10444 default: 10445 /* We're really broken if we ever get here... */ 10446 panic("sd_xbuf_init: illegal chain type!"); 10447 /*NOTREACHED*/ 10448 } 10449 10450 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10451 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10452 10453 /* 10454 * It might be a bit easier to simply bzero the entire xbuf above, 10455 * but it turns out that since we init a fair number of members anyway, 10456 * we save a fair number of cycles by doing explicit assignment of zero.
10457 */ 10458 xp->xb_pkt_flags = 0; 10459 xp->xb_dma_resid = 0; 10460 xp->xb_retry_count = 0; 10461 xp->xb_victim_retry_count = 0; 10462 xp->xb_ua_retry_count = 0; 10463 xp->xb_nr_retry_count = 0; 10464 xp->xb_sense_bp = NULL; 10465 xp->xb_sense_status = 0; 10466 xp->xb_sense_state = 0; 10467 xp->xb_sense_resid = 0; 10468 10469 bp->b_private = xp; 10470 bp->b_flags &= ~(B_DONE | B_ERROR); 10471 bp->b_resid = 0; 10472 bp->av_forw = NULL; 10473 bp->av_back = NULL; 10474 bioerror(bp, 0); 10475 10476 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10477 } 10478 10479 10480 /* 10481 * Function: sd_uscsi_strategy 10482 * 10483 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10484 * 10485 * Arguments: bp - buf struct ptr 10486 * 10487 * Return Code: Always returns 0 10488 * 10489 * Context: Kernel thread context 10490 */ 10491 10492 static int 10493 sd_uscsi_strategy(struct buf *bp) 10494 { 10495 struct sd_lun *un; 10496 struct sd_uscsi_info *uip; 10497 struct sd_xbuf *xp; 10498 uchar_t chain_type; 10499 10500 ASSERT(bp != NULL); 10501 10502 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10503 if (un == NULL) { 10504 bioerror(bp, EIO); 10505 bp->b_resid = bp->b_bcount; 10506 biodone(bp); 10507 return (0); 10508 } 10509 10510 ASSERT(!mutex_owned(SD_MUTEX(un))); 10511 10512 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10513 10514 mutex_enter(SD_MUTEX(un)); 10515 /* 10516 * atapi: Since we are running the CD for now in PIO mode, we need to 10517 * call bp_mapin here to avoid bp_mapin being called in interrupt 10518 * context under the HBA's init_pkt routine. 10519 */ 10520 if (un->un_f_cfg_is_atapi == TRUE) { 10521 mutex_exit(SD_MUTEX(un)); 10522 bp_mapin(bp); 10523 mutex_enter(SD_MUTEX(un)); 10524 } 10525 un->un_ncmds_in_driver++; 10526 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10527 un->un_ncmds_in_driver); 10528 mutex_exit(SD_MUTEX(un)); 10529 10530 /* 10531 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10532 */ 10533 ASSERT(bp->b_private != NULL); 10534 uip = (struct sd_uscsi_info *)bp->b_private; 10535 10536 switch (uip->ui_flags) { 10537 case SD_PATH_DIRECT: 10538 chain_type = SD_CHAIN_DIRECT; 10539 break; 10540 case SD_PATH_DIRECT_PRIORITY: 10541 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10542 break; 10543 default: 10544 chain_type = SD_CHAIN_USCSI; 10545 break; 10546 } 10547 10548 /* 10549 * We may allocate an extra buf for external USCSI commands. If the 10550 * application asks for more than 20 bytes of sense data via USCSI, 10551 * the SCSA layer will allocate a 252-byte sense buf for that command.
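 *
 * For example, a uscsi_rqlen of 64 exceeds SENSE_LENGTH (20), so the
 * xbuf below is sized as
 *	sizeof (struct sd_xbuf) - SENSE_LENGTH + MAX_SENSE_LENGTH
 * i.e. the 20-byte sense area is replaced by a 252-byte one.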
10552 */ 10553 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 10554 SENSE_LENGTH) { 10555 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 10556 MAX_SENSE_LENGTH, KM_SLEEP); 10557 } else { 10558 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 10559 } 10560 10561 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10562 10563 /* Use the index obtained within xbuf_init */ 10564 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10565 10566 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10567 10568 return (0); 10569 } 10570 10571 /* 10572 * Function: sd_send_scsi_cmd 10573 * 10574 * Description: Runs a USCSI command for the user (when called thru sdioctl) 10575 * or for the driver 10576 * 10577 * Arguments: dev - the dev_t for the device 10578 * incmd - ptr to a valid uscsi_cmd struct 10579 * flag - bit flag, indicating open settings, 32/64 bit type 10580 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10581 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10582 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10583 * to use the USCSI "direct" chain and bypass the normal 10584 * command waitq. 10585 * 10586 * Return Code: 0 - successful completion of the given command 10587 * EIO - scsi_uscsi_handle_command() failed 10588 * ENXIO - soft state not found for specified dev 10589 * EINVAL 10590 * EFAULT - copyin/copyout error 10591 * return code of scsi_uscsi_handle_command(): 10592 * EIO 10593 * ENXIO 10594 * EACCES 10595 * 10596 * Context: Waits for command to complete. Can sleep. 10597 */ 10598 10599 static int 10600 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10601 enum uio_seg dataspace, int path_flag) 10602 { 10603 struct sd_uscsi_info *uip; 10604 struct uscsi_cmd *uscmd; 10605 struct sd_lun *un; 10606 int format = 0; 10607 int rval; 10608 10609 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10610 if (un == NULL) { 10611 return (ENXIO); 10612 } 10613 10614 ASSERT(!mutex_owned(SD_MUTEX(un))); 10615 10616 #ifdef SDDEBUG 10617 switch (dataspace) { 10618 case UIO_USERSPACE: 10619 SD_TRACE(SD_LOG_IO, un, 10620 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10621 break; 10622 case UIO_SYSSPACE: 10623 SD_TRACE(SD_LOG_IO, un, 10624 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10625 break; 10626 default: 10627 SD_TRACE(SD_LOG_IO, un, 10628 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10629 break; 10630 } 10631 #endif 10632 10633 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10634 SD_ADDRESS(un), &uscmd); 10635 if (rval != 0) { 10636 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10637 "scsi_uscsi_alloc_and_copyin failed\n"); 10638 return (rval); 10639 } 10640 10641 if ((uscmd->uscsi_cdb != NULL) && 10642 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10643 mutex_enter(SD_MUTEX(un)); 10644 un->un_f_format_in_progress = TRUE; 10645 mutex_exit(SD_MUTEX(un)); 10646 format = 1; 10647 } 10648 10649 /* 10650 * Allocate an sd_uscsi_info struct and fill it with the info 10651 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10652 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10653 * since we allocate the buf here in this function, we do not 10654 * need to preserve the prior contents of b_private.
10655 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10656 */ 10657 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10658 uip->ui_flags = path_flag; 10659 uip->ui_cmdp = uscmd; 10660 10661 /* 10662 * Commands sent with priority are intended for error recovery 10663 * situations, and do not have retries performed. 10664 */ 10665 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10666 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10667 } 10668 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10669 10670 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10671 sd_uscsi_strategy, NULL, uip); 10672 10673 #ifdef SDDEBUG 10674 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10675 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10676 uscmd->uscsi_status, uscmd->uscsi_resid); 10677 if (uscmd->uscsi_bufaddr != NULL) { 10678 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10679 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10680 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10681 if (dataspace == UIO_SYSSPACE) { 10682 SD_DUMP_MEMORY(un, SD_LOG_IO, 10683 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10684 uscmd->uscsi_buflen, SD_LOG_HEX); 10685 } 10686 } 10687 #endif 10688 10689 if (format == 1) { 10690 mutex_enter(SD_MUTEX(un)); 10691 un->un_f_format_in_progress = FALSE; 10692 mutex_exit(SD_MUTEX(un)); 10693 } 10694 10695 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10696 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10697 10698 return (rval); 10699 } 10700 10701 10702 /* 10703 * Function: sd_buf_iodone 10704 * 10705 * Description: Frees the sd_xbuf & returns the buf to its originator. 10706 * 10707 * Context: May be called from interrupt context. 10708 */ 10709 /* ARGSUSED */ 10710 static void 10711 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10712 { 10713 struct sd_xbuf *xp; 10714 10715 ASSERT(un != NULL); 10716 ASSERT(bp != NULL); 10717 ASSERT(!mutex_owned(SD_MUTEX(un))); 10718 10719 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10720 10721 xp = SD_GET_XBUF(bp); 10722 ASSERT(xp != NULL); 10723 10724 mutex_enter(SD_MUTEX(un)); 10725 10726 /* 10727 * Record the time at which the cmd completed. 10728 * This is used for determining if the device has been 10729 * idle long enough to be marked idle to the PM framework, 10730 * which lowers the overhead and therefore improves 10731 * performance per I/O operation. 10732 */ 10733 un->un_pm_idle_time = ddi_get_time(); 10734 10735 un->un_ncmds_in_driver--; 10736 ASSERT(un->un_ncmds_in_driver >= 0); 10737 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10738 un->un_ncmds_in_driver); 10739 10740 mutex_exit(SD_MUTEX(un)); 10741 10742 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10743 biodone(bp); /* bp is gone after this */ 10744 10745 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10746 } 10747 10748 10749 /* 10750 * Function: sd_uscsi_iodone 10751 * 10752 * Description: Frees the sd_xbuf & returns the buf to its originator. 10753 * 10754 * Context: May be called from interrupt context.
10755 */ 10756 /* ARGSUSED */ 10757 static void 10758 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10759 { 10760 struct sd_xbuf *xp; 10761 10762 ASSERT(un != NULL); 10763 ASSERT(bp != NULL); 10764 10765 xp = SD_GET_XBUF(bp); 10766 ASSERT(xp != NULL); 10767 ASSERT(!mutex_owned(SD_MUTEX(un))); 10768 10769 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10770 10771 bp->b_private = xp->xb_private; 10772 10773 mutex_enter(SD_MUTEX(un)); 10774 10775 /* 10776 * Grab time when the cmd completed. 10777 * This is used for determining if the system has been 10778 * idle long enough to make it idle to the PM framework. 10779 * This is for lowering the overhead, and therefore improving 10780 * performance per I/O operation. 10781 */ 10782 un->un_pm_idle_time = ddi_get_time(); 10783 10784 un->un_ncmds_in_driver--; 10785 ASSERT(un->un_ncmds_in_driver >= 0); 10786 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10787 un->un_ncmds_in_driver); 10788 10789 mutex_exit(SD_MUTEX(un)); 10790 10791 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 10792 SENSE_LENGTH) { 10793 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 10794 MAX_SENSE_LENGTH); 10795 } else { 10796 kmem_free(xp, sizeof (struct sd_xbuf)); 10797 } 10798 10799 biodone(bp); 10800 10801 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10802 } 10803 10804 10805 /* 10806 * Function: sd_mapblockaddr_iostart 10807 * 10808 * Description: Verify that the request lies within the partition limits for 10809 * the indicated minor device. Issue an "overrun" buf if the 10810 * request would exceed the partition range. Converts a 10811 * partition-relative block address to an absolute one. 10812 * 10813 * Context: Can sleep 10814 * 10815 * Issues: This follows what the old code did, in terms of accessing 10816 * some of the partition info in the unit struct without holding 10817 * the mutex. This is a general issue, if the partition info 10818 * can be altered while IO is in progress... as soon as we send 10819 * a buf, its partitioning can be invalid before it gets to the 10820 * device. Probably the right fix is to move partitioning out 10821 * of the driver entirely. 10822 */ 10823 10824 static void 10825 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10826 { 10827 diskaddr_t nblocks; /* #blocks in the given partition */ 10828 daddr_t blocknum; /* Block number specified by the buf */ 10829 size_t requested_nblocks; 10830 size_t available_nblocks; 10831 int partition; 10832 diskaddr_t partition_offset; 10833 struct sd_xbuf *xp; 10834 10835 10836 ASSERT(un != NULL); 10837 ASSERT(bp != NULL); 10838 ASSERT(!mutex_owned(SD_MUTEX(un))); 10839 10840 SD_TRACE(SD_LOG_IO_PARTITION, un, 10841 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10842 10843 xp = SD_GET_XBUF(bp); 10844 ASSERT(xp != NULL); 10845 10846 /* 10847 * If the geometry is not indicated as valid, attempt to access 10848 * the unit & verify the geometry/label. This can be the case for 10849 * removable-media devices, or if the device was opened in 10850 * NDELAY/NONBLOCK mode. 10851 */ 10852 if (!SD_IS_VALID_LABEL(un) && 10853 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10854 /* 10855 * For removable devices it is possible to start an I/O 10856 * without media by opening the device in nodelay mode. 10857 * Also for writable CDs there can be many scenarios where 10858 * there is no geometry yet but volume manager is trying to 10859 * issue a read() just because it can see TOC on the CD. So 10860 * do not print a message for removables.
10861 */ 10862 if (!un->un_f_has_removable_media) { 10863 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10864 "i/o to invalid geometry\n"); 10865 } 10866 bioerror(bp, EIO); 10867 bp->b_resid = bp->b_bcount; 10868 SD_BEGIN_IODONE(index, un, bp); 10869 return; 10870 } 10871 10872 partition = SDPART(bp->b_edev); 10873 10874 nblocks = 0; 10875 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10876 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10877 10878 /* 10879 * blocknum is the starting block number of the request. At this 10880 * point it is still relative to the start of the minor device. 10881 */ 10882 blocknum = xp->xb_blkno; 10883 10884 /* 10885 * Legacy: If the starting block number is one past the last block 10886 * in the partition, do not set B_ERROR in the buf. 10887 */ 10888 if (blocknum == nblocks) { 10889 goto error_exit; 10890 } 10891 10892 /* 10893 * Confirm that the first block of the request lies within the 10894 * partition limits. Also, the requested number of bytes must be 10895 * a multiple of the system block size. 10896 */ 10897 if ((blocknum < 0) || (blocknum >= nblocks) || 10898 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10899 bp->b_flags |= B_ERROR; 10900 goto error_exit; 10901 } 10902 10903 /* 10904 * If the requested # blocks exceeds the available # blocks, that 10905 * is an overrun of the partition. 10906 */ 10907 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10908 available_nblocks = (size_t)(nblocks - blocknum); 10909 ASSERT(nblocks >= blocknum); 10910 10911 if (requested_nblocks > available_nblocks) { 10912 /* 10913 * Allocate an "overrun" buf to allow the request to proceed 10914 * for the amount of space available in the partition. The 10915 * amount not transferred will be added into the b_resid 10916 * when the operation is complete. The overrun buf 10917 * replaces the original buf here, and the original buf 10918 * is saved inside the overrun buf, for later use. 10919 */ 10920 size_t resid = SD_SYSBLOCKS2BYTES(un, 10921 (offset_t)(requested_nblocks - available_nblocks)); 10922 size_t count = bp->b_bcount - resid; 10923 /* 10924 * Note: count is an unsigned entity, thus it'll NEVER 10925 * be less than 0, so ASSERT that the original values are 10926 * correct. 10927 */ 10928 ASSERT(bp->b_bcount >= resid); 10929 10930 bp = sd_bioclone_alloc(bp, count, blocknum, 10931 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10932 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10933 ASSERT(xp != NULL); 10934 } 10935 10936 /* At this point there should be no residual for this buf. */ 10937 ASSERT(bp->b_resid == 0); 10938 10939 /* Convert the block number to an absolute address. */ 10940 xp->xb_blkno += partition_offset; 10941 10942 SD_NEXT_IOSTART(index, un, bp); 10943 10944 SD_TRACE(SD_LOG_IO_PARTITION, un, 10945 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10946 10947 return; 10948 10949 error_exit: 10950 bp->b_resid = bp->b_bcount; 10951 SD_BEGIN_IODONE(index, un, bp); 10952 SD_TRACE(SD_LOG_IO_PARTITION, un, 10953 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10954 } 10955 10956 10957 /* 10958 * Function: sd_mapblockaddr_iodone 10959 * 10960 * Description: Completion-side processing for partition management. 10961 * 10962 * Context: May be called under interrupt context 10963 */ 10964 10965 static void 10966 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10967 { 10968 /* int partition; */ /* Not used, see below.
*/ 10969 ASSERT(un != NULL); 10970 ASSERT(bp != NULL); 10971 ASSERT(!mutex_owned(SD_MUTEX(un))); 10972 10973 SD_TRACE(SD_LOG_IO_PARTITION, un, 10974 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10975 10976 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10977 /* 10978 * We have an "overrun" buf to deal with... 10979 */ 10980 struct sd_xbuf *xp; 10981 struct buf *obp; /* ptr to the original buf */ 10982 10983 xp = SD_GET_XBUF(bp); 10984 ASSERT(xp != NULL); 10985 10986 /* Retrieve the pointer to the original buf */ 10987 obp = (struct buf *)xp->xb_private; 10988 ASSERT(obp != NULL); 10989 10990 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10991 bioerror(obp, bp->b_error); 10992 10993 sd_bioclone_free(bp); 10994 10995 /* 10996 * Get back the original buf. 10997 * Note that since the restoration of xb_blkno below 10998 * was removed, the sd_xbuf is not needed. 10999 */ 11000 bp = obp; 11001 /* 11002 * xp = SD_GET_XBUF(bp); 11003 * ASSERT(xp != NULL); 11004 */ 11005 } 11006 11007 /* 11008 * Convert sd->xb_blkno back to a minor-device relative value. 11009 * Note: this has been commented out, as it is not needed in the 11010 * current implementation of the driver (ie, since this function 11011 * is at the top of the layering chains, so the info will be 11012 * discarded) and it is in the "hot" IO path. 11013 * 11014 * partition = getminor(bp->b_edev) & SDPART_MASK; 11015 * xp->xb_blkno -= un->un_offset[partition]; 11016 */ 11017 11018 SD_NEXT_IODONE(index, un, bp); 11019 11020 SD_TRACE(SD_LOG_IO_PARTITION, un, 11021 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 11022 } 11023 11024 11025 /* 11026 * Function: sd_mapblocksize_iostart 11027 * 11028 * Description: Convert between system block size (un->un_sys_blocksize) 11029 * and target block size (un->un_tgt_blocksize). 11030 * 11031 * Context: Can sleep to allocate resources. 11032 * 11033 * Assumptions: A higher layer has already performed any partition validation, 11034 * and converted the xp->xb_blkno to an absolute value relative 11035 * to the start of the device. 11036 * 11037 * It is also assumed that the higher layer has implemented 11038 * an "overrun" mechanism for the case where the request would 11039 * read/write beyond the end of a partition. In this case we 11040 * assume (and ASSERT) that bp->b_resid == 0. 11041 * 11042 * Note: The implementation for this routine assumes the target 11043 * block size remains constant between allocation and transport. 11044 */ 11045 11046 static void 11047 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 11048 { 11049 struct sd_mapblocksize_info *bsp; 11050 struct sd_xbuf *xp; 11051 offset_t first_byte; 11052 daddr_t start_block, end_block; 11053 daddr_t request_bytes; 11054 ushort_t is_aligned = FALSE; 11055 11056 ASSERT(un != NULL); 11057 ASSERT(bp != NULL); 11058 ASSERT(!mutex_owned(SD_MUTEX(un))); 11059 ASSERT(bp->b_resid == 0); 11060 11061 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11062 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 11063 11064 /* 11065 * For a non-writable CD, a write request is an error 11066 */ 11067 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 11068 (un->un_f_mmc_writable_media == FALSE)) { 11069 bioerror(bp, EIO); 11070 bp->b_resid = bp->b_bcount; 11071 SD_BEGIN_IODONE(index, un, bp); 11072 return; 11073 } 11074 11075 /* 11076 * We do not need a shadow buf if the device is using 11077 * un->un_sys_blocksize as its block size or if bcount == 0. 
11078 * In this case there is no layer-private data block allocated. 11079 */ 11080 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11081 (bp->b_bcount == 0)) { 11082 goto done; 11083 } 11084 11085 #if defined(__i386) || defined(__amd64) 11086 /* We do not support non-block-aligned transfers for ROD devices */ 11087 ASSERT(!ISROD(un)); 11088 #endif 11089 11090 xp = SD_GET_XBUF(bp); 11091 ASSERT(xp != NULL); 11092 11093 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11094 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 11095 un->un_tgt_blocksize, un->un_sys_blocksize); 11096 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11097 "request start block:0x%x\n", xp->xb_blkno); 11098 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11099 "request len:0x%x\n", bp->b_bcount); 11100 11101 /* 11102 * Allocate the layer-private data area for the mapblocksize layer. 11103 * Layers are allowed to use the xb_private member of the sd_xbuf 11104 * struct to store the pointer to their layer-private data block, but 11105 * each layer also has the responsibility of restoring the prior 11106 * contents of xb_private before returning the buf/xbuf to the 11107 * higher layer that sent it. 11108 * 11109 * Here we save the prior contents of xp->xb_private into the 11110 * bsp->mbs_oprivate field of our layer-private data area. This value 11111 * is restored by sd_mapblocksize_iodone() just prior to freeing up 11112 * the layer-private area and returning the buf/xbuf to the layer 11113 * that sent it. 11114 * 11115 * Note that here we use kmem_zalloc for the allocation as there are 11116 * parts of the mapblocksize code that expect certain fields to be 11117 * zero unless explicitly set to a required value. 11118 */ 11119 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11120 bsp->mbs_oprivate = xp->xb_private; 11121 xp->xb_private = bsp; 11122 11123 /* 11124 * This treats the data on the disk (target) as an array of bytes. 11125 * first_byte is the byte offset, from the beginning of the device, 11126 * to the location of the request. This is converted from a 11127 * un->un_sys_blocksize block address to a byte offset, and then back 11128 * to a block address based upon a un->un_tgt_blocksize block size. 11129 * 11130 * xp->xb_blkno should be absolute upon entry into this function, 11131 * but it is based upon partitions that use the "system" 11132 * block size. It must be adjusted to reflect the block size of 11133 * the target. 11134 * 11135 * Note that end_block is actually the block that follows the last 11136 * block of the request, but that's what is needed for the computation. 11137 */ 11138 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11139 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 11140 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 11141 un->un_tgt_blocksize; 11142 11143 /* request_bytes is rounded up to a multiple of the target block size */ 11144 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 11145 11146 /* 11147 * See if the starting address of the request and the request 11148 * length are aligned on a un->un_tgt_blocksize boundary. If aligned, 11149 * then we do not need to allocate a shadow buf to handle the request. 11150 */ 11151 if (((first_byte % un->un_tgt_blocksize) == 0) && 11152 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 11153 is_aligned = TRUE; 11154 } 11155 11156 if ((bp->b_flags & B_READ) == 0) { 11157 /* 11158 * Lock the range for a write operation.
An aligned request is 11159 * considered a simple write; otherwise the request must be a 11160 * read-modify-write. 11161 */ 11162 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 11163 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 11164 } 11165 11166 /* 11167 * Allocate a shadow buf if the request is not aligned. Also, this is 11168 * where the READ command is generated for a read-modify-write. (The 11169 * write phase is deferred until after the read completes.) 11170 */ 11171 if (is_aligned == FALSE) { 11172 11173 struct sd_mapblocksize_info *shadow_bsp; 11174 struct sd_xbuf *shadow_xp; 11175 struct buf *shadow_bp; 11176 11177 /* 11178 * Allocate the shadow buf and its associated xbuf. Note that 11179 * after this call the xb_blkno value in both the original 11180 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 11181 * same: absolute (relative to the start of the device) and 11182 * adjusted for the target block size. The b_blkno in the 11183 * shadow buf will also be set to this value. We should never 11184 * change b_blkno in the original bp however. 11185 * 11186 * Note also that the shadow buf will always need to be a 11187 * READ command, regardless of whether the incoming command 11188 * is a READ or a WRITE. 11189 */ 11190 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 11191 xp->xb_blkno, 11192 (int (*)(struct buf *)) sd_mapblocksize_iodone); 11193 11194 shadow_xp = SD_GET_XBUF(shadow_bp); 11195 11196 /* 11197 * Allocate the layer-private data for the shadow buf. 11198 * (No need to preserve xb_private in the shadow xbuf.) 11199 */ 11200 shadow_xp->xb_private = shadow_bsp = 11201 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11202 11203 /* 11204 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 11205 * to figure out where the start of the user data is (based upon 11206 * the system block size) in the data returned by the READ 11207 * command (which will be based upon the target blocksize). Note 11208 * that this is only really used if the request is unaligned. 11209 */ 11210 bsp->mbs_copy_offset = (ssize_t)(first_byte - 11211 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 11212 ASSERT((bsp->mbs_copy_offset >= 0) && 11213 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 11214 11215 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 11216 11217 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 11218 11219 /* Transfer the wmap (if any) to the shadow buf */ 11220 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 11221 bsp->mbs_wmp = NULL; 11222 11223 /* 11224 * The shadow buf goes on from here in place of the 11225 * original buf. 11226 */ 11227 shadow_bsp->mbs_orig_bp = bp; 11228 bp = shadow_bp; 11229 } 11230 11231 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11232 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 11233 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11234 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 11235 request_bytes); 11236 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11237 "sd_mapblocksize_iostart: shadow buf:0x%p\n", bp); 11238 11239 done: 11240 SD_NEXT_IOSTART(index, un, bp); 11241 11242 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11243 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 11244 } 11245 11246 11247 /* 11248 * Function: sd_mapblocksize_iodone 11249 * 11250 * Description: Completion-side processing for block-size mapping.
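 * Worked example of the conversion this layer performs (a sketch; the values assume un_sys_blocksize == 512 and un_tgt_blocksize == 2048): * xb_blkno = 3, b_bcount = 1024 * first_byte = 3 * 512 = 1536 * start_block = 1536 / 2048 = 0 * end_block = (1536 + 1024 + 2047) / 2048 = 2 * request_bytes = (2 - 0) * 2048 = 4096 * copy_offset = 1536 - (0 * 2048) = 1536 * The request is unaligned, so iostart issues a 4096-byte shadow READ and this routine copies the caller's 1024 bytes starting at byte 1536 of the shadow buffer.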
11251 * 11252 * Context: May be called under interrupt context 11253 */ 11254 11255 static void 11256 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11257 { 11258 struct sd_mapblocksize_info *bsp; 11259 struct sd_xbuf *xp; 11260 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11261 struct buf *orig_bp; /* ptr to the original buf */ 11262 offset_t shadow_end; 11263 offset_t request_end; 11264 offset_t shadow_start; 11265 ssize_t copy_offset; 11266 size_t copy_length; 11267 size_t shortfall; 11268 uint_t is_write; /* TRUE if this bp is a WRITE */ 11269 uint_t has_wmap; /* TRUE if this bp has a wmap */ 11270 11271 ASSERT(un != NULL); 11272 ASSERT(bp != NULL); 11273 11274 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11275 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11276 11277 /* 11278 * There is no shadow buf or layer-private data if the target is 11279 * using un->un_sys_blocksize as its block size or if bcount == 0. 11280 */ 11281 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11282 (bp->b_bcount == 0)) { 11283 goto exit; 11284 } 11285 11286 xp = SD_GET_XBUF(bp); 11287 ASSERT(xp != NULL); 11288 11289 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11290 bsp = xp->xb_private; 11291 11292 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11293 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11294 11295 if (is_write) { 11296 /* 11297 * For a WRITE request we must free up the block range that 11298 * we have locked up. This holds regardless of whether this is 11299 * an aligned write request or a read-modify-write request. 11300 */ 11301 sd_range_unlock(un, bsp->mbs_wmp); 11302 bsp->mbs_wmp = NULL; 11303 } 11304 11305 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11306 /* 11307 * An aligned read or write command will have no shadow buf; 11308 * there is not much else to do with it. 11309 */ 11310 goto done; 11311 } 11312 11313 orig_bp = bsp->mbs_orig_bp; 11314 ASSERT(orig_bp != NULL); 11315 orig_xp = SD_GET_XBUF(orig_bp); 11316 ASSERT(orig_xp != NULL); 11317 ASSERT(!mutex_owned(SD_MUTEX(un))); 11318 11319 if (!is_write && has_wmap) { 11320 /* 11321 * A READ with a wmap means this is the READ phase of a 11322 * read-modify-write. If an error occurred on the READ, then 11323 * we do not proceed with the WRITE phase or copy any data. 11324 * Just release the write maps and return with an error. 11325 */ 11326 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11327 orig_bp->b_resid = orig_bp->b_bcount; 11328 bioerror(orig_bp, bp->b_error); 11329 sd_range_unlock(un, bsp->mbs_wmp); 11330 goto freebuf_done; 11331 } 11332 } 11333 11334 /* 11335 * Here is where we set up to copy the data from the shadow buf 11336 * into the space associated with the original buf. 11337 * 11338 * To deal with the conversion between block sizes, these 11339 * computations treat the data as an array of bytes, with the 11340 * first byte (byte 0) corresponding to the first byte in the 11341 * first block on the disk. 11342 */ 11343 11344 /* 11345 * shadow_start and shadow_end indicate the location and extent of 11346 * the data returned with the shadow IO request. 11347 */ 11348 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11349 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11350 11351 /* 11352 * copy_offset gives the offset (in bytes) from the start of the first 11353 * block of the READ request to the beginning of the data.
We retrieve 11354 * this value from the layer-private data area (mbs_copy_offset), where 11355 * it was saved by sd_mapblocksize_iostart(). copy_length gives the amount 11356 * of data to be copied (in bytes). 11357 */ 11358 copy_offset = bsp->mbs_copy_offset; 11359 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11360 copy_length = orig_bp->b_bcount; 11361 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11362 11363 /* 11364 * Set up the resid and error fields of orig_bp as appropriate. 11365 */ 11366 if (shadow_end >= request_end) { 11367 /* We got all the requested data; set resid to zero */ 11368 orig_bp->b_resid = 0; 11369 } else { 11370 /* 11371 * We failed to get enough data to fully satisfy the original 11372 * request. Just copy back whatever data we got and set 11373 * up the residual and error code as required. 11374 * 11375 * 'shortfall' is the amount by which the data received with the 11376 * shadow buf has "fallen short" of the requested amount. 11377 */ 11378 shortfall = (size_t)(request_end - shadow_end); 11379 11380 if (shortfall > orig_bp->b_bcount) { 11381 /* 11382 * We did not get enough data to even partially 11383 * fulfill the original request. The residual is 11384 * equal to the amount requested. 11385 */ 11386 orig_bp->b_resid = orig_bp->b_bcount; 11387 } else { 11388 /* 11389 * We did not get all the data that we requested 11390 * from the device, but we will try to return what 11391 * portion we did get. 11392 */ 11393 orig_bp->b_resid = shortfall; 11394 } 11395 ASSERT(copy_length >= orig_bp->b_resid); 11396 copy_length -= orig_bp->b_resid; 11397 } 11398 11399 /* Propagate the error code from the shadow buf to the original buf */ 11400 bioerror(orig_bp, bp->b_error); 11401 11402 if (is_write) { 11403 goto freebuf_done; /* No data copying for a WRITE */ 11404 } 11405 11406 if (has_wmap) { 11407 /* 11408 * This is a READ command from the READ phase of a 11409 * read-modify-write request. We have to copy the data given 11410 * by the user OVER the data returned by the READ command, 11411 * then convert the command from a READ to a WRITE and send 11412 * it back to the target. 11413 */ 11414 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11415 copy_length); 11416 11417 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11418 11419 /* 11420 * Dispatch the WRITE command to the taskq thread, which 11421 * will in turn send the command to the target. When the 11422 * WRITE command completes, we (sd_mapblocksize_iodone()) 11423 * will get called again as part of the iodone chain 11424 * processing for it. Note that we will still be dealing 11425 * with the shadow buf at that point. 11426 */ 11427 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11428 KM_NOSLEEP) != 0) { 11429 /* 11430 * Dispatch was successful so we are done. Return 11431 * without going any higher up the iodone chain. Do 11432 * not free up any layer-private data until after the 11433 * WRITE completes. 11434 */ 11435 return; 11436 } 11437 11438 /* 11439 * Dispatch of the WRITE command failed; set up the error 11440 * condition and send this IO back up the iodone chain. 11441 */ 11442 bioerror(orig_bp, EIO); 11443 orig_bp->b_resid = orig_bp->b_bcount; 11444 11445 } else { 11446 /* 11447 * This is a regular READ request (ie, not a RMW). Copy the 11448 * data from the shadow buf into the original buf.
The 11449 * copy_offset compensates for any "misalignment" between the 11450 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11451 * original buf (with its un->un_sys_blocksize blocks). 11452 */ 11453 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11454 copy_length); 11455 } 11456 11457 freebuf_done: 11458 11459 /* 11460 * At this point we still have both the shadow buf AND the original 11461 * buf to deal with, as well as the layer-private data area in each. 11462 * Local variables are as follows: 11463 * 11464 * bp -- points to shadow buf 11465 * xp -- points to xbuf of shadow buf 11466 * bsp -- points to layer-private data area of shadow buf 11467 * orig_bp -- points to original buf 11468 * 11469 * First free the shadow buf and its associated xbuf, then free the 11470 * layer-private data area from the shadow buf. There is no need to 11471 * restore xb_private in the shadow xbuf. 11472 */ 11473 sd_shadow_buf_free(bp); 11474 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11475 11476 /* 11477 * Now update the local variables to point to the original buf, xbuf, 11478 * and layer-private area. 11479 */ 11480 bp = orig_bp; 11481 xp = SD_GET_XBUF(bp); 11482 ASSERT(xp != NULL); 11483 ASSERT(xp == orig_xp); 11484 bsp = xp->xb_private; 11485 ASSERT(bsp != NULL); 11486 11487 done: 11488 /* 11489 * Restore xb_private to whatever it was set to by the next higher 11490 * layer in the chain, then free the layer-private data area. 11491 */ 11492 xp->xb_private = bsp->mbs_oprivate; 11493 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11494 11495 exit: 11496 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11497 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11498 11499 SD_NEXT_IODONE(index, un, bp); 11500 } 11501 11502 11503 /* 11504 * Function: sd_checksum_iostart 11505 * 11506 * Description: A stub function for a layer that's currently not used. 11507 * For now just a placeholder. 11508 * 11509 * Context: Kernel thread context 11510 */ 11511 11512 static void 11513 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11514 { 11515 ASSERT(un != NULL); 11516 ASSERT(bp != NULL); 11517 ASSERT(!mutex_owned(SD_MUTEX(un))); 11518 SD_NEXT_IOSTART(index, un, bp); 11519 } 11520 11521 11522 /* 11523 * Function: sd_checksum_iodone 11524 * 11525 * Description: A stub function for a layer that's currently not used. 11526 * For now just a placeholder. 11527 * 11528 * Context: May be called under interrupt context 11529 */ 11530 11531 static void 11532 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11533 { 11534 ASSERT(un != NULL); 11535 ASSERT(bp != NULL); 11536 ASSERT(!mutex_owned(SD_MUTEX(un))); 11537 SD_NEXT_IODONE(index, un, bp); 11538 } 11539 11540 11541 /* 11542 * Function: sd_checksum_uscsi_iostart 11543 * 11544 * Description: A stub function for a layer that's currently not used. 11545 * For now just a placeholder. 11546 * 11547 * Context: Kernel thread context 11548 */ 11549 11550 static void 11551 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11552 { 11553 ASSERT(un != NULL); 11554 ASSERT(bp != NULL); 11555 ASSERT(!mutex_owned(SD_MUTEX(un))); 11556 SD_NEXT_IOSTART(index, un, bp); 11557 } 11558 11559 11560 /* 11561 * Function: sd_checksum_uscsi_iodone 11562 * 11563 * Description: A stub function for a layer that's currently not used. 11564 * For now just a placeholder. 
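 * (If a checksum layer were ever implemented, its iodone side might verify the data before passing the buf up -- a purely hypothetical sketch, as sd_verify_checksum() does not exist in this driver: * if (sd_verify_checksum(un, bp) != 0) * bioerror(bp, EIO); * SD_NEXT_IODONE(index, un, bp); * )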
11565 * 11566 * Context: May be called under interrupt context 11567 */ 11568 11569 static void 11570 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11571 { 11572 ASSERT(un != NULL); 11573 ASSERT(bp != NULL); 11574 ASSERT(!mutex_owned(SD_MUTEX(un))); 11575 SD_NEXT_IODONE(index, un, bp); 11576 } 11577 11578 11579 /* 11580 * Function: sd_pm_iostart 11581 * 11582 * Description: iostart-side routine for power management. 11583 * 11584 * Context: Kernel thread context 11585 */ 11586 11587 static void 11588 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11589 { 11590 ASSERT(un != NULL); 11591 ASSERT(bp != NULL); 11592 ASSERT(!mutex_owned(SD_MUTEX(un))); 11593 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11594 11595 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11596 11597 if (sd_pm_entry(un) != DDI_SUCCESS) { 11598 /* 11599 * Set up to return the failed buf back up the 'iodone' 11600 * side of the calling chain. 11601 */ 11602 bioerror(bp, EIO); 11603 bp->b_resid = bp->b_bcount; 11604 11605 SD_BEGIN_IODONE(index, un, bp); 11606 11607 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11608 return; 11609 } 11610 11611 SD_NEXT_IOSTART(index, un, bp); 11612 11613 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11614 } 11615 11616 11617 /* 11618 * Function: sd_pm_iodone 11619 * 11620 * Description: iodone-side routine for power management. 11621 * 11622 * Context: May be called from interrupt context 11623 */ 11624 11625 static void 11626 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11627 { 11628 ASSERT(un != NULL); 11629 ASSERT(bp != NULL); 11630 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11631 11632 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11633 11634 /* 11635 * After attach the following flag is only read, so don't 11636 * take the penalty of acquiring a mutex for it. 11637 */ 11638 if (un->un_f_pm_is_enabled == TRUE) { 11639 sd_pm_exit(un); 11640 } 11641 11642 SD_NEXT_IODONE(index, un, bp); 11643 11644 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11645 } 11646 11647 11648 /* 11649 * Function: sd_core_iostart 11650 * 11651 * Description: Primary driver function for enqueuing buf(9S) structs from 11652 * the system and initiating IO to the target device 11653 * 11654 * Context: Kernel thread context. Can sleep. 11655 * 11656 * Assumptions: - The given xp->xb_blkno is absolute 11657 * (ie, relative to the start of the device). 11658 * - The IO is to be done using the native blocksize of 11659 * the device, as specified in un->un_tgt_blocksize. 11660 */ 11661 /* ARGSUSED */ 11662 static void 11663 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11664 { 11665 struct sd_xbuf *xp; 11666 11667 ASSERT(un != NULL); 11668 ASSERT(bp != NULL); 11669 ASSERT(!mutex_owned(SD_MUTEX(un))); 11670 ASSERT(bp->b_resid == 0); 11671 11672 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11673 11674 xp = SD_GET_XBUF(bp); 11675 ASSERT(xp != NULL); 11676 11677 mutex_enter(SD_MUTEX(un)); 11678 11679 /* 11680 * If we are currently in the failfast state, fail any new IO 11681 * that has B_FAILFAST set, then return. 11682 */ 11683 if ((bp->b_flags & B_FAILFAST) && 11684 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11685 mutex_exit(SD_MUTEX(un)); 11686 bioerror(bp, EIO); 11687 bp->b_resid = bp->b_bcount; 11688 SD_BEGIN_IODONE(index, un, bp); 11689 return; 11690 } 11691 11692 if (SD_IS_DIRECT_PRIORITY(xp)) { 11693 /* 11694 * Priority command -- transport it immediately.
11695 * 11696 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11697 * because all direct priority commands should be associated 11698 * with error recovery actions which we don't want to retry. 11699 */ 11700 sd_start_cmds(un, bp); 11701 } else { 11702 /* 11703 * Normal command -- add it to the wait queue, then start 11704 * transporting commands from the wait queue. 11705 */ 11706 sd_add_buf_to_waitq(un, bp); 11707 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11708 sd_start_cmds(un, NULL); 11709 } 11710 11711 mutex_exit(SD_MUTEX(un)); 11712 11713 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11714 } 11715 11716 11717 /* 11718 * Function: sd_init_cdb_limits 11719 * 11720 * Description: This is to handle scsi_pkt initialization differences 11721 * between the driver platforms. 11722 * 11723 * Legacy behaviors: 11724 * 11725 * If the block number or the sector count exceeds the 11726 * capabilities of a Group 0 command, shift over to a 11727 * Group 1 command. We don't blindly use Group 1 11728 * commands because a) some drives (CDC Wren IVs) get a 11729 * bit confused, and b) there is probably a fair amount 11730 * of speed difference for a target to receive and decode 11731 * a 10 byte command instead of a 6 byte command. 11732 * 11733 * The xfer time difference of 6 vs 10 byte CDBs is 11734 * still significant so this code is still worthwhile. 11735 * 10 byte CDBs are very inefficient with the fas HBA driver 11736 * and older disks. Each CDB byte took 1 usec with some 11737 * popular disks. 11738 * 11739 * Context: Must be called at attach time 11740 */ 11741 11742 static void 11743 sd_init_cdb_limits(struct sd_lun *un) 11744 { 11745 int hba_cdb_limit; 11746 11747 /* 11748 * Use CDB_GROUP1 commands for most devices except for 11749 * parallel SCSI fixed drives in which case we get better 11750 * performance using CDB_GROUP0 commands (where applicable). 11751 */ 11752 un->un_mincdb = SD_CDB_GROUP1; 11753 #if !defined(__fibre) 11754 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11755 !un->un_f_has_removable_media) { 11756 un->un_mincdb = SD_CDB_GROUP0; 11757 } 11758 #endif 11759 11760 /* 11761 * Try to read the max-cdb-length supported by HBA. 11762 */ 11763 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11764 if (0 >= un->un_max_hba_cdb) { 11765 un->un_max_hba_cdb = CDB_GROUP4; 11766 hba_cdb_limit = SD_CDB_GROUP4; 11767 } else if (0 < un->un_max_hba_cdb && 11768 un->un_max_hba_cdb < CDB_GROUP1) { 11769 hba_cdb_limit = SD_CDB_GROUP0; 11770 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11771 un->un_max_hba_cdb < CDB_GROUP5) { 11772 hba_cdb_limit = SD_CDB_GROUP1; 11773 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11774 un->un_max_hba_cdb < CDB_GROUP4) { 11775 hba_cdb_limit = SD_CDB_GROUP5; 11776 } else { 11777 hba_cdb_limit = SD_CDB_GROUP4; 11778 } 11779 11780 /* 11781 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11782 * commands for fixed disks unless we are building for a 32 bit 11783 * kernel. 11784 */ 11785 #ifdef _LP64 11786 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11787 min(hba_cdb_limit, SD_CDB_GROUP4); 11788 #else 11789 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11790 min(hba_cdb_limit, SD_CDB_GROUP1); 11791 #endif 11792 11793 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11794 ? sizeof (struct scsi_arq_status) : 1); 11795 un->un_cmd_timeout = (ushort_t)sd_io_time; 11796 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout; 11797 } 11798 11799 11800 /* 11801 * Function: sd_initpkt_for_buf 11802 * 11803 * Description: Allocate and initialize for transport a scsi_pkt struct, 11804 * based upon the info specified in the given buf struct. 11805 * 11806 * Assumes the xb_blkno in the request is absolute (ie, 11807 * relative to the start of the device, NOT the partition!). 11808 * Also assumes that the request is using the native block 11809 * size of the device (as returned by the READ CAPACITY 11810 * command). 11811 * 11812 * Return Code: SD_PKT_ALLOC_SUCCESS 11813 * SD_PKT_ALLOC_FAILURE 11814 * SD_PKT_ALLOC_FAILURE_NO_DMA 11815 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11816 * 11817 * Context: Kernel thread and may be called from software interrupt context 11818 * as part of a sdrunout callback. This function may not block or 11819 * call routines that block 11820 */ 11821 11822 static int 11823 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11824 { 11825 struct sd_xbuf *xp; 11826 struct scsi_pkt *pktp = NULL; 11827 struct sd_lun *un; 11828 size_t blockcount; 11829 daddr_t startblock; 11830 int rval; 11831 int cmd_flags; 11832 11833 ASSERT(bp != NULL); 11834 ASSERT(pktpp != NULL); 11835 xp = SD_GET_XBUF(bp); 11836 ASSERT(xp != NULL); 11837 un = SD_GET_UN(bp); 11838 ASSERT(un != NULL); 11839 ASSERT(mutex_owned(SD_MUTEX(un))); 11840 ASSERT(bp->b_resid == 0); 11841 11842 SD_TRACE(SD_LOG_IO_CORE, un, 11843 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11844 11845 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11846 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11847 /* 11848 * Already have a scsi_pkt -- just need DMA resources. 11849 * We must recompute the CDB in case the mapping returns 11850 * a nonzero pkt_resid. 11851 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11852 * that is being retried, the unmap/remap of the DMA resources 11853 * will result in the entire transfer starting over again 11854 * from the very first block. 11855 */ 11856 ASSERT(xp->xb_pktp != NULL); 11857 pktp = xp->xb_pktp; 11858 } else { 11859 pktp = NULL; 11860 } 11861 #endif /* __i386 || __amd64 */ 11862 11863 startblock = xp->xb_blkno; /* Absolute block num. */ 11864 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11865 11866 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11867 11868 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11869 11870 #else 11871 11872 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11873 11874 #endif 11875 11876 /* 11877 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11878 * call scsi_init_pkt, and build the CDB. 11879 */ 11880 rval = sd_setup_rw_pkt(un, &pktp, bp, 11881 cmd_flags, sdrunout, (caddr_t)un, 11882 startblock, blockcount); 11883 11884 if (rval == 0) { 11885 /* 11886 * Success. 11887 * 11888 * If partial DMA is being used and required for this transfer, 11889 * set it up here.
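 * For example (a sketch restating the code below, not new mechanism): if b_bcount maps to eight target blocks but the HBA can bind only six, scsi_init_pkt() leaves pkt_resid == 2 * un_tgt_blocksize; that value is saved in xb_dma_resid and pkt_resid is rezeroed, and sd_setup_next_rw_pkt() later advances the transfer window by the completed amount.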
11890 */ 11891 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11892 (pktp->pkt_resid != 0)) { 11893 11894 /* 11895 * Save the CDB length and pkt_resid for the 11896 * next xfer 11897 */ 11898 xp->xb_dma_resid = pktp->pkt_resid; 11899 11900 /* rezero resid */ 11901 pktp->pkt_resid = 0; 11902 11903 } else { 11904 xp->xb_dma_resid = 0; 11905 } 11906 11907 pktp->pkt_flags = un->un_tagflags; 11908 pktp->pkt_time = un->un_cmd_timeout; 11909 pktp->pkt_comp = sdintr; 11910 11911 pktp->pkt_private = bp; 11912 *pktpp = pktp; 11913 11914 SD_TRACE(SD_LOG_IO_CORE, un, 11915 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11916 11917 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11918 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11919 #endif 11920 11921 return (SD_PKT_ALLOC_SUCCESS); 11922 11923 } 11924 11925 /* 11926 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11927 * from sd_setup_rw_pkt. 11928 */ 11929 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11930 11931 if (rval == SD_PKT_ALLOC_FAILURE) { 11932 *pktpp = NULL; 11933 /* 11934 * Set the driver state to RWAIT to indicate the driver 11935 * is waiting on resource allocations. The driver will not 11936 * suspend, pm_suspend, or detach while the state is RWAIT. 11937 */ 11938 New_state(un, SD_STATE_RWAIT); 11939 11940 SD_ERROR(SD_LOG_IO_CORE, un, 11941 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 11942 11943 if ((bp->b_flags & B_ERROR) != 0) { 11944 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11945 } 11946 return (SD_PKT_ALLOC_FAILURE); 11947 } else { 11948 /* 11949 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11950 * 11951 * This should never happen. Maybe someone messed with the 11952 * kernel's minphys? 11953 */ 11954 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11955 "Request rejected: too large for CDB: " 11956 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11957 SD_ERROR(SD_LOG_IO_CORE, un, 11958 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11959 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11960 11961 } 11962 } 11963 11964 11965 /* 11966 * Function: sd_destroypkt_for_buf 11967 * 11968 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11969 * 11970 * Context: Kernel thread or interrupt context 11971 */ 11972 11973 static void 11974 sd_destroypkt_for_buf(struct buf *bp) 11975 { 11976 ASSERT(bp != NULL); 11977 ASSERT(SD_GET_UN(bp) != NULL); 11978 11979 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11980 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11981 11982 ASSERT(SD_GET_PKTP(bp) != NULL); 11983 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11984 11985 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11986 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11987 } 11988 11989 /* 11990 * Function: sd_setup_rw_pkt 11991 * 11992 * Description: Determines the appropriate CDB group for the requested LBA 11993 * and transfer length, calls scsi_init_pkt, and builds 11994 * the CDB. Do not use for partial DMA transfers except 11995 * for the initial transfer since the CDB size must 11996 * remain constant. 11997 * 11998 * Context: Kernel thread and may be called from software interrupt 11999 * context as part of a sdrunout callback.
This function may not 12000 * block or call routines that block 12001 */ 12002 12003 12004 int 12005 sd_setup_rw_pkt(struct sd_lun *un, 12006 struct scsi_pkt **pktpp, struct buf *bp, int flags, 12007 int (*callback)(caddr_t), caddr_t callback_arg, 12008 diskaddr_t lba, uint32_t blockcount) 12009 { 12010 struct scsi_pkt *return_pktp; 12011 union scsi_cdb *cdbp; 12012 struct sd_cdbinfo *cp = NULL; 12013 int i; 12014 12015 /* 12016 * See which size CDB to use, based upon the request. 12017 */ 12018 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 12019 12020 /* 12021 * Check lba and block count against sd_cdbtab limits. 12022 * In the partial DMA case, we have to use the same size 12023 * CDB for all the transfers. Check lba + blockcount 12024 * against the max LBA so we know that segment of the 12025 * transfer can use the CDB we select. 12026 */ 12027 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 12028 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 12029 12030 /* 12031 * The command will fit into the CDB type 12032 * specified by sd_cdbtab[i]. 12033 */ 12034 cp = sd_cdbtab + i; 12035 12036 /* 12037 * Call scsi_init_pkt so we can fill in the 12038 * CDB. 12039 */ 12040 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 12041 bp, cp->sc_grpcode, un->un_status_len, 0, 12042 flags, callback, callback_arg); 12043 12044 if (return_pktp != NULL) { 12045 12046 /* 12047 * Return new value of pkt 12048 */ 12049 *pktpp = return_pktp; 12050 12051 /* 12052 * To be safe, zero the CDB insuring there is 12053 * no leftover data from a previous command. 12054 */ 12055 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 12056 12057 /* 12058 * Handle partial DMA mapping 12059 */ 12060 if (return_pktp->pkt_resid != 0) { 12061 12062 /* 12063 * Not going to xfer as many blocks as 12064 * originally expected 12065 */ 12066 blockcount -= 12067 SD_BYTES2TGTBLOCKS(un, 12068 return_pktp->pkt_resid); 12069 } 12070 12071 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 12072 12073 /* 12074 * Set command byte based on the CDB 12075 * type we matched. 12076 */ 12077 cdbp->scc_cmd = cp->sc_grpmask | 12078 ((bp->b_flags & B_READ) ? 12079 SCMD_READ : SCMD_WRITE); 12080 12081 SD_FILL_SCSI1_LUN(un, return_pktp); 12082 12083 /* 12084 * Fill in LBA and length 12085 */ 12086 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 12087 (cp->sc_grpcode == CDB_GROUP4) || 12088 (cp->sc_grpcode == CDB_GROUP0) || 12089 (cp->sc_grpcode == CDB_GROUP5)); 12090 12091 if (cp->sc_grpcode == CDB_GROUP1) { 12092 FORMG1ADDR(cdbp, lba); 12093 FORMG1COUNT(cdbp, blockcount); 12094 return (0); 12095 } else if (cp->sc_grpcode == CDB_GROUP4) { 12096 FORMG4LONGADDR(cdbp, lba); 12097 FORMG4COUNT(cdbp, blockcount); 12098 return (0); 12099 } else if (cp->sc_grpcode == CDB_GROUP0) { 12100 FORMG0ADDR(cdbp, lba); 12101 FORMG0COUNT(cdbp, blockcount); 12102 return (0); 12103 } else if (cp->sc_grpcode == CDB_GROUP5) { 12104 FORMG5ADDR(cdbp, lba); 12105 FORMG5COUNT(cdbp, blockcount); 12106 return (0); 12107 } 12108 12109 /* 12110 * It should be impossible to not match one 12111 * of the CDB types above, so we should never 12112 * reach this point. Set the CDB command byte 12113 * to test-unit-ready to avoid writing 12114 * to somewhere we don't intend. 12115 */ 12116 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 12117 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12118 } else { 12119 /* 12120 * Couldn't get scsi_pkt 12121 */ 12122 return (SD_PKT_ALLOC_FAILURE); 12123 } 12124 } 12125 } 12126 12127 /* 12128 * None of the available CDB types were suitable. 
This really 12129 * should never happen: on a 64 bit system we support 12130 * READ16/WRITE16 which will hold an entire 64 bit disk address 12131 * and on a 32 bit system we will refuse to bind to a device 12132 * larger than 2TB so addresses will never be larger than 32 bits. 12133 */ 12134 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12135 } 12136 12137 /* 12138 * Function: sd_setup_next_rw_pkt 12139 * 12140 * Description: Setup packet for partial DMA transfers, except for the 12141 * initial transfer. sd_setup_rw_pkt should be used for 12142 * the initial transfer. 12143 * 12144 * Context: Kernel thread and may be called from interrupt context. 12145 */ 12146 12147 int 12148 sd_setup_next_rw_pkt(struct sd_lun *un, 12149 struct scsi_pkt *pktp, struct buf *bp, 12150 diskaddr_t lba, uint32_t blockcount) 12151 { 12152 uchar_t com; 12153 union scsi_cdb *cdbp; 12154 uchar_t cdb_group_id; 12155 12156 ASSERT(pktp != NULL); 12157 ASSERT(pktp->pkt_cdbp != NULL); 12158 12159 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 12160 com = cdbp->scc_cmd; 12161 cdb_group_id = CDB_GROUPID(com); 12162 12163 ASSERT((cdb_group_id == CDB_GROUPID_0) || 12164 (cdb_group_id == CDB_GROUPID_1) || 12165 (cdb_group_id == CDB_GROUPID_4) || 12166 (cdb_group_id == CDB_GROUPID_5)); 12167 12168 /* 12169 * Move pkt to the next portion of the xfer. 12170 * func is NULL_FUNC so we do not have to release 12171 * the disk mutex here. 12172 */ 12173 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 12174 NULL_FUNC, NULL) == pktp) { 12175 /* Success. Handle partial DMA */ 12176 if (pktp->pkt_resid != 0) { 12177 blockcount -= 12178 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 12179 } 12180 12181 cdbp->scc_cmd = com; 12182 SD_FILL_SCSI1_LUN(un, pktp); 12183 if (cdb_group_id == CDB_GROUPID_1) { 12184 FORMG1ADDR(cdbp, lba); 12185 FORMG1COUNT(cdbp, blockcount); 12186 return (0); 12187 } else if (cdb_group_id == CDB_GROUPID_4) { 12188 FORMG4LONGADDR(cdbp, lba); 12189 FORMG4COUNT(cdbp, blockcount); 12190 return (0); 12191 } else if (cdb_group_id == CDB_GROUPID_0) { 12192 FORMG0ADDR(cdbp, lba); 12193 FORMG0COUNT(cdbp, blockcount); 12194 return (0); 12195 } else if (cdb_group_id == CDB_GROUPID_5) { 12196 FORMG5ADDR(cdbp, lba); 12197 FORMG5COUNT(cdbp, blockcount); 12198 return (0); 12199 } 12200 12201 /* Unreachable */ 12202 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12203 } 12204 12205 /* 12206 * Error setting up next portion of cmd transfer. 12207 * Something is definitely very wrong and this 12208 * should not happen. 12209 */ 12210 return (SD_PKT_ALLOC_FAILURE); 12211 } 12212 12213 /* 12214 * Function: sd_initpkt_for_uscsi 12215 * 12216 * Description: Allocate and initialize for transport a scsi_pkt struct, 12217 * based upon the info specified in the given uscsi_cmd struct. 12218 * 12219 * Return Code: SD_PKT_ALLOC_SUCCESS 12220 * SD_PKT_ALLOC_FAILURE 12221 * SD_PKT_ALLOC_FAILURE_NO_DMA 12222 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12223 * 12224 * Context: Kernel thread and may be called from software interrupt context 12225 * as part of a sdrunout callback. 
This function may not block or 12226 * call routines that block 12227 */ 12228 12229 static int 12230 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 12231 { 12232 struct uscsi_cmd *uscmd; 12233 struct sd_xbuf *xp; 12234 struct scsi_pkt *pktp; 12235 struct sd_lun *un; 12236 uint32_t flags = 0; 12237 12238 ASSERT(bp != NULL); 12239 ASSERT(pktpp != NULL); 12240 xp = SD_GET_XBUF(bp); 12241 ASSERT(xp != NULL); 12242 un = SD_GET_UN(bp); 12243 ASSERT(un != NULL); 12244 ASSERT(mutex_owned(SD_MUTEX(un))); 12245 12246 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12247 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12248 ASSERT(uscmd != NULL); 12249 12250 SD_TRACE(SD_LOG_IO_CORE, un, 12251 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 12252 12253 /* 12254 * Allocate the scsi_pkt for the command. 12255 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 12256 * during scsi_init_pkt time and will continue to use the 12257 * same path as long as the same scsi_pkt is used without 12258 * intervening scsi_dmafree(). Since a uscsi command does 12259 * not call scsi_dmafree() before retrying a failed command, it 12260 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT 12261 * set, so that scsi_vhci can use another available path for 12262 * retry. Besides, a uscsi command does not allow DMA breakup, 12263 * so there is no need to set the PKT_DMA_PARTIAL flag. 12264 */ 12265 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12266 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12267 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12268 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 12269 - sizeof (struct scsi_extended_sense)), 0, 12270 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 12271 sdrunout, (caddr_t)un); 12272 } else { 12273 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12274 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12275 sizeof (struct scsi_arq_status), 0, 12276 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 12277 sdrunout, (caddr_t)un); 12278 } 12279 12280 if (pktp == NULL) { 12281 *pktpp = NULL; 12282 /* 12283 * Set the driver state to RWAIT to indicate the driver 12284 * is waiting on resource allocations. The driver will not 12285 * suspend, pm_suspend, or detach while the state is RWAIT. 12286 */ 12287 New_state(un, SD_STATE_RWAIT); 12288 12289 SD_ERROR(SD_LOG_IO_CORE, un, 12290 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 12291 12292 if ((bp->b_flags & B_ERROR) != 0) { 12293 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12294 } 12295 return (SD_PKT_ALLOC_FAILURE); 12296 } 12297 12298 /* 12299 * We do not do DMA breakup for USCSI commands, so return failure 12300 * here if all the needed DMA resources were not allocated. 12301 */ 12302 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 12303 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 12304 scsi_destroy_pkt(pktp); 12305 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 12306 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 12307 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 12308 } 12309 12310 /* Init the cdb from the given uscsi struct */ 12311 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 12312 uscmd->uscsi_cdb[0], 0, 0, 0); 12313 12314 SD_FILL_SCSI1_LUN(un, pktp); 12315 12316 /* 12317 * Set up the optional USCSI flags. See the uscsi(7I) man page 12318 * for a listing of the supported flags.
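 * For example, an internal error-recovery command issued via sd_send_scsi_cmd() with path_flag == SD_PATH_DIRECT_PRIORITY arrives here with USCSI_DIAGNOSE already set (see sd_send_scsi_cmd()), and the code below maps that to FLAG_DIAGNOSE in pkt_flags, just as USCSI_SILENT maps to FLAG_SILENT.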
12319 */ 12320 12321 if (uscmd->uscsi_flags & USCSI_SILENT) { 12322 flags |= FLAG_SILENT; 12323 } 12324 12325 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12326 flags |= FLAG_DIAGNOSE; 12327 } 12328 12329 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12330 flags |= FLAG_ISOLATE; 12331 } 12332 12333 if (un->un_f_is_fibre == FALSE) { 12334 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12335 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12336 } 12337 } 12338 12339 /* 12340 * Set the pkt flags here so we save time later. 12341 * Note: These flags are NOT in the uscsi man page!!! 12342 */ 12343 if (uscmd->uscsi_flags & USCSI_HEAD) { 12344 flags |= FLAG_HEAD; 12345 } 12346 12347 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12348 flags |= FLAG_NOINTR; 12349 } 12350 12351 /* 12352 * For tagged queueing, things get a bit complicated. 12353 * Check first for head of queue and last for ordered queue. 12354 * If neither head nor order, use the default driver tag flags. 12355 */ 12356 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12357 if (uscmd->uscsi_flags & USCSI_HTAG) { 12358 flags |= FLAG_HTAG; 12359 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12360 flags |= FLAG_OTAG; 12361 } else { 12362 flags |= un->un_tagflags & FLAG_TAGMASK; 12363 } 12364 } 12365 12366 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12367 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12368 } 12369 12370 pktp->pkt_flags = flags; 12371 12372 /* Transfer uscsi information to scsi_pkt */ 12373 (void) scsi_uscsi_pktinit(uscmd, pktp); 12374 12375 /* Copy the caller's CDB into the pkt... */ 12376 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12377 12378 if (uscmd->uscsi_timeout == 0) { 12379 pktp->pkt_time = un->un_uscsi_timeout; 12380 } else { 12381 pktp->pkt_time = uscmd->uscsi_timeout; 12382 } 12383 12384 /* need it later to identify USCSI request in sdintr */ 12385 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12386 12387 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12388 12389 pktp->pkt_private = bp; 12390 pktp->pkt_comp = sdintr; 12391 *pktpp = pktp; 12392 12393 SD_TRACE(SD_LOG_IO_CORE, un, 12394 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12395 12396 return (SD_PKT_ALLOC_SUCCESS); 12397 } 12398 12399 12400 /* 12401 * Function: sd_destroypkt_for_uscsi 12402 * 12403 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12404 * IOs.. Also saves relevant info into the associated uscsi_cmd 12405 * struct. 
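 * (Typical caller-side use of the values saved here -- a sketch only, not code from this driver: * if (uscmd->uscsi_status == STATUS_CHECK && * uscmd->uscsi_rqlen > uscmd->uscsi_rqresid) * then sense bytes were copied into uscmd->uscsi_rqbuf and may be decoded. * )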
12406 * 12407 * Context: May be called under interrupt context 12408 */ 12409 12410 static void 12411 sd_destroypkt_for_uscsi(struct buf *bp) 12412 { 12413 struct uscsi_cmd *uscmd; 12414 struct sd_xbuf *xp; 12415 struct scsi_pkt *pktp; 12416 struct sd_lun *un; 12417 12418 ASSERT(bp != NULL); 12419 xp = SD_GET_XBUF(bp); 12420 ASSERT(xp != NULL); 12421 un = SD_GET_UN(bp); 12422 ASSERT(un != NULL); 12423 ASSERT(!mutex_owned(SD_MUTEX(un))); 12424 pktp = SD_GET_PKTP(bp); 12425 ASSERT(pktp != NULL); 12426 12427 SD_TRACE(SD_LOG_IO_CORE, un, 12428 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12429 12430 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12431 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12432 ASSERT(uscmd != NULL); 12433 12434 /* Save the status and the residual into the uscsi_cmd struct */ 12435 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12436 uscmd->uscsi_resid = bp->b_resid; 12437 12438 /* Transfer scsi_pkt information to uscsi */ 12439 (void) scsi_uscsi_pktfini(pktp, uscmd); 12440 12441 /* 12442 * If enabled, copy any saved sense data into the area specified 12443 * by the uscsi command. 12444 */ 12445 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12446 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12447 /* 12448 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12449 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12450 */ 12451 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12452 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12453 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12454 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12455 MAX_SENSE_LENGTH); 12456 } else { 12457 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12458 SENSE_LENGTH); 12459 } 12460 } 12461 12462 /* We are done with the scsi_pkt; free it now */ 12463 ASSERT(SD_GET_PKTP(bp) != NULL); 12464 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12465 12466 SD_TRACE(SD_LOG_IO_CORE, un, 12467 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12468 } 12469 12470 12471 /* 12472 * Function: sd_bioclone_alloc 12473 * 12474 * Description: Allocate a buf(9S) and init it as per the given buf 12475 * and the various arguments. The associated sd_xbuf 12476 * struct is (nearly) duplicated. The struct buf *bp 12477 * argument is saved in new_xp->xb_private. 12478 * 12479 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12480 * datalen - size of data area for the shadow bp 12481 * blkno - starting LBA 12482 * func - function pointer for b_iodone in the shadow buf. (May 12483 * be NULL if none.) 12484 * 12485 * Return Code: Pointer to the allocated buf(9S) struct 12486 * 12487 * Context: Can sleep. 12488 */ 12489 12490 static struct buf * 12491 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12492 daddr_t blkno, int (*func)(struct buf *)) 12493 { 12494 struct sd_lun *un; 12495 struct sd_xbuf *xp; 12496 struct sd_xbuf *new_xp; 12497 struct buf *new_bp; 12498 12499 ASSERT(bp != NULL); 12500 xp = SD_GET_XBUF(bp); 12501 ASSERT(xp != NULL); 12502 un = SD_GET_UN(bp); 12503 ASSERT(un != NULL); 12504 ASSERT(!mutex_owned(SD_MUTEX(un))); 12505 12506 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12507 NULL, KM_SLEEP); 12508 12509 new_bp->b_lblkno = blkno; 12510 12511 /* 12512 * Allocate an xbuf for the shadow bp and copy the contents of the 12513 * original xbuf into it.
12514 */ 12515 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12516 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12517 12518 /* 12519 * The given bp is automatically saved in the xb_private member 12520 * of the new xbuf. Callers are allowed to depend on this. 12521 */ 12522 new_xp->xb_private = bp; 12523 12524 new_bp->b_private = new_xp; 12525 12526 return (new_bp); 12527 } 12528 12529 /* 12530 * Function: sd_shadow_buf_alloc 12531 * 12532 * Description: Allocate a buf(9S) and init it as per the given buf 12533 * and the various arguments. The associated sd_xbuf 12534 * struct is (nearly) duplicated. The struct buf *bp 12535 * argument is saved in new_xp->xb_private. 12536 * 12537 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12538 * datalen - size of data area for the shadow bp 12539 * bflags - B_READ or B_WRITE (pseudo flag) 12540 * blkno - starting LBA 12541 * func - function pointer for b_iodone in the shadow buf. (May 12542 * be NULL if none.) 12543 * 12544 * Return Code: Pointer to the allocated buf(9S) struct 12545 * 12546 * Context: Can sleep. 12547 */ 12548 12549 static struct buf * 12550 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12551 daddr_t blkno, int (*func)(struct buf *)) 12552 { 12553 struct sd_lun *un; 12554 struct sd_xbuf *xp; 12555 struct sd_xbuf *new_xp; 12556 struct buf *new_bp; 12557 12558 ASSERT(bp != NULL); 12559 xp = SD_GET_XBUF(bp); 12560 ASSERT(xp != NULL); 12561 un = SD_GET_UN(bp); 12562 ASSERT(un != NULL); 12563 ASSERT(!mutex_owned(SD_MUTEX(un))); 12564 12565 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12566 bp_mapin(bp); 12567 } 12568 12569 bflags &= (B_READ | B_WRITE); 12570 #if defined(__i386) || defined(__amd64) 12571 new_bp = getrbuf(KM_SLEEP); 12572 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12573 new_bp->b_bcount = datalen; 12574 new_bp->b_flags = bflags | 12575 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12576 #else 12577 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12578 datalen, bflags, SLEEP_FUNC, NULL); 12579 #endif 12580 new_bp->av_forw = NULL; 12581 new_bp->av_back = NULL; 12582 new_bp->b_dev = bp->b_dev; 12583 new_bp->b_blkno = blkno; 12584 new_bp->b_iodone = func; 12585 new_bp->b_edev = bp->b_edev; 12586 new_bp->b_resid = 0; 12587 12588 /* We need to preserve the B_FAILFAST flag */ 12589 if (bp->b_flags & B_FAILFAST) { 12590 new_bp->b_flags |= B_FAILFAST; 12591 } 12592 12593 /* 12594 * Allocate an xbuf for the shadow bp and copy the contents of the 12595 * original xbuf into it. 12596 */ 12597 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12598 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12599 12600 /* Needed later to copy data between the shadow buf & original buf! */ 12601 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12602 12603 /* 12604 * The given bp is automatically saved in the xb_private member 12605 * of the new xbuf. Callers are allowed to depend on this. 12606 */ 12607 new_xp->xb_private = bp; 12608 12609 new_bp->b_private = new_xp; 12610 12611 return (new_bp); 12612 } 12613 12614 /* 12615 * Function: sd_bioclone_free 12616 * 12617 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12618 * in a larger-than-partition operation.
12619 *
12620 * Context: May be called under interrupt context
12621 */
12622
12623 static void
12624 sd_bioclone_free(struct buf *bp)
12625 {
12626 struct sd_xbuf *xp;
12627
12628 ASSERT(bp != NULL);
12629 xp = SD_GET_XBUF(bp);
12630 ASSERT(xp != NULL);
12631
12632 /*
12633 * Call bp_mapout() before freeing the buf, in case a lower
12634 * layer or HBA had done a bp_mapin(). We must do this here
12635 * as we are the "originator" of the shadow buf.
12636 */
12637 bp_mapout(bp);
12638
12639 /*
12640 * Null out b_iodone before freeing the bp, to ensure that the driver
12641 * never gets confused by a stale value in this field. (Just a little
12642 * extra defensiveness here.)
12643 */
12644 bp->b_iodone = NULL;
12645
12646 freerbuf(bp);
12647
12648 kmem_free(xp, sizeof (struct sd_xbuf));
12649 }
12650
12651 /*
12652 * Function: sd_shadow_buf_free
12653 *
12654 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
12655 *
12656 * Context: May be called under interrupt context
12657 */
12658
12659 static void
12660 sd_shadow_buf_free(struct buf *bp)
12661 {
12662 struct sd_xbuf *xp;
12663
12664 ASSERT(bp != NULL);
12665 xp = SD_GET_XBUF(bp);
12666 ASSERT(xp != NULL);
12667
12668 #if defined(__sparc)
12669 /*
12670 * Call bp_mapout() before freeing the buf, in case a lower
12671 * layer or HBA had done a bp_mapin(). We must do this here
12672 * as we are the "originator" of the shadow buf.
12673 */
12674 bp_mapout(bp);
12675 #endif
12676
12677 /*
12678 * Null out b_iodone before freeing the bp, to ensure that the driver
12679 * never gets confused by a stale value in this field. (Just a little
12680 * extra defensiveness here.)
12681 */
12682 bp->b_iodone = NULL;
12683
12684 #if defined(__i386) || defined(__amd64)
12685 kmem_free(bp->b_un.b_addr, bp->b_bcount);
12686 freerbuf(bp);
12687 #else
12688 scsi_free_consistent_buf(bp);
12689 #endif
12690
12691 kmem_free(xp, sizeof (struct sd_xbuf));
12692 }
12693
12694
12695 /*
12696 * Function: sd_print_transport_rejected_message
12697 *
12698 * Description: This implements the ludicrously complex rules for printing
12699 * a "transport rejected" message. This is to address the
12700 * specific problem of having a flood of this error message
12701 * produced when a failover occurs.
12702 *
12703 * Context: Any.
12704 */
12705
12706 static void
12707 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
12708 int code)
12709 {
12710 ASSERT(un != NULL);
12711 ASSERT(mutex_owned(SD_MUTEX(un)));
12712 ASSERT(xp != NULL);
12713
12714 /*
12715 * Print the "transport rejected" message under the following
12716 * conditions:
12717 *
12718 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
12719 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
12720 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
12721 * printed the FIRST time a TRAN_FATAL_ERROR is returned from
12722 * scsi_transport(9F) (which indicates that the target might have
12723 * gone off-line). This uses the un->un_tran_fatal_count
12724 * count, which is incremented whenever a TRAN_FATAL_ERROR is
12725 * received, and reset to zero whenever a TRAN_ACCEPT is returned
12726 * from scsi_transport().
12727 *
12728 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
12729 * the preceding cases in order for the message to be printed.
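 *
 * (Summary, restating the rules above: the message is printed only
 * when FLAG_SILENT is clear AND at least one of the following holds:
 * diag logging is enabled, the code is not TRAN_FATAL_ERROR, or this
 * is the first TRAN_FATAL_ERROR since the last TRAN_ACCEPT.)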
12730 */
12731 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) {
12732 if ((sd_level_mask & SD_LOGMASK_DIAG) ||
12733 (code != TRAN_FATAL_ERROR) ||
12734 (un->un_tran_fatal_count == 1)) {
12735 switch (code) {
12736 case TRAN_BADPKT:
12737 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12738 "transport rejected bad packet\n");
12739 break;
12740 case TRAN_FATAL_ERROR:
12741 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12742 "transport rejected fatal error\n");
12743 break;
12744 default:
12745 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12746 "transport rejected (%d)\n", code);
12747 break;
12748 }
12749 }
12750 }
12751 }
12752
12753
12754 /*
12755 * Function: sd_add_buf_to_waitq
12756 *
12757 * Description: Add the given buf(9S) struct to the wait queue for the
12758 * instance. If sorting is enabled, then the buf is added
12759 * to the queue via an elevator sort algorithm (a la
12760 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
12761 * If sorting is not enabled, then the buf is just added
12762 * to the end of the wait queue.
12763 *
12764 * Return Code: void
12765 *
12766 * Context: Does not sleep/block, therefore technically can be called
12767 * from any context. However if sorting is enabled then the
12768 * execution time is indeterminate, and may take a long time if
12769 * the wait queue grows large.
12770 */
12771
12772 static void
12773 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
12774 {
12775 struct buf *ap;
12776
12777 ASSERT(bp != NULL);
12778 ASSERT(un != NULL);
12779 ASSERT(mutex_owned(SD_MUTEX(un)));
12780
12781 /* If the queue is empty, add the buf as the only entry & return. */
12782 if (un->un_waitq_headp == NULL) {
12783 ASSERT(un->un_waitq_tailp == NULL);
12784 un->un_waitq_headp = un->un_waitq_tailp = bp;
12785 bp->av_forw = NULL;
12786 return;
12787 }
12788
12789 ASSERT(un->un_waitq_tailp != NULL);
12790
12791 /*
12792 * If sorting is disabled, just add the buf to the tail end of
12793 * the wait queue and return.
12794 */
12795 if (un->un_f_disksort_disabled) {
12796 un->un_waitq_tailp->av_forw = bp;
12797 un->un_waitq_tailp = bp;
12798 bp->av_forw = NULL;
12799 return;
12800 }
12801
12802 /*
12803 * Sort through the list of requests currently on the wait queue
12804 * and add the new buf request at the appropriate position.
12805 *
12806 * The un->un_waitq_headp is an activity chain pointer on which
12807 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
12808 * first queue holds those requests which are positioned after
12809 * the current SD_GET_BLKNO() (in the first request); the second holds
12810 * requests which came in after their SD_GET_BLKNO() number was passed.
12811 * Thus we implement a one-way scan, retracting after reaching
12812 * the end of the drive to the first request on the second
12813 * queue, at which time it becomes the first queue.
12814 * A one-way scan is natural because of the way UNIX read-ahead
12815 * blocks are allocated.
12816 *
12817 * If our block number is lower than the first request's (i.e., we
12818 * lie after it in the one-way scan), then we must locate the second request list and add ourselves to it.
12819 */
12820 ap = un->un_waitq_headp;
12821 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
12822 while (ap->av_forw != NULL) {
12823 /*
12824 * Look for an "inversion" in the (normally
12825 * ascending) block numbers. This indicates
12826 * the start of the second request list.
12827 */
12828 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
12829 /*
12830 * Search the second request list for the
12831 * first request at a larger block number.
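 * (Worked example, added for illustration: if the queue holds
 * blknos 100, 300, 40, 70, the inversion 300 -> 40 marks the
 * start of the second list; a new request at blkno 50 scans
 * past 40 and stops at 70, the first larger block number.)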
12832 * We go before that; however if there is
12833 * no such request, we go at the end.
12834 */
12835 do {
12836 if (SD_GET_BLKNO(bp) <
12837 SD_GET_BLKNO(ap->av_forw)) {
12838 goto insert;
12839 }
12840 ap = ap->av_forw;
12841 } while (ap->av_forw != NULL);
12842 goto insert; /* after last */
12843 }
12844 ap = ap->av_forw;
12845 }
12846
12847 /*
12848 * No inversions... we will go after the last, and
12849 * be the first request in the second request list.
12850 */
12851 goto insert;
12852 }
12853
12854 /*
12855 * Request is at/after the current request...
12856 * sort in the first request list.
12857 */
12858 while (ap->av_forw != NULL) {
12859 /*
12860 * We want to go after the current request (1) if
12861 * there is an inversion after it (i.e. it is the end
12862 * of the first request list), or (2) if the next
12863 * request is a larger block no. than our request.
12864 */
12865 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
12866 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
12867 goto insert;
12868 }
12869 ap = ap->av_forw;
12870 }
12871
12872 /*
12873 * Neither a second list nor a larger request, therefore
12874 * we go at the end of the first list (which is the same
12875 * as the end of the whole shebang).
12876 */
12877 insert:
12878 bp->av_forw = ap->av_forw;
12879 ap->av_forw = bp;
12880
12881 /*
12882 * If we inserted onto the tail end of the waitq, make sure the
12883 * tail pointer is updated.
12884 */
12885 if (ap == un->un_waitq_tailp) {
12886 un->un_waitq_tailp = bp;
12887 }
12888 }
12889
12890
12891 /*
12892 * Function: sd_start_cmds
12893 *
12894 * Description: Remove and transport cmds from the driver queues.
12895 *
12896 * Arguments: un - pointer to the unit (soft state) struct for the target.
12897 *
12898 * immed_bp - ptr to a buf to be transported immediately. Only
12899 * the immed_bp is transported; bufs on the waitq are not
12900 * processed and the un_retry_bp is not checked. If immed_bp is
12901 * NULL, then normal queue processing is performed.
12902 *
12903 * Context: May be called from kernel thread context, interrupt context,
12904 * or runout callback context. This function may not block or
12905 * call routines that block.
12906 */
12907
12908 static void
12909 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
12910 {
12911 struct sd_xbuf *xp;
12912 struct buf *bp;
12913 void (*statp)(kstat_io_t *);
12914 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
12915 void (*saved_statp)(kstat_io_t *);
12916 #endif
12917 int rval;
12918
12919 ASSERT(un != NULL);
12920 ASSERT(mutex_owned(SD_MUTEX(un)));
12921 ASSERT(un->un_ncmds_in_transport >= 0);
12922 ASSERT(un->un_throttle >= 0);
12923
12924 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
12925
12926 do {
12927 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
12928 saved_statp = NULL;
12929 #endif
12930
12931 /*
12932 * If we are syncing or dumping, fail the command to
12933 * avoid recursively calling back into scsi_transport().
12934 * The dump I/O itself uses a separate code path so this
12935 * only prevents non-dump I/O from being sent while dumping.
12936 * File system sync takes place before dumping begins.
12937 * During panic, filesystem I/O is allowed provided
12938 * un_in_callback is <= 1. This is to prevent recursion
12939 * such as sd_start_cmds -> scsi_transport -> sdintr ->
12940 * sd_start_cmds and so on. See panic.c for more information
12941 * about the states the system can be in during panic.
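 * (Added note: un_in_callback tracks nesting of the driver's
 * completion path, so a value above 1 here means we are already
 * inside such an sdintr() -> sd_start_cmds() recursion.)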
12942 */
12943 if ((un->un_state == SD_STATE_DUMPING) ||
12944 (ddi_in_panic() && (un->un_in_callback > 1))) {
12945 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
12946 "sd_start_cmds: panicking\n");
12947 goto exit;
12948 }
12949
12950 if ((bp = immed_bp) != NULL) {
12951 /*
12952 * We have a bp that must be transported immediately.
12953 * It's OK to transport the immed_bp here without doing
12954 * the throttle limit check because the immed_bp is
12955 * always used in a retry/recovery case. This means
12956 * that we know we are not at the throttle limit by
12957 * virtue of the fact that to get here we must have
12958 * already gotten a command back via sdintr(). This also
12959 * relies on (1) the command on un_retry_bp preventing
12960 * further commands from the waitq from being issued;
12961 * and (2) the code in sd_retry_command checking the
12962 * throttle limit before issuing a delayed or immediate
12963 * retry. This holds even if the throttle limit is
12964 * currently ratcheted down from its maximum value.
12965 */
12966 statp = kstat_runq_enter;
12967 if (bp == un->un_retry_bp) {
12968 ASSERT((un->un_retry_statp == NULL) ||
12969 (un->un_retry_statp == kstat_waitq_enter) ||
12970 (un->un_retry_statp ==
12971 kstat_runq_back_to_waitq));
12972 /*
12973 * If the waitq kstat was incremented when
12974 * sd_set_retry_bp() queued this bp for a retry,
12975 * then we must set up statp so that the waitq
12976 * count will get decremented correctly below.
12977 * Also we must clear un->un_retry_statp to
12978 * ensure that we do not act on a stale value
12979 * in this field.
12980 */
12981 if ((un->un_retry_statp == kstat_waitq_enter) ||
12982 (un->un_retry_statp ==
12983 kstat_runq_back_to_waitq)) {
12984 statp = kstat_waitq_to_runq;
12985 }
12986 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
12987 saved_statp = un->un_retry_statp;
12988 #endif
12989 un->un_retry_statp = NULL;
12990
12991 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
12992 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
12993 "un_throttle:%d un_ncmds_in_transport:%d\n",
12994 un, un->un_retry_bp, un->un_throttle,
12995 un->un_ncmds_in_transport);
12996 } else {
12997 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
12998 "processing priority bp:0x%p\n", bp);
12999 }
13000
13001 } else if ((bp = un->un_waitq_headp) != NULL) {
13002 /*
13003 * A command on the waitq is ready to go, but do not
13004 * send it if:
13005 *
13006 * (1) the throttle limit has been reached, or
13007 * (2) a retry is pending, or
13008 * (3) a START_STOP_UNIT callback is pending, or
13009 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
13010 * command is pending.
13011 *
13012 * For all of these conditions, IO processing will
13013 * restart after the condition is cleared.
13014 */
13015 if (un->un_ncmds_in_transport >= un->un_throttle) {
13016 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13017 "sd_start_cmds: exiting, "
13018 "throttle limit reached!\n");
13019 goto exit;
13020 }
13021 if (un->un_retry_bp != NULL) {
13022 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13023 "sd_start_cmds: exiting, retry pending!\n");
13024 goto exit;
13025 }
13026 if (un->un_startstop_timeid != NULL) {
13027 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13028 "sd_start_cmds: exiting, "
13029 "START_STOP pending!\n");
13030 goto exit;
13031 }
13032 if (un->un_direct_priority_timeid != NULL) {
13033 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13034 "sd_start_cmds: exiting, "
13035 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
13036 goto exit;
13037 }
13038
13039 /* Dequeue the command */
13040 un->un_waitq_headp = bp->av_forw;
13041 if (un->un_waitq_headp == NULL) {
13042 un->un_waitq_tailp = NULL;
13043 }
13044 bp->av_forw = NULL;
13045 statp = kstat_waitq_to_runq;
13046 SD_TRACE(SD_LOG_IO_CORE, un,
13047 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
13048
13049 } else {
13050 /* No work to do so bail out now */
13051 SD_TRACE(SD_LOG_IO_CORE, un,
13052 "sd_start_cmds: no more work, exiting!\n");
13053 goto exit;
13054 }
13055
13056 /*
13057 * Reset the state to normal. This is the mechanism by which
13058 * the state transitions from either SD_STATE_RWAIT or
13059 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
13060 * If state is SD_STATE_PM_CHANGING then this command is
13061 * part of the device power control and the state must
13062 * not be put back to normal. Doing so would
13063 * allow new commands to proceed when they shouldn't;
13064 * the device may be powering off.
13065 */
13066 if ((un->un_state != SD_STATE_SUSPENDED) &&
13067 (un->un_state != SD_STATE_PM_CHANGING)) {
13068 New_state(un, SD_STATE_NORMAL);
13069 }
13070
13071 xp = SD_GET_XBUF(bp);
13072 ASSERT(xp != NULL);
13073
13074 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13075 /*
13076 * Allocate the scsi_pkt if we need one, or attach DMA
13077 * resources if we have a scsi_pkt that needs them. The
13078 * latter should only occur for commands that are being
13079 * retried.
13080 */
13081 if ((xp->xb_pktp == NULL) ||
13082 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
13083 #else
13084 if (xp->xb_pktp == NULL) {
13085 #endif
13086 /*
13087 * There is no scsi_pkt allocated for this buf. Call
13088 * the initpkt function to allocate & init one.
13089 *
13090 * The scsi_init_pkt runout callback functionality is
13091 * implemented as follows:
13092 *
13093 * 1) The initpkt function always calls
13094 * scsi_init_pkt(9F) with sdrunout specified as the
13095 * callback routine.
13096 * 2) A successfully allocated packet is initialized
13097 * and the I/O is transported.
13098 * 3) The I/O associated with an allocation resource
13099 * failure is left on its queue to be retried via
13100 * runout or the next I/O.
13101 * 4) The I/O associated with a DMA error is removed
13102 * from the queue and failed with EIO. Processing of
13103 * the transport queues is also halted to be
13104 * restarted via runout or the next I/O.
13105 * 5) The I/O associated with a CDB size or packet
13106 * size error is removed from the queue and failed
13107 * with EIO. Processing of the transport queues is
13108 * continued.
13109 *
13110 * Note: there is no interface for canceling a runout
13111 * callback. To prevent the driver from detaching or
13112 * suspending while a runout is pending, the driver
13113 * state is set to SD_STATE_RWAIT.
13114 *
13115 * Note: using the scsi_init_pkt callback facility can
13116 * result in an I/O request persisting at the head of
13117 * the list which cannot be satisfied even after
13118 * multiple retries. In the future the driver may
13119 * implement some kind of maximum runout count before
13120 * failing an I/O.
13121 *
13122 * Note: the use of funcp below may seem superfluous,
13123 * but it helps warlock figure out the correct
13124 * initpkt function calls (see [s]sd.wlcmd).
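 *
 * For illustration, each initpkt routine installed in
 * sd_initpkt_map[] follows roughly this shape ("my_initpkt" and
 * its arguments are hypothetical placeholders, not driver code):
 *
 *	static int
 *	my_initpkt(struct buf *bp, struct scsi_pkt **pktpp)
 *	{
 *		struct scsi_pkt *pktp;
 *
 *		pktp = scsi_init_pkt(ap, NULL, bp, cdblen, statuslen,
 *		    privatelen, flags, sdrunout, (caddr_t)un);
 *		if (pktp == NULL)
 *			return (SD_PKT_ALLOC_FAILURE);
 *		(fill in the CDB, pkt_time, pkt_comp, etc.)
 *		*pktpp = pktp;
 *		return (SD_PKT_ALLOC_SUCCESS);
 *	}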
13125 */
13126 struct scsi_pkt *pktp;
13127 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);
13128
13129 ASSERT(bp != un->un_rqs_bp);
13130
13131 funcp = sd_initpkt_map[xp->xb_chain_iostart];
13132 switch ((*funcp)(bp, &pktp)) {
13133 case SD_PKT_ALLOC_SUCCESS:
13134 xp->xb_pktp = pktp;
13135 SD_TRACE(SD_LOG_IO_CORE, un,
13136 "sd_start_cmds: SD_PKT_ALLOC_SUCCESS 0x%p\n",
13137 pktp);
13138 goto got_pkt;
13139
13140 case SD_PKT_ALLOC_FAILURE:
13141 /*
13142 * Temporary (hopefully) resource depletion.
13143 * Since retries and RQS commands always have a
13144 * scsi_pkt allocated, these cases should never
13145 * get here. So the only cases this needs to
13146 * handle are a bp from the waitq (which we put
13147 * back onto the waitq for sdrunout), or a bp
13148 * sent as an immed_bp (which we just fail).
13149 */
13150 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13151 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
13152
13153 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13154
13155 if (bp == immed_bp) {
13156 /*
13157 * If SD_XB_DMA_FREED is clear, then
13158 * this is a failure to allocate a
13159 * scsi_pkt, and we must fail the
13160 * command.
13161 */
13162 if ((xp->xb_pkt_flags &
13163 SD_XB_DMA_FREED) == 0) {
13164 break;
13165 }
13166
13167 /*
13168 * If this immediate command is NOT our
13169 * un_retry_bp, then we must fail it.
13170 */
13171 if (bp != un->un_retry_bp) {
13172 break;
13173 }
13174
13175 /*
13176 * We get here if this cmd is our
13177 * un_retry_bp that was DMAFREED, but
13178 * scsi_init_pkt() failed to reallocate
13179 * DMA resources when we attempted to
13180 * retry it. This can happen when an
13181 * mpxio failover is in progress, but
13182 * we don't want to just fail the
13183 * command in this case.
13184 *
13185 * Use timeout(9F) to restart it after
13186 * a 100ms delay. We don't want to
13187 * let sdrunout() restart it, because
13188 * sdrunout() is just supposed to start
13189 * commands that are sitting on the
13190 * wait queue. The un_retry_bp stays
13191 * set until the command completes, but
13192 * sdrunout can be called many times
13193 * before that happens. Since sdrunout
13194 * cannot tell if the un_retry_bp is
13195 * already in the transport, it could
13196 * end up calling scsi_transport() for
13197 * the un_retry_bp multiple times.
13198 *
13199 * Also: don't schedule the callback
13200 * if some other callback is already
13201 * pending.
13202 */
13203 if (un->un_retry_statp == NULL) {
13204 /*
13205 * restore the kstat pointer to
13206 * keep kstat counts coherent
13207 * when we do retry the command.
13208 */
13209 un->un_retry_statp =
13210 saved_statp;
13211 }
13212
13213 if ((un->un_startstop_timeid == NULL) &&
13214 (un->un_retry_timeid == NULL) &&
13215 (un->un_direct_priority_timeid ==
13216 NULL)) {
13217
13218 un->un_retry_timeid =
13219 timeout(
13220 sd_start_retry_command,
13221 un, SD_RESTART_TIMEOUT);
13222 }
13223 goto exit;
13224 }
13225
13226 #else
13227 if (bp == immed_bp) {
13228 break; /* Just fail the command */
13229 }
13230 #endif
13231
13232 /* Add the buf back to the head of the waitq */
13233 bp->av_forw = un->un_waitq_headp;
13234 un->un_waitq_headp = bp;
13235 if (un->un_waitq_tailp == NULL) {
13236 un->un_waitq_tailp = bp;
13237 }
13238 goto exit;
13239
13240 case SD_PKT_ALLOC_FAILURE_NO_DMA:
13241 /*
13242 * HBA DMA resource failure. Fail the command
13243 * and continue processing of the queues.
13244 */
13245 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13246 "sd_start_cmds: "
13247 "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
13248 break;
13249
13250 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
13251 /*
13252 * Note:x86: Partial DMA mapping not supported
13253 * for USCSI commands, and all the needed DMA
13254 * resources were not allocated.
13255 */
13256 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13257 "sd_start_cmds: "
13258 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
13259 break;
13260
13261 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
13262 /*
13263 * Note:x86: Request cannot fit into CDB based
13264 * on lba and len.
13265 */
13266 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13267 "sd_start_cmds: "
13268 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
13269 break;
13270
13271 default:
13272 /* Should NEVER get here! */
13273 panic("scsi_initpkt error");
13274 /*NOTREACHED*/
13275 }
13276
13277 /*
13278 * Fatal error in allocating a scsi_pkt for this buf.
13279 * Update kstats & return the buf with an error code.
13280 * We must use sd_return_failed_command_no_restart() to
13281 * avoid a recursive call back into sd_start_cmds().
13282 * However this also means that we must keep processing
13283 * the waitq here in order to avoid stalling.
13284 */
13285 if (statp == kstat_waitq_to_runq) {
13286 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
13287 }
13288 sd_return_failed_command_no_restart(un, bp, EIO);
13289 if (bp == immed_bp) {
13290 /* immed_bp is gone by now, so clear this */
13291 immed_bp = NULL;
13292 }
13293 continue;
13294 }
13295 got_pkt:
13296 if (bp == immed_bp) {
13297 /* goto the head of the class.... */
13298 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
13299 }
13300
13301 un->un_ncmds_in_transport++;
13302 SD_UPDATE_KSTATS(un, statp, bp);
13303
13304 /*
13305 * Call scsi_transport() to send the command to the target.
13306 * According to SCSA architecture, we must drop the mutex here
13307 * before calling scsi_transport() in order to avoid deadlock.
13308 * Note that the scsi_pkt's completion routine can be executed
13309 * (from interrupt context) even before the call to
13310 * scsi_transport() returns.
13311 */
13312 SD_TRACE(SD_LOG_IO_CORE, un,
13313 "sd_start_cmds: calling scsi_transport()\n");
13314 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);
13315
13316 mutex_exit(SD_MUTEX(un));
13317 rval = scsi_transport(xp->xb_pktp);
13318 mutex_enter(SD_MUTEX(un));
13319
13320 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13321 "sd_start_cmds: scsi_transport() returned %d\n", rval);
13322
13323 switch (rval) {
13324 case TRAN_ACCEPT:
13325 /* Clear this with every pkt accepted by the HBA */
13326 un->un_tran_fatal_count = 0;
13327 break; /* Success; try the next cmd (if any) */
13328
13329 case TRAN_BUSY:
13330 un->un_ncmds_in_transport--;
13331 ASSERT(un->un_ncmds_in_transport >= 0);
13332
13333 /*
13334 * Don't retry request sense, the sense data
13335 * is lost when another request is sent.
13336 * Free up the rqs buf and retry
13337 * the original failed cmd. Update kstat.
13338 */
13339 if (bp == un->un_rqs_bp) {
13340 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
13341 bp = sd_mark_rqs_idle(un, xp);
13342 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
13343 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500,
13344 kstat_waitq_enter);
13345 goto exit;
13346 }
13347
13348 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13349 /*
13350 * Free the DMA resources for the scsi_pkt. This will
13351 * allow mpxio to select another path the next time
13352 * we call scsi_transport() with this scsi_pkt.
13353 * See sdintr() for the rationale behind this.
13354 */
13355 if ((un->un_f_is_fibre == TRUE) &&
13356 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
13357 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
13358 scsi_dmafree(xp->xb_pktp);
13359 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
13360 }
13361 #endif
13362
13363 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
13364 /*
13365 * Commands that are SD_PATH_DIRECT_PRIORITY
13366 * are for error recovery situations. These do
13367 * not use the normal command waitq, so if they
13368 * get a TRAN_BUSY we cannot put them back onto
13369 * the waitq for later retry. One possible
13370 * problem is that there could already be some
13371 * other command on un_retry_bp that is waiting
13372 * for this one to complete, so we would be
13373 * deadlocked if we put this command back onto
13374 * the waitq for later retry (since un_retry_bp
13375 * must complete before the driver gets back to
13376 * commands on the waitq).
13377 *
13378 * To avoid deadlock we must schedule a callback
13379 * that will restart this command after a set
13380 * interval. This should keep retrying for as
13381 * long as the underlying transport keeps
13382 * returning TRAN_BUSY (just like for other
13383 * commands). Use the same timeout interval as
13384 * for the ordinary TRAN_BUSY retry.
13385 */
13386 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13387 "sd_start_cmds: scsi_transport() returned "
13388 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");
13389
13390 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
13391 un->un_direct_priority_timeid =
13392 timeout(sd_start_direct_priority_command,
13393 bp, SD_BSY_TIMEOUT / 500);
13394
13395 goto exit;
13396 }
13397
13398 /*
13399 * For TRAN_BUSY, we want to reduce the throttle value,
13400 * unless we are retrying a command.
13401 */
13402 if (bp != un->un_retry_bp) {
13403 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
13404 }
13405
13406 /*
13407 * Set up the bp to be tried again 10 ms later.
13408 * Note:x86: Is there a timeout value in the sd_lun
13409 * for this condition?
13410 */
13411 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500,
13412 kstat_runq_back_to_waitq);
13413 goto exit;
13414
13415 case TRAN_FATAL_ERROR:
13416 un->un_tran_fatal_count++;
13417 /* FALLTHRU */
13418
13419 case TRAN_BADPKT:
13420 default:
13421 un->un_ncmds_in_transport--;
13422 ASSERT(un->un_ncmds_in_transport >= 0);
13423
13424 /*
13425 * If this is our REQUEST SENSE command with a
13426 * transport error, we must get back the pointers
13427 * to the original buf, and mark the REQUEST
13428 * SENSE command as "available".
13429 */
13430 if (bp == un->un_rqs_bp) {
13431 bp = sd_mark_rqs_idle(un, xp);
13432 xp = SD_GET_XBUF(bp);
13433 } else {
13434 /*
13435 * Legacy behavior: do not update transport
13436 * error count for request sense commands.
13437 */
13438 SD_UPDATE_ERRSTATS(un, sd_transerrs);
13439 }
13440
13441 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
13442 sd_print_transport_rejected_message(un, xp, rval);
13443
13444 /*
13445 * We must use sd_return_failed_command_no_restart() to
13446 * avoid a recursive call back into sd_start_cmds().
13447 * However this also means that we must keep processing
13448 * the waitq here in order to avoid stalling.
13449 */
13450 sd_return_failed_command_no_restart(un, bp, EIO);
13451
13452 /*
13453 * Notify any threads waiting in sd_ddi_suspend() that
13454 * a command completion has occurred.
13455 */
13456 if (un->un_state == SD_STATE_SUSPENDED) {
13457 cv_broadcast(&un->un_disk_busy_cv);
13458 }
13459
13460 if (bp == immed_bp) {
13461 /* immed_bp is gone by now, so clear this */
13462 immed_bp = NULL;
13463 }
13464 break;
13465 }
13466
13467 } while (immed_bp == NULL);
13468
13469 exit:
13470 ASSERT(mutex_owned(SD_MUTEX(un)));
13471 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
13472 }
13473
13474
13475 /*
13476 * Function: sd_return_command
13477 *
13478 * Description: Returns a command to its originator (with or without an
13479 * error). Also starts commands waiting to be transported
13480 * to the target.
13481 *
13482 * Context: May be called from interrupt, kernel, or timeout context
13483 */
13484
13485 static void
13486 sd_return_command(struct sd_lun *un, struct buf *bp)
13487 {
13488 struct sd_xbuf *xp;
13489 struct scsi_pkt *pktp;
13490
13491 ASSERT(bp != NULL);
13492 ASSERT(un != NULL);
13493 ASSERT(mutex_owned(SD_MUTEX(un)));
13494 ASSERT(bp != un->un_rqs_bp);
13495 xp = SD_GET_XBUF(bp);
13496 ASSERT(xp != NULL);
13497
13498 pktp = SD_GET_PKTP(bp);
13499
13500 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");
13501
13502 /*
13503 * Note: check for the "sdrestart failed" case.
13504 */
13505 if ((un->un_partial_dma_supported == 1) &&
13506 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
13507 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
13508 (xp->xb_pktp->pkt_resid == 0)) {
13509
13510 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
13511 /*
13512 * Successfully set up next portion of cmd
13513 * transfer, try sending it
13514 */
13515 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
13516 NULL, NULL, 0, (clock_t)0, NULL);
13517 sd_start_cmds(un, NULL);
13518 return; /* Note:x86: need a return here? */
13519 }
13520 }
13521
13522 /*
13523 * If this is the failfast bp, clear it from un_failfast_bp. This
13524 * can happen if upon being re-tried the failfast bp either
13525 * succeeded or encountered another error (possibly even a different
13526 * error than the one that precipitated the failfast state, but in
13527 * that case it would have had to exhaust retries as well). Regardless,
13528 * this should not occur while the instance is in the active
13529 * failfast state.
13530 */
13531 if (bp == un->un_failfast_bp) {
13532 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
13533 un->un_failfast_bp = NULL;
13534 }
13535
13536 /*
13537 * Clear the failfast state upon successful completion of ANY cmd.
13538 */
13539 if (bp->b_error == 0) {
13540 un->un_failfast_state = SD_FAILFAST_INACTIVE;
13541 }
13542
13543 /*
13544 * This is used if the command was retried one or more times. Show that
13545 * we are done with it, and allow processing of the waitq to resume.
13546 */
13547 if (bp == un->un_retry_bp) {
13548 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13549 "sd_return_command: un:0x%p: "
13550 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
13551 un->un_retry_bp = NULL;
13552 un->un_retry_statp = NULL;
13553 }
13554
13555 SD_UPDATE_RDWR_STATS(un, bp);
13556 SD_UPDATE_PARTITION_STATS(un, bp);
13557
13558 switch (un->un_state) {
13559 case SD_STATE_SUSPENDED:
13560 /*
13561 * Notify any threads waiting in sd_ddi_suspend() that
13562 * a command completion has occurred.
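 * (sd_ddi_suspend() blocks on un_disk_busy_cv waiting for
 * outstanding commands to drain, so this broadcast is what lets
 * a pending suspend make progress.)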
13563 */
13564 cv_broadcast(&un->un_disk_busy_cv);
13565 break;
13566 default:
13567 sd_start_cmds(un, NULL);
13568 break;
13569 }
13570
13571 /* Return this command up the iodone chain to its originator. */
13572 mutex_exit(SD_MUTEX(un));
13573
13574 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
13575 xp->xb_pktp = NULL;
13576
13577 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
13578
13579 ASSERT(!mutex_owned(SD_MUTEX(un)));
13580 mutex_enter(SD_MUTEX(un));
13581
13582 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
13583 }
13584
13585
13586 /*
13587 * Function: sd_return_failed_command
13588 *
13589 * Description: Command completion when an error occurred.
13590 *
13591 * Context: May be called from interrupt context
13592 */
13593
13594 static void
13595 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
13596 {
13597 ASSERT(bp != NULL);
13598 ASSERT(un != NULL);
13599 ASSERT(mutex_owned(SD_MUTEX(un)));
13600
13601 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13602 "sd_return_failed_command: entry\n");
13603
13604 /*
13605 * b_resid could already be nonzero due to a partial data
13606 * transfer, so do not change it here.
13607 */
13608 SD_BIOERROR(bp, errcode);
13609
13610 sd_return_command(un, bp);
13611 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13612 "sd_return_failed_command: exit\n");
13613 }
13614
13615
13616 /*
13617 * Function: sd_return_failed_command_no_restart
13618 *
13619 * Description: Same as sd_return_failed_command, but ensures that no
13620 * call back into sd_start_cmds will be issued.
13621 *
13622 * Context: May be called from interrupt context
13623 */
13624
13625 static void
13626 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
13627 int errcode)
13628 {
13629 struct sd_xbuf *xp;
13630
13631 ASSERT(bp != NULL);
13632 ASSERT(un != NULL);
13633 ASSERT(mutex_owned(SD_MUTEX(un)));
13634 xp = SD_GET_XBUF(bp);
13635 ASSERT(xp != NULL);
13636 ASSERT(errcode != 0);
13637
13638 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13639 "sd_return_failed_command_no_restart: entry\n");
13640
13641 /*
13642 * b_resid could already be nonzero due to a partial data
13643 * transfer, so do not change it here.
13644 */
13645 SD_BIOERROR(bp, errcode);
13646
13647 /*
13648 * If this is the failfast bp, clear it. This can happen if the
13649 * failfast bp encountered a fatal error when we attempted to
13650 * re-try it (such as a scsi_transport(9F) failure). However
13651 * we should NOT be in an active failfast state if the failfast
13652 * bp is not NULL.
13653 */
13654 if (bp == un->un_failfast_bp) {
13655 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
13656 un->un_failfast_bp = NULL;
13657 }
13658
13659 if (bp == un->un_retry_bp) {
13660 /*
13661 * This command was retried one or more times. Show that we are
13662 * done with it, and allow processing of the waitq to resume.
13663 */
13664 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13665 "sd_return_failed_command_no_restart: "
13666 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
13667 un->un_retry_bp = NULL;
13668 un->un_retry_statp = NULL;
13669 }
13670
13671 SD_UPDATE_RDWR_STATS(un, bp);
13672 SD_UPDATE_PARTITION_STATS(un, bp);
13673
13674 mutex_exit(SD_MUTEX(un));
13675
13676 if (xp->xb_pktp != NULL) {
13677 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
13678 xp->xb_pktp = NULL;
13679 }
13680
13681 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
13682
13683 mutex_enter(SD_MUTEX(un));
13684
13685 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13686 "sd_return_failed_command_no_restart: exit\n");
13687 }
13688
13689
13690 /*
13691 * Function: sd_retry_command
13692 *
13693 * Description: Queue up a command for retry, or (optionally) fail it
13694 * if retry counts are exhausted.
13695 *
13696 * Arguments: un - Pointer to the sd_lun struct for the target.
13697 *
13698 * bp - Pointer to the buf for the command to be retried.
13699 *
13700 * retry_check_flag - Flag to see which (if any) of the retry
13701 * counts should be decremented/checked. If the indicated
13702 * retry count is exhausted, then the command will not be
13703 * retried; it will be failed instead. This should use a
13704 * value equal to one of the following:
13705 *
13706 * SD_RETRIES_NOCHECK
13707 * SD_RETRIES_STANDARD
13708 * SD_RETRIES_VICTIM
13709 *
13710 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
13711 * if the check should be made to see if FLAG_ISOLATE is set
13712 * in the pkt. If FLAG_ISOLATE is set, then the command is
13713 * not retried, it is simply failed.
13714 *
13715 * user_funcp - Ptr to function to call before dispatching the
13716 * command. May be NULL if no action needs to be performed.
13717 * (Primarily intended for printing messages.)
13718 *
13719 * user_arg - Optional argument to be passed along to
13720 * the user_funcp call.
13721 *
13722 * failure_code - errno return code to set in the bp if the
13723 * command is going to be failed.
13724 *
13725 * retry_delay - Retry delay interval in (clock_t) units. May
13726 * be zero, which indicates that the command should be retried
13727 * immediately (ie, without an intervening delay).
13728 *
13729 * statp - Ptr to kstat function to be updated if the command
13730 * is queued for a delayed retry. May be NULL if no kstat
13731 * update is desired.
13732 *
13733 * Context: May be called from interrupt context.
13734 */
13735
13736 static void
13737 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
13738 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
13739 code), void *user_arg, int failure_code, clock_t retry_delay,
13740 void (*statp)(kstat_io_t *))
13741 {
13742 struct sd_xbuf *xp;
13743 struct scsi_pkt *pktp;
13744
13745 ASSERT(un != NULL);
13746 ASSERT(mutex_owned(SD_MUTEX(un)));
13747 ASSERT(bp != NULL);
13748 xp = SD_GET_XBUF(bp);
13749 ASSERT(xp != NULL);
13750 pktp = SD_GET_PKTP(bp);
13751 ASSERT(pktp != NULL);
13752
13753 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
13754 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
13755
13756 /*
13757 * If we are syncing or dumping, fail the command to avoid
13758 * recursively calling back into scsi_transport().
13759 */
13760 if (ddi_in_panic()) {
13761 goto fail_command_no_log;
13762 }
13763
13764 /*
13765 * We should never be retrying a command with FLAG_DIAGNOSE set, so
13766 * log an error and fail the command.
13767 */
13768 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
13769 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
13770 "ERROR, retrying FLAG_DIAGNOSE command.\n");
13771 sd_dump_memory(un, SD_LOG_IO, "CDB",
13772 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
13773 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
13774 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
13775 goto fail_command;
13776 }
13777
13778 /*
13779 * If we are suspended, then put the command onto the head of the
13780 * wait queue since we don't want to start more commands, and
13781 * clear the un_retry_bp. The command will be handled from the
13782 * wait queue when we are resumed.
13783 */
13784 switch (un->un_state) {
13785 case SD_STATE_SUSPENDED:
13786 case SD_STATE_DUMPING:
13787 bp->av_forw = un->un_waitq_headp;
13788 un->un_waitq_headp = bp;
13789 if (un->un_waitq_tailp == NULL) {
13790 un->un_waitq_tailp = bp;
13791 }
13792 if (bp == un->un_retry_bp) {
13793 un->un_retry_bp = NULL;
13794 un->un_retry_statp = NULL;
13795 }
13796 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13797 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
13798 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
13799 return;
13800 default:
13801 break;
13802 }
13803
13804 /*
13805 * If the caller wants us to check FLAG_ISOLATE, then see if that
13806 * is set; if it is then we do not want to retry the command.
13807 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
13808 */
13809 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
13810 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
13811 goto fail_command;
13812 }
13813 }
13814
13815
13816 /*
13817 * If SD_RETRIES_FAILFAST is set, it indicates that either a
13818 * command timeout or a selection timeout has occurred. This means
13819 * that we were unable to establish any kind of communication with
13820 * the target, and subsequent retries and/or commands are likely
13821 * to encounter similar results and take a long time to complete.
13822 *
13823 * If this is a failfast error condition, we need to update the
13824 * failfast state, even if this bp does not have B_FAILFAST set.
13825 */
13826 if (retry_check_flag & SD_RETRIES_FAILFAST) {
13827 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
13828 ASSERT(un->un_failfast_bp == NULL);
13829 /*
13830 * If we are already in the active failfast state, and
13831 * another failfast error condition has been detected,
13832 * then fail this command if it has B_FAILFAST set.
13833 * If B_FAILFAST is clear, then maintain the legacy
13834 * behavior of retrying heroically, even though this will
13835 * take a lot more time to fail the command.
13836 */
13837 if (bp->b_flags & B_FAILFAST) {
13838 goto fail_command;
13839 }
13840 } else {
13841 /*
13842 * We're not in the active failfast state, but we
13843 * have a failfast error condition, so we must begin
13844 * transition to the next state. We do this regardless
13845 * of whether or not this bp has B_FAILFAST set.
13846 */
13847 if (un->un_failfast_bp == NULL) {
13848 /*
13849 * This is the first bp to meet a failfast
13850 * condition so save it on un_failfast_bp &
13851 * do normal retry processing. Do not enter
13852 * active failfast state yet. This marks
13853 * entry into the "failfast pending" state.
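 *
 * (State machine summary, restating this routine's logic:
 * INACTIVE -> "failfast pending" when the first bp meets a
 * failfast condition (saved in un_failfast_bp) -> ACTIVE when
 * that same bp fails again, at which point the queues are
 * flushed; any successful completion, or a retry without a
 * failfast condition, returns the state to INACTIVE.)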
13854 */
13855 un->un_failfast_bp = bp;
13856
13857 } else if (un->un_failfast_bp == bp) {
13858 /*
13859 * This is the second time *this* bp has
13860 * encountered a failfast error condition,
13861 * so enter active failfast state & flush
13862 * queues as appropriate.
13863 */
13864 un->un_failfast_state = SD_FAILFAST_ACTIVE;
13865 un->un_failfast_bp = NULL;
13866 sd_failfast_flushq(un);
13867
13868 /*
13869 * Fail this bp now if B_FAILFAST set;
13870 * otherwise continue with retries. (It would
13871 * be pretty ironic if this bp succeeded on a
13872 * subsequent retry after we just flushed all
13873 * the queues).
13874 */
13875 if (bp->b_flags & B_FAILFAST) {
13876 goto fail_command;
13877 }
13878
13879 #if !defined(lint) && !defined(__lint)
13880 } else {
13881 /*
13882 * If neither of the preceding conditionals
13883 * was true, it means that there is some
13884 * *other* bp that has met an initial failfast
13885 * condition and is currently either being
13886 * retried or is waiting to be retried. In
13887 * that case we should perform normal retry
13888 * processing on *this* bp, since there is a
13889 * chance that the current failfast condition
13890 * is transient and recoverable. If that does
13891 * not turn out to be the case, then retries
13892 * will be cleared when the wait queue is
13893 * flushed anyway.
13894 */
13895 #endif
13896 }
13897 }
13898 } else {
13899 /*
13900 * SD_RETRIES_FAILFAST is clear, which indicates that we
13901 * likely were able to at least establish some level of
13902 * communication with the target and subsequent commands
13903 * and/or retries are likely to get through to the target.
13904 * In this case we want to be aggressive about clearing
13905 * the failfast state. Note that this does not affect
13906 * the "failfast pending" condition.
13907 */
13908 un->un_failfast_state = SD_FAILFAST_INACTIVE;
13909 }
13910
13911
13912 /*
13913 * Check the specified retry count to see if we can still do
13914 * any retries with this pkt before we should fail it.
13915 */
13916 switch (retry_check_flag & SD_RETRIES_MASK) {
13917 case SD_RETRIES_VICTIM:
13918 /*
13919 * Check the victim retry count. If exhausted, then fall
13920 * through & check against the standard retry count.
13921 */
13922 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
13923 /* Increment count & proceed with the retry */
13924 xp->xb_victim_retry_count++;
13925 break;
13926 }
13927 /* Victim retries exhausted, fall back to std. retries... */
13928 /* FALLTHRU */
13929
13930 case SD_RETRIES_STANDARD:
13931 if (xp->xb_retry_count >= un->un_retry_count) {
13932 /* Retries exhausted, fail the command */
13933 SD_TRACE(SD_LOG_IO_CORE, un,
13934 "sd_retry_command: retries exhausted!\n");
13935 /*
13936 * update b_resid for failed SCMD_READ & SCMD_WRITE
13937 * commands with nonzero pkt_resid.
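 * (SD_UPDATE_B_RESID below recomputes b_resid from pkt_resid,
 * so the originator can see how much data was transferred
 * before the command was finally failed.)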
13938 */
13939 if ((pktp->pkt_reason == CMD_CMPLT) &&
13940 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
13941 (pktp->pkt_resid != 0)) {
13942 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
13943 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
13944 SD_UPDATE_B_RESID(bp, pktp);
13945 }
13946 }
13947 goto fail_command;
13948 }
13949 xp->xb_retry_count++;
13950 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13951 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
13952 break;
13953
13954 case SD_RETRIES_UA:
13955 if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
13956 /* Retries exhausted, fail the command */
13957 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13958 "Unit Attention retries exhausted. "
13959 "Check the target.\n");
13960 goto fail_command;
13961 }
13962 xp->xb_ua_retry_count++;
13963 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13964 "sd_retry_command: retry count:%d\n",
13965 xp->xb_ua_retry_count);
13966 break;
13967
13968 case SD_RETRIES_BUSY:
13969 if (xp->xb_retry_count >= un->un_busy_retry_count) {
13970 /* Retries exhausted, fail the command */
13971 SD_TRACE(SD_LOG_IO_CORE, un,
13972 "sd_retry_command: retries exhausted!\n");
13973 goto fail_command;
13974 }
13975 xp->xb_retry_count++;
13976 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13977 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
13978 break;
13979
13980 case SD_RETRIES_NOCHECK:
13981 default:
13982 /* No retry count to check. Just proceed with the retry */
13983 break;
13984 }
13985
13986 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
13987
13988 /*
13989 * If we were given a zero timeout, we must attempt to retry the
13990 * command immediately (ie, without a delay).
13991 */
13992 if (retry_delay == 0) {
13993 /*
13994 * Check some limiting conditions to see if we can actually
13995 * do the immediate retry. If we cannot, then we must
13996 * fall back to queueing up a delayed retry.
13997 */
13998 if (un->un_ncmds_in_transport >= un->un_throttle) {
13999 /*
14000 * We are at the throttle limit for the target,
14001 * fall back to delayed retry.
14002 */
14003 retry_delay = SD_BSY_TIMEOUT;
14004 statp = kstat_waitq_enter;
14005 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14006 "sd_retry_command: immed. retry hit "
14007 "throttle!\n");
14008 } else {
14009 /*
14010 * We're clear to proceed with the immediate retry.
14011 * First call the user-provided function (if any)
14012 */
14013 if (user_funcp != NULL) {
14014 (*user_funcp)(un, bp, user_arg,
14015 SD_IMMEDIATE_RETRY_ISSUED);
14016 #ifdef __lock_lint
14017 sd_print_incomplete_msg(un, bp, user_arg,
14018 SD_IMMEDIATE_RETRY_ISSUED);
14019 sd_print_cmd_incomplete_msg(un, bp, user_arg,
14020 SD_IMMEDIATE_RETRY_ISSUED);
14021 sd_print_sense_failed_msg(un, bp, user_arg,
14022 SD_IMMEDIATE_RETRY_ISSUED);
14023 #endif
14024 }
14025
14026 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14027 "sd_retry_command: issuing immediate retry\n");
14028
14029 /*
14030 * Call sd_start_cmds() to transport the command to
14031 * the target.
14032 */
14033 sd_start_cmds(un, bp);
14034
14035 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14036 "sd_retry_command exit\n");
14037 return;
14038 }
14039 }
14040
14041 /*
14042 * Set up to retry the command after a delay.
14043 * First call the user-provided function (if any)
14044 */
14045 if (user_funcp != NULL) {
14046 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
14047 }
14048
14049 sd_set_retry_bp(un, bp, retry_delay, statp);
14050
14051 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
14052 return;
14053
14054 fail_command:
14055
14056 if (user_funcp != NULL) {
14057 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
14058 }
14059
14060 fail_command_no_log:
14061
14062 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14063 "sd_retry_command: returning failed command\n");
14064
14065 sd_return_failed_command(un, bp, failure_code);
14066
14067 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
14068 }
14069
14070
14071 /*
14072 * Function: sd_set_retry_bp
14073 *
14074 * Description: Set up the given bp for retry.
14075 *
14076 * Arguments: un - ptr to associated softstate
14077 * bp - ptr to buf(9S) for the command
14078 * retry_delay - time interval before issuing retry (may be 0)
14079 * statp - optional pointer to kstat function
14080 *
14081 * Context: May be called under interrupt context
14082 */
14083
14084 static void
14085 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
14086 void (*statp)(kstat_io_t *))
14087 {
14088 ASSERT(un != NULL);
14089 ASSERT(mutex_owned(SD_MUTEX(un)));
14090 ASSERT(bp != NULL);
14091
14092 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14093 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);
14094
14095 /*
14096 * Indicate that the command is being retried. This will not allow any
14097 * other commands on the wait queue to be transported to the target
14098 * until this command has been completed (success or failure). The
14099 * "retry command" is not transported to the target until the given
14100 * time delay expires, unless the user specified a 0 retry_delay.
14101 *
14102 * Note: the timeout(9F) callback routine is what actually calls
14103 * sd_start_cmds() to transport the command, with the exception of a
14104 * zero retry_delay. The only current use of a zero retry delay is
14105 * the case where a START_STOP_UNIT command is sent to spin up a device.
14106 */
14107 if (un->un_retry_bp == NULL) {
14108 ASSERT(un->un_retry_statp == NULL);
14109 un->un_retry_bp = bp;
14110
14111 /*
14112 * If the user has not specified a delay, the command should
14113 * be queued and no timeout should be scheduled.
14114 */
14115 if (retry_delay == 0) {
14116 /*
14117 * Save the kstat pointer that will be used in the
14118 * call to SD_UPDATE_KSTATS() below, so that
14119 * sd_start_cmds() can correctly decrement the waitq
14120 * count when it is time to transport this command.
14121 */
14122 un->un_retry_statp = statp;
14123 goto done;
14124 }
14125 }
14126
14127 if (un->un_retry_bp == bp) {
14128 /*
14129 * Save the kstat pointer that will be used in the call to
14130 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
14131 * correctly decrement the waitq count when it is time to
14132 * transport this command.
14133 */
14134 un->un_retry_statp = statp;
14135
14136 /*
14137 * Schedule a timeout if:
14138 * 1) The user has specified a delay.
14139 * 2) There is not a START_STOP_UNIT callback pending.
14140 *
14141 * If no delay has been specified, then it is up to the caller
14142 * to ensure that IO processing continues without stalling.
14143 * Effectively, this means that the caller will issue the
14144 * required call to sd_start_cmds(). The START_STOP_UNIT
14145 * callback does this after the START STOP UNIT command has
14146 * completed. In either of these cases we should not schedule
14147 * a timeout callback here. Also don't schedule the timeout if
14148 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
14149 */
14150 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
14151 (un->un_direct_priority_timeid == NULL)) {
14152 un->un_retry_timeid =
14153 timeout(sd_start_retry_command, un, retry_delay);
14154 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14155 "sd_set_retry_bp: setting timeout: un: 0x%p"
14156 " bp:0x%p un_retry_timeid:0x%p\n",
14157 un, bp, un->un_retry_timeid);
14158 }
14159 } else {
14160 /*
14161 * We only get in here if there is already another command
14162 * waiting to be retried. In this case, we just put the
14163 * given command onto the wait queue, so it can be transported
14164 * after the current retry command has completed.
14165 *
14166 * Also we have to make sure that if the command at the head
14167 * of the wait queue is the un_failfast_bp, we do not put any
14168 * other commands that are to be retried ahead of it.
14169 */
14170 if ((un->un_failfast_bp != NULL) &&
14171 (un->un_failfast_bp == un->un_waitq_headp)) {
14172 /*
14173 * Enqueue this command AFTER the first command on
14174 * the wait queue (which is also un_failfast_bp).
14175 */
14176 bp->av_forw = un->un_waitq_headp->av_forw;
14177 un->un_waitq_headp->av_forw = bp;
14178 if (un->un_waitq_headp == un->un_waitq_tailp) {
14179 un->un_waitq_tailp = bp;
14180 }
14181 } else {
14182 /* Enqueue this command at the head of the waitq. */
14183 bp->av_forw = un->un_waitq_headp;
14184 un->un_waitq_headp = bp;
14185 if (un->un_waitq_tailp == NULL) {
14186 un->un_waitq_tailp = bp;
14187 }
14188 }
14189
14190 if (statp == NULL) {
14191 statp = kstat_waitq_enter;
14192 }
14193 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14194 "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
14195 }
14196
14197 done:
14198 if (statp != NULL) {
14199 SD_UPDATE_KSTATS(un, statp, bp);
14200 }
14201
14202 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14203 "sd_set_retry_bp: exit un:0x%p\n", un);
14204 }
14205
14206
14207 /*
14208 * Function: sd_start_retry_command
14209 *
14210 * Description: Start the command that has been waiting on the target's
14211 * retry queue. Called from timeout(9F) context after the
14212 * retry delay interval has expired.
14213 *
14214 * Arguments: arg - pointer to associated softstate for the device.
14215 *
14216 * Context: timeout(9F) thread context. May not sleep.
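 *
 * (For reference, an illustrative restatement: this callback is
 * armed in sd_set_retry_bp() above via
 *
 *	un->un_retry_timeid =
 *	    timeout(sd_start_retry_command, un, retry_delay);
 *
 * and the body below must clear un_retry_timeid under SD_MUTEX
 * before restarting the command.)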
14217 */
14218
14219 static void
14220 sd_start_retry_command(void *arg)
14221 {
14222 struct sd_lun *un = arg;
14223
14224 ASSERT(un != NULL);
14225 ASSERT(!mutex_owned(SD_MUTEX(un)));
14226
14227 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14228 "sd_start_retry_command: entry\n");
14229
14230 mutex_enter(SD_MUTEX(un));
14231
14232 un->un_retry_timeid = NULL;
14233
14234 if (un->un_retry_bp != NULL) {
14235 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14236 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
14237 un, un->un_retry_bp);
14238 sd_start_cmds(un, un->un_retry_bp);
14239 }
14240
14241 mutex_exit(SD_MUTEX(un));
14242
14243 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14244 "sd_start_retry_command: exit\n");
14245 }
14246
14247
14248 /*
14249 * Function: sd_start_direct_priority_command
14250 *
14251 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
14252 * received TRAN_BUSY when we called scsi_transport() to send it
14253 * to the underlying HBA. This function is called from timeout(9F)
14254 * context after the delay interval has expired.
14255 *
14256 * Arguments: arg - pointer to associated buf(9S) to be restarted.
14257 *
14258 * Context: timeout(9F) thread context. May not sleep.
14259 */
14260
14261 static void
14262 sd_start_direct_priority_command(void *arg)
14263 {
14264 struct buf *priority_bp = arg;
14265 struct sd_lun *un;
14266
14267 ASSERT(priority_bp != NULL);
14268 un = SD_GET_UN(priority_bp);
14269 ASSERT(un != NULL);
14270 ASSERT(!mutex_owned(SD_MUTEX(un)));
14271
14272 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14273 "sd_start_direct_priority_command: entry\n");
14274
14275 mutex_enter(SD_MUTEX(un));
14276 un->un_direct_priority_timeid = NULL;
14277 sd_start_cmds(un, priority_bp);
14278 mutex_exit(SD_MUTEX(un));
14279
14280 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14281 "sd_start_direct_priority_command: exit\n");
14282 }
14283
14284
14285 /*
14286 * Function: sd_send_request_sense_command
14287 *
14288 * Description: Sends a REQUEST SENSE command to the target
14289 *
14290 * Context: May be called from interrupt context.
14291 */
14292
14293 static void
14294 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
14295 struct scsi_pkt *pktp)
14296 {
14297 ASSERT(bp != NULL);
14298 ASSERT(un != NULL);
14299 ASSERT(mutex_owned(SD_MUTEX(un)));
14300
14301 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
14302 "entry: buf:0x%p\n", bp);
14303
14304 /*
14305 * If we are syncing or dumping, then fail the command to avoid a
14306 * recursive callback into scsi_transport(). Also fail the command
14307 * if we are suspended (legacy behavior).
14308 */
14309 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
14310 (un->un_state == SD_STATE_DUMPING)) {
14311 sd_return_failed_command(un, bp, EIO);
14312 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14313 "sd_send_request_sense_command: syncing/dumping, exit\n");
14314 return;
14315 }
14316
14317 /*
14318 * Retry the failed command and don't issue the request sense if:
14319 * 1) the sense buf is busy
14320 * 2) we have 1 or more outstanding commands on the target
14321 * (the sense data will be cleared or invalidated anyway)
14322 *
14323 * Note: There could be an issue with not checking a retry limit here;
14324 * the problem is determining which retry limit to check.
14325 */
14326 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
14327 /* Don't retry if the command is flagged as non-retryable */
14328 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
14329 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
14330 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter);
14331 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14332 "sd_send_request_sense_command: "
14333 "at full throttle, retrying exit\n");
14334 } else {
14335 sd_return_failed_command(un, bp, EIO);
14336 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14337 "sd_send_request_sense_command: "
14338 "at full throttle, non-retryable exit\n");
14339 }
14340 return;
14341 }
14342
14343 sd_mark_rqs_busy(un, bp);
14344 sd_start_cmds(un, un->un_rqs_bp);
14345
14346 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14347 "sd_send_request_sense_command: exit\n");
14348 }
14349
14350
14351 /*
14352 * Function: sd_mark_rqs_busy
14353 *
14354 * Description: Indicate that the request sense bp for this instance is
14355 * in use.
14356 *
14357 * Context: May be called under interrupt context
14358 */
14359
14360 static void
14361 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
14362 {
14363 struct sd_xbuf *sense_xp;
14364
14365 ASSERT(un != NULL);
14366 ASSERT(bp != NULL);
14367 ASSERT(mutex_owned(SD_MUTEX(un)));
14368 ASSERT(un->un_sense_isbusy == 0);
14369
14370 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
14371 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
14372
14373 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
14374 ASSERT(sense_xp != NULL);
14375
14376 SD_INFO(SD_LOG_IO, un,
14377 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
14378
14379 ASSERT(sense_xp->xb_pktp != NULL);
14380 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
14381 == (FLAG_SENSING | FLAG_HEAD));
14382
14383 un->un_sense_isbusy = 1;
14384 un->un_rqs_bp->b_resid = 0;
14385 sense_xp->xb_pktp->pkt_resid = 0;
14386 sense_xp->xb_pktp->pkt_reason = 0;
14387
14388 /* So we can get back the bp at interrupt time! */
14389 sense_xp->xb_sense_bp = bp;
14390
14391 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
14392
14393 /*
14394 * Mark this buf as awaiting sense data. (This is already set in
14395 * the pkt_flags for the RQS packet.)
14396 */
14397 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
14398
14399 sense_xp->xb_retry_count = 0;
14400 sense_xp->xb_victim_retry_count = 0;
14401 sense_xp->xb_ua_retry_count = 0;
14402 sense_xp->xb_nr_retry_count = 0;
14403 sense_xp->xb_dma_resid = 0;
14404
14405 /* Clean up the fields for auto-request sense */
14406 sense_xp->xb_sense_status = 0;
14407 sense_xp->xb_sense_state = 0;
14408 sense_xp->xb_sense_resid = 0;
14409 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
14410
14411 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
14412 }
14413
14414
14415 /*
14416 * Function: sd_mark_rqs_idle
14417 *
14418 * Description: SD_MUTEX must be held continuously through this routine
14419 * to prevent reuse of the rqs struct before the caller can
14420 * complete its processing.
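 *
 * Typical use (illustrative; this mirrors the TRAN_BUSY handling
 * in sd_start_cmds() above):
 *
 *	bp = sd_mark_rqs_idle(un, xp);
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
 *	    NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter);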
14421 * 14422 * Return Code: Pointer to the RQS buf 14423 * 14424 * Context: May be called under interrupt context 14425 */ 14426 14427 static struct buf * 14428 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14429 { 14430 struct buf *bp; 14431 ASSERT(un != NULL); 14432 ASSERT(sense_xp != NULL); 14433 ASSERT(mutex_owned(SD_MUTEX(un))); 14434 ASSERT(un->un_sense_isbusy != 0); 14435 14436 un->un_sense_isbusy = 0; 14437 bp = sense_xp->xb_sense_bp; 14438 sense_xp->xb_sense_bp = NULL; 14439 14440 /* This pkt is no longer interested in getting sense data */ 14441 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14442 14443 return (bp); 14444 } 14445 14446 14447 14448 /* 14449 * Function: sd_alloc_rqs 14450 * 14451 * Description: Set up the unit to receive auto request sense data 14452 * 14453 * Return Code: DDI_SUCCESS or DDI_FAILURE 14454 * 14455 * Context: Called under attach(9E) context 14456 */ 14457 14458 static int 14459 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14460 { 14461 struct sd_xbuf *xp; 14462 14463 ASSERT(un != NULL); 14464 ASSERT(!mutex_owned(SD_MUTEX(un))); 14465 ASSERT(un->un_rqs_bp == NULL); 14466 ASSERT(un->un_rqs_pktp == NULL); 14467 14468 /* 14469 * First allocate the required buf and scsi_pkt structs, then set up 14470 * the CDB in the scsi_pkt for a REQUEST SENSE command. 14471 */ 14472 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14473 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14474 if (un->un_rqs_bp == NULL) { 14475 return (DDI_FAILURE); 14476 } 14477 14478 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14479 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14480 14481 if (un->un_rqs_pktp == NULL) { 14482 sd_free_rqs(un); 14483 return (DDI_FAILURE); 14484 } 14485 14486 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14487 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14488 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 14489 14490 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14491 14492 /* Set up the other needed members in the ARQ scsi_pkt. */ 14493 un->un_rqs_pktp->pkt_comp = sdintr; 14494 un->un_rqs_pktp->pkt_time = sd_io_time; 14495 un->un_rqs_pktp->pkt_flags |= 14496 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14497 14498 /* 14499 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14500 * provide any intpkt, destroypkt routines as we take care of 14501 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14502 */ 14503 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14504 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14505 xp->xb_pktp = un->un_rqs_pktp; 14506 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14507 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14508 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14509 14510 /* 14511 * Save the pointer to the request sense private bp so it can 14512 * be retrieved in sdintr. 14513 */ 14514 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14515 ASSERT(un->un_rqs_bp->b_private == xp); 14516 14517 /* 14518 * See if the HBA supports auto-request sense for the specified 14519 * target/lun. If it does, then try to enable it (if not already 14520 * enabled). 14521 * 14522 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14523 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14524 * return success. However, in both of these cases ARQ is always 14525 * enabled and scsi_ifgetcap will always return true. 
The best approach 14526 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14527 * 14528 * In a third case, the HBA (adp) always returns enabled on 14529 * scsi_ifgetcap even when ARQ is not enabled; there the best approach 14530 * is to issue a scsi_ifsetcap and then a scsi_ifgetcap. 14531 * Note: this case exists to circumvent the Adaptec bug. (x86 only) 14532 */ 14533 14534 if (un->un_f_is_fibre == TRUE) { 14535 un->un_f_arq_enabled = TRUE; 14536 } else { 14537 #if defined(__i386) || defined(__amd64) 14538 /* 14539 * Circumvent the Adaptec bug; remove this code when 14540 * the bug is fixed 14541 */ 14542 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14543 #endif 14544 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14545 case 0: 14546 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14547 "sd_alloc_rqs: HBA supports ARQ\n"); 14548 /* 14549 * ARQ is supported by this HBA but currently is not 14550 * enabled. Attempt to enable it and if successful then 14551 * mark this instance as ARQ enabled. 14552 */ 14553 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14554 == 1) { 14555 /* Successfully enabled ARQ in the HBA */ 14556 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14557 "sd_alloc_rqs: ARQ enabled\n"); 14558 un->un_f_arq_enabled = TRUE; 14559 } else { 14560 /* Could not enable ARQ in the HBA */ 14561 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14562 "sd_alloc_rqs: failed ARQ enable\n"); 14563 un->un_f_arq_enabled = FALSE; 14564 } 14565 break; 14566 case 1: 14567 /* 14568 * ARQ is supported by this HBA and is already enabled. 14569 * Just mark ARQ as enabled for this instance. 14570 */ 14571 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14572 "sd_alloc_rqs: ARQ already enabled\n"); 14573 un->un_f_arq_enabled = TRUE; 14574 break; 14575 default: 14576 /* 14577 * ARQ is not supported by this HBA; disable it for this 14578 * instance. 14579 */ 14580 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14581 "sd_alloc_rqs: HBA does not support ARQ\n"); 14582 un->un_f_arq_enabled = FALSE; 14583 break; 14584 } 14585 } 14586 14587 return (DDI_SUCCESS); 14588 } 14589 14590 14591 /* 14592 * Function: sd_free_rqs 14593 * 14594 * Description: Cleanup for the per-instance RQS command. 14595 * 14596 * Context: Kernel thread context 14597 */ 14598 14599 static void 14600 sd_free_rqs(struct sd_lun *un) 14601 { 14602 ASSERT(un != NULL); 14603 14604 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14605 14606 /* 14607 * If consistent memory is bound to a scsi_pkt, the pkt 14608 * has to be destroyed *before* freeing the consistent memory. 14609 * Don't change the sequence of these operations: 14610 * scsi_destroy_pkt() might access memory after it has been 14611 * freed by scsi_free_consistent_buf(), which is not allowed. 14612 */ 14613 if (un->un_rqs_pktp != NULL) { 14614 scsi_destroy_pkt(un->un_rqs_pktp); 14615 un->un_rqs_pktp = NULL; 14616 } 14617 14618 if (un->un_rqs_bp != NULL) { 14619 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 14620 if (xp != NULL) { 14621 kmem_free(xp, sizeof (struct sd_xbuf)); 14622 } 14623 scsi_free_consistent_buf(un->un_rqs_bp); 14624 un->un_rqs_bp = NULL; 14625 } 14626 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14627 } 14628 14629 14630 14631 /* 14632 * Function: sd_reduce_throttle 14633 * 14634 * Description: Reduces the maximum # of outstanding commands on a 14635 * target to the current number of outstanding commands. 14636 * Queues a timeout(9F) callback to restore the limit 14637 * after a specified interval has elapsed.
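 *
 * A minimal sketch of the effect, using assumed values for
 * illustration only:
 *
 *	before:	un->un_throttle == 16, un->un_ncmds_in_transport == 5
 *	sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
 *	after:	un->un_busy_throttle == 16 (saved for the restore),
 *		un->un_throttle == 5, and sd_restore_throttle() is
 *		pending via timeout(9F).
 *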
14638 * Typically used when we get a TRAN_BUSY return code 14639 * back from scsi_transport(). 14640 * 14641 * Arguments: un - ptr to the sd_lun softstate struct 14642 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14643 * 14644 * Context: May be called from interrupt context 14645 */ 14646 14647 static void 14648 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14649 { 14650 ASSERT(un != NULL); 14651 ASSERT(mutex_owned(SD_MUTEX(un))); 14652 ASSERT(un->un_ncmds_in_transport >= 0); 14653 14654 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14655 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14656 un, un->un_throttle, un->un_ncmds_in_transport); 14657 14658 if (un->un_throttle > 1) { 14659 if (un->un_f_use_adaptive_throttle == TRUE) { 14660 switch (throttle_type) { 14661 case SD_THROTTLE_TRAN_BUSY: 14662 if (un->un_busy_throttle == 0) { 14663 un->un_busy_throttle = un->un_throttle; 14664 } 14665 break; 14666 case SD_THROTTLE_QFULL: 14667 un->un_busy_throttle = 0; 14668 break; 14669 default: 14670 ASSERT(FALSE); 14671 } 14672 14673 if (un->un_ncmds_in_transport > 0) { 14674 un->un_throttle = un->un_ncmds_in_transport; 14675 } 14676 14677 } else { 14678 if (un->un_ncmds_in_transport == 0) { 14679 un->un_throttle = 1; 14680 } else { 14681 un->un_throttle = un->un_ncmds_in_transport; 14682 } 14683 } 14684 } 14685 14686 /* Reschedule the timeout if none is currently active */ 14687 if (un->un_reset_throttle_timeid == NULL) { 14688 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14689 un, SD_THROTTLE_RESET_INTERVAL); 14690 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14691 "sd_reduce_throttle: timeout scheduled!\n"); 14692 } 14693 14694 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14695 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14696 } 14697 14698 14699 14700 /* 14701 * Function: sd_restore_throttle 14702 * 14703 * Description: Callback function for timeout(9F). Resets the current 14704 * value of un->un_throttle to its default. 14705 * 14706 * Arguments: arg - pointer to associated softstate for the device. 14707 * 14708 * Context: May be called from interrupt context 14709 */ 14710 14711 static void 14712 sd_restore_throttle(void *arg) 14713 { 14714 struct sd_lun *un = arg; 14715 14716 ASSERT(un != NULL); 14717 ASSERT(!mutex_owned(SD_MUTEX(un))); 14718 14719 mutex_enter(SD_MUTEX(un)); 14720 14721 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14722 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14723 14724 un->un_reset_throttle_timeid = NULL; 14725 14726 if (un->un_f_use_adaptive_throttle == TRUE) { 14727 /* 14728 * If un_busy_throttle is nonzero, it contains the 14729 * value that un_throttle had when we got a TRAN_BUSY back 14730 * from scsi_transport(). We want to revert to this 14731 * value. 14732 * 14733 * In the QFULL case, the throttle limit will incrementally 14734 * increase until it reaches max throttle. 14735 */ 14736 if (un->un_busy_throttle > 0) { 14737 un->un_throttle = un->un_busy_throttle; 14738 un->un_busy_throttle = 0; 14739 } else { 14740 /* 14741 * Increase the throttle by 10% to open the gate 14742 * slowly; schedule another restore if the saved 14743 * throttle has not yet been reached. 14744 */ 14745 short throttle; 14746 if (sd_qfull_throttle_enable) { 14747 throttle = un->un_throttle + 14748 max((un->un_throttle / 10), 1); 14749 un->un_throttle = 14750 (throttle < un->un_saved_throttle) ?
throttle : un->un_saved_throttle; 14752 if (un->un_throttle < un->un_saved_throttle) { 14753 un->un_reset_throttle_timeid = 14754 timeout(sd_restore_throttle, 14755 un, 14756 SD_QFULL_THROTTLE_RESET_INTERVAL); 14757 } 14758 } 14759 } 14760 14761 /* 14762 * If un_throttle has fallen below the low-water mark, we 14763 * restore the maximum value here (and allow it to ratchet 14764 * down again if necessary). 14765 */ 14766 if (un->un_throttle < un->un_min_throttle) { 14767 un->un_throttle = un->un_saved_throttle; 14768 } 14769 } else { 14770 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14771 "restoring limit from 0x%x to 0x%x\n", 14772 un->un_throttle, un->un_saved_throttle); 14773 un->un_throttle = un->un_saved_throttle; 14774 } 14775 14776 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14777 "sd_restore_throttle: calling sd_start_cmds!\n"); 14778 14779 sd_start_cmds(un, NULL); 14780 14781 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14782 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 14783 un, un->un_throttle); 14784 14785 mutex_exit(SD_MUTEX(un)); 14786 14787 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 14788 } 14789 14790 /* 14791 * Function: sdrunout 14792 * 14793 * Description: Callback routine for scsi_init_pkt when a resource allocation 14794 * fails. 14795 * 14796 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 14797 * soft state instance. 14798 * 14799 * Return Code: The scsi_init_pkt routine allows for the callback function to 14800 * return a 0 indicating the callback should be rescheduled or a 1 14801 * indicating not to reschedule. This routine always returns 1 14802 * because the driver always provides a callback function to 14803 * scsi_init_pkt. This results in a callback always being scheduled 14804 * (via the scsi_init_pkt callback implementation) if a resource 14805 * failure occurs. 14806 * 14807 * Context: This callback function may not block or call routines that block. 14808 * 14809 * Note: Using the scsi_init_pkt callback facility can result in an I/O 14810 * request persisting at the head of the list which cannot be 14811 * satisfied even after multiple retries. In the future the driver 14812 * may implement some type of maximum runout count before failing 14813 * an I/O. 14814 */ 14815 14816 static int 14817 sdrunout(caddr_t arg) 14818 { 14819 struct sd_lun *un = (struct sd_lun *)arg; 14820 14821 ASSERT(un != NULL); 14822 ASSERT(!mutex_owned(SD_MUTEX(un))); 14823 14824 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 14825 14826 mutex_enter(SD_MUTEX(un)); 14827 sd_start_cmds(un, NULL); 14828 mutex_exit(SD_MUTEX(un)); 14829 /* 14830 * This callback routine always returns 1 (i.e. do not reschedule) 14831 * because we always specify sdrunout as the callback handler for 14832 * scsi_init_pkt inside the call to sd_start_cmds. 14833 */ 14834 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 14835 return (1); 14836 } 14837 14838 14839 /* 14840 * Function: sdintr 14841 * 14842 * Description: Completion callback routine for scsi_pkt(9S) structs 14843 * sent to the HBA driver via scsi_transport(9F).
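 *
 * In outline, the dispatch order implemented below (summary added
 * for orientation):
 *
 *	1. Decrement un_ncmds_in_transport; fail the command and
 *	   update the media state on CMD_DEV_GONE.
 *	2. If auto request sense data arrived (STATE_ARQ_DONE and
 *	   ARQ enabled), hand off to sd_handle_auto_request_sense().
 *	3. If this is the instance's own REQUEST SENSE packet
 *	   (FLAG_SENSING), hand off to sd_handle_request_sense().
 *	4. On CMD_CMPLT with STATUS_GOOD, return the command (the
 *	   hot performance path).
 *	5. Otherwise decode pkt_reason/pkt_status and retry, recover,
 *	   or fail the command.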
14844 * 14845 * Context: Interrupt context 14846 */ 14847 14848 static void 14849 sdintr(struct scsi_pkt *pktp) 14850 { 14851 struct buf *bp; 14852 struct sd_xbuf *xp; 14853 struct sd_lun *un; 14854 size_t actual_len; 14855 14856 ASSERT(pktp != NULL); 14857 bp = (struct buf *)pktp->pkt_private; 14858 ASSERT(bp != NULL); 14859 xp = SD_GET_XBUF(bp); 14860 ASSERT(xp != NULL); 14861 ASSERT(xp->xb_pktp != NULL); 14862 un = SD_GET_UN(bp); 14863 ASSERT(un != NULL); 14864 ASSERT(!mutex_owned(SD_MUTEX(un))); 14865 14866 #ifdef SD_FAULT_INJECTION 14867 14868 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14869 /* SD FaultInjection */ 14870 sd_faultinjection(pktp); 14871 14872 #endif /* SD_FAULT_INJECTION */ 14873 14874 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14875 " xp:0x%p, un:0x%p\n", bp, xp, un); 14876 14877 mutex_enter(SD_MUTEX(un)); 14878 14879 /* Reduce the count of the #commands currently in transport */ 14880 un->un_ncmds_in_transport--; 14881 ASSERT(un->un_ncmds_in_transport >= 0); 14882 14883 /* Increment counter to indicate that the callback routine is active */ 14884 un->un_in_callback++; 14885 14886 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14887 14888 #ifdef SDDEBUG 14889 if (bp == un->un_retry_bp) { 14890 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14891 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14892 un, un->un_retry_bp, un->un_ncmds_in_transport); 14893 } 14894 #endif 14895 14896 /* 14897 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14898 * state if needed. 14899 */ 14900 if (pktp->pkt_reason == CMD_DEV_GONE) { 14901 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14902 "Command failed to complete...Device is gone\n"); 14903 if (un->un_mediastate != DKIO_DEV_GONE) { 14904 un->un_mediastate = DKIO_DEV_GONE; 14905 cv_broadcast(&un->un_state_cv); 14906 } 14907 sd_return_failed_command(un, bp, EIO); 14908 goto exit; 14909 } 14910 14911 if (pktp->pkt_state & STATE_XARQ_DONE) { 14912 SD_TRACE(SD_LOG_COMMON, un, 14913 "sdintr: extra sense data received. pkt=%p\n", pktp); 14914 } 14915 14916 /* 14917 * First see if the pkt has auto-request sense data with it.... 14918 * Look at the packet state first so we don't take a performance 14919 * hit looking at the arq enabled flag unless absolutely necessary. 14920 */ 14921 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14922 (un->un_f_arq_enabled == TRUE)) { 14923 /* 14924 * The HBA did an auto request sense for this command so check 14925 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14926 * driver command that should not be retried. 14927 */ 14928 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14929 /* 14930 * Save the relevant sense info into the xp for the 14931 * original cmd. 
14932 */ 14933 struct scsi_arq_status *asp; 14934 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14935 xp->xb_sense_status = 14936 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14937 xp->xb_sense_state = asp->sts_rqpkt_state; 14938 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14939 if (pktp->pkt_state & STATE_XARQ_DONE) { 14940 actual_len = MAX_SENSE_LENGTH - 14941 xp->xb_sense_resid; 14942 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14943 MAX_SENSE_LENGTH); 14944 } else { 14945 if (xp->xb_sense_resid > SENSE_LENGTH) { 14946 actual_len = MAX_SENSE_LENGTH - 14947 xp->xb_sense_resid; 14948 } else { 14949 actual_len = SENSE_LENGTH - 14950 xp->xb_sense_resid; 14951 } 14952 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14953 if ((((struct uscsi_cmd *) 14954 (xp->xb_pktinfo))->uscsi_rqlen) > 14955 actual_len) { 14956 xp->xb_sense_resid = 14957 (((struct uscsi_cmd *) 14958 (xp->xb_pktinfo))-> 14959 uscsi_rqlen) - actual_len; 14960 } else { 14961 xp->xb_sense_resid = 0; 14962 } 14963 } 14964 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14965 SENSE_LENGTH); 14966 } 14967 14968 /* fail the command */ 14969 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14970 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14971 sd_return_failed_command(un, bp, EIO); 14972 goto exit; 14973 } 14974 14975 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14976 /* 14977 * We want to either retry or fail this command, so free 14978 * the DMA resources here. If we retry the command then 14979 * the DMA resources will be reallocated in sd_start_cmds(). 14980 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14981 * causes the *entire* transfer to start over again from the 14982 * beginning of the request, even for PARTIAL chunks that 14983 * have already transferred successfully. 14984 */ 14985 if ((un->un_f_is_fibre == TRUE) && 14986 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14987 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14988 scsi_dmafree(pktp); 14989 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14990 } 14991 #endif 14992 14993 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14994 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14995 14996 sd_handle_auto_request_sense(un, bp, xp, pktp); 14997 goto exit; 14998 } 14999 15000 /* Next see if this is the REQUEST SENSE pkt for the instance */ 15001 if (pktp->pkt_flags & FLAG_SENSING) { 15002 /* This pktp is from the unit's REQUEST_SENSE command */ 15003 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15004 "sdintr: sd_handle_request_sense\n"); 15005 sd_handle_request_sense(un, bp, xp, pktp); 15006 goto exit; 15007 } 15008 15009 /* 15010 * Check to see if the command successfully completed as requested; 15011 * this is the most common case (and also the hot performance path). 15012 * 15013 * Requirements for successful completion are: 15014 * pkt_reason is CMD_CMPLT and packet status is status good. 15015 * In addition: 15016 * - A residual of zero indicates successful completion no matter what 15017 * the command is. 15018 * - If the residual is not zero and the command is not a read or 15019 * write, then it's still defined as successful completion. In other 15020 * words, if the command is a read or write the residual must be 15021 * zero for successful completion. 15022 * - If the residual is not zero and the command is a read or 15023 * write, and it's a USCSICMD, then it's still defined as 15024 * successful completion. 
15025 */ 15026 if ((pktp->pkt_reason == CMD_CMPLT) && 15027 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 15028 15029 /* 15030 * Since this command is returned with a good status, we 15031 * can reset the count for Sonoma failover. 15032 */ 15033 un->un_sonoma_failure_count = 0; 15034 15035 /* 15036 * Return all USCSI commands on good status 15037 */ 15038 if (pktp->pkt_resid == 0) { 15039 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15040 "sdintr: returning command for resid == 0\n"); 15041 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 15042 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 15043 SD_UPDATE_B_RESID(bp, pktp); 15044 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15045 "sdintr: returning command for resid != 0\n"); 15046 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15047 SD_UPDATE_B_RESID(bp, pktp); 15048 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15049 "sdintr: returning uscsi command\n"); 15050 } else { 15051 goto not_successful; 15052 } 15053 sd_return_command(un, bp); 15054 15055 /* 15056 * Decrement counter to indicate that the callback routine 15057 * is done. 15058 */ 15059 un->un_in_callback--; 15060 ASSERT(un->un_in_callback >= 0); 15061 mutex_exit(SD_MUTEX(un)); 15062 15063 return; 15064 } 15065 15066 not_successful: 15067 15068 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15069 /* 15070 * The following is based upon knowledge of the underlying transport 15071 * and its use of DMA resources. This code should be removed when 15072 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15073 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15074 * and sd_start_cmds(). 15075 * 15076 * Free any DMA resources associated with this command if there 15077 * is a chance it could be retried or enqueued for later retry. 15078 * If we keep the DMA binding then mpxio cannot reissue the 15079 * command on another path whenever a path failure occurs. 15080 * 15081 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15082 * causes the *entire* transfer to start over again from the 15083 * beginning of the request, even for PARTIAL chunks that 15084 * have already transferred successfully. 15085 * 15086 * This is only done for non-uscsi commands (and also skipped for the 15087 * driver's internal RQS command). Also just do this for Fibre Channel 15088 * devices as these are the only ones that support mpxio. 15089 */ 15090 if ((un->un_f_is_fibre == TRUE) && 15091 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15092 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15093 scsi_dmafree(pktp); 15094 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15095 } 15096 #endif 15097 15098 /* 15099 * The command did not successfully complete as requested so check 15100 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15101 * driver command that should not be retried so just return. If 15102 * FLAG_DIAGNOSE is not set the error will be processed below. 15103 */ 15104 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15105 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15106 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15107 /* 15108 * Issue a request sense if a check condition caused the error 15109 * (we handle the auto request sense case above), otherwise 15110 * just fail the command. 
15111 */ 15112 if ((pktp->pkt_reason == CMD_CMPLT) && 15113 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15114 sd_send_request_sense_command(un, bp, pktp); 15115 } else { 15116 sd_return_failed_command(un, bp, EIO); 15117 } 15118 goto exit; 15119 } 15120 15121 /* 15122 * The command did not successfully complete as requested so process 15123 * the error, retry, and/or attempt recovery. 15124 */ 15125 switch (pktp->pkt_reason) { 15126 case CMD_CMPLT: 15127 switch (SD_GET_PKT_STATUS(pktp)) { 15128 case STATUS_GOOD: 15129 /* 15130 * The command completed successfully with a non-zero 15131 * residual 15132 */ 15133 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15134 "sdintr: STATUS_GOOD \n"); 15135 sd_pkt_status_good(un, bp, xp, pktp); 15136 break; 15137 15138 case STATUS_CHECK: 15139 case STATUS_TERMINATED: 15140 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15141 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15142 sd_pkt_status_check_condition(un, bp, xp, pktp); 15143 break; 15144 15145 case STATUS_BUSY: 15146 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15147 "sdintr: STATUS_BUSY\n"); 15148 sd_pkt_status_busy(un, bp, xp, pktp); 15149 break; 15150 15151 case STATUS_RESERVATION_CONFLICT: 15152 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15153 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15154 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15155 break; 15156 15157 case STATUS_QFULL: 15158 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15159 "sdintr: STATUS_QFULL\n"); 15160 sd_pkt_status_qfull(un, bp, xp, pktp); 15161 break; 15162 15163 case STATUS_MET: 15164 case STATUS_INTERMEDIATE: 15165 case STATUS_SCSI2: 15166 case STATUS_INTERMEDIATE_MET: 15167 case STATUS_ACA_ACTIVE: 15168 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15169 "Unexpected SCSI status received: 0x%x\n", 15170 SD_GET_PKT_STATUS(pktp)); 15171 sd_return_failed_command(un, bp, EIO); 15172 break; 15173 15174 default: 15175 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15176 "Invalid SCSI status received: 0x%x\n", 15177 SD_GET_PKT_STATUS(pktp)); 15178 sd_return_failed_command(un, bp, EIO); 15179 break; 15180 15181 } 15182 break; 15183 15184 case CMD_INCOMPLETE: 15185 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15186 "sdintr: CMD_INCOMPLETE\n"); 15187 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15188 break; 15189 case CMD_TRAN_ERR: 15190 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15191 "sdintr: CMD_TRAN_ERR\n"); 15192 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15193 break; 15194 case CMD_RESET: 15195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15196 "sdintr: CMD_RESET \n"); 15197 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15198 break; 15199 case CMD_ABORTED: 15200 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15201 "sdintr: CMD_ABORTED \n"); 15202 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15203 break; 15204 case CMD_TIMEOUT: 15205 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15206 "sdintr: CMD_TIMEOUT\n"); 15207 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15208 break; 15209 case CMD_UNX_BUS_FREE: 15210 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15211 "sdintr: CMD_UNX_BUS_FREE \n"); 15212 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15213 break; 15214 case CMD_TAG_REJECT: 15215 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15216 "sdintr: CMD_TAG_REJECT\n"); 15217 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15218 break; 15219 default: 15220 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15221 "sdintr: default\n"); 15222 sd_pkt_reason_default(un, bp, xp, pktp); 15223 break; 15224 } 15225 15226 exit: 15227 
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 15228 15229 /* Decrement counter to indicate that the callback routine is done. */ 15230 un->un_in_callback--; 15231 ASSERT(un->un_in_callback >= 0); 15232 15233 /* 15234 * At this point, the pkt has been dispatched, i.e., it is either 15235 * being re-tried or has been returned to its caller and should 15236 * not be referenced. 15237 */ 15238 15239 mutex_exit(SD_MUTEX(un)); 15240 } 15241 15242 15243 /* 15244 * Function: sd_print_incomplete_msg 15245 * 15246 * Description: Prints the error message for a CMD_INCOMPLETE error. 15247 * 15248 * Arguments: un - ptr to associated softstate for the device. 15249 * bp - ptr to the buf(9S) for the command. 15250 * arg - message string ptr 15251 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 15252 * or SD_NO_RETRY_ISSUED. 15253 * 15254 * Context: May be called under interrupt context 15255 */ 15256 15257 static void 15258 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15259 { 15260 struct scsi_pkt *pktp; 15261 char *msgp; 15262 char *cmdp = arg; 15263 15264 ASSERT(un != NULL); 15265 ASSERT(mutex_owned(SD_MUTEX(un))); 15266 ASSERT(bp != NULL); 15267 ASSERT(arg != NULL); 15268 pktp = SD_GET_PKTP(bp); 15269 ASSERT(pktp != NULL); 15270 15271 switch (code) { 15272 case SD_DELAYED_RETRY_ISSUED: 15273 case SD_IMMEDIATE_RETRY_ISSUED: 15274 msgp = "retrying"; 15275 break; 15276 case SD_NO_RETRY_ISSUED: 15277 default: 15278 msgp = "giving up"; 15279 break; 15280 } 15281 15282 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15283 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15284 "incomplete %s- %s\n", cmdp, msgp); 15285 } 15286 } 15287 15288 15289 15290 /* 15291 * Function: sd_pkt_status_good 15292 * 15293 * Description: Processing for a STATUS_GOOD code in pkt_status. 15294 * 15295 * Context: May be called under interrupt context 15296 */ 15297 15298 static void 15299 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15300 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15301 { 15302 char *cmdp; 15303 15304 ASSERT(un != NULL); 15305 ASSERT(mutex_owned(SD_MUTEX(un))); 15306 ASSERT(bp != NULL); 15307 ASSERT(xp != NULL); 15308 ASSERT(pktp != NULL); 15309 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15310 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15311 ASSERT(pktp->pkt_resid != 0); 15312 15313 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15314 15315 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15316 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15317 case SCMD_READ: 15318 cmdp = "read"; 15319 break; 15320 case SCMD_WRITE: 15321 cmdp = "write"; 15322 break; 15323 default: 15324 SD_UPDATE_B_RESID(bp, pktp); 15325 sd_return_command(un, bp); 15326 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15327 return; 15328 } 15329 15330 /* 15331 * See if we can retry the read/write, preferably immediately. 15332 * If retries are exhausted, then sd_retry_command() will update 15333 * the b_resid count. 15334 */ 15335 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15336 cmdp, EIO, (clock_t)0, NULL); 15337 15338 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15339 } 15340 15341 15342 15343 15344 15345 /* 15346 * Function: sd_handle_request_sense 15347 * 15348 * Description: Processing for non-auto Request Sense command.
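 *
 * The pointer plumbing involved, for orientation (a sketch; the
 * actual wiring is done in sd_mark_rqs_busy()/sd_mark_rqs_idle()):
 *
 *	sense_xp->xb_sense_bp	-> bp of the original command
 *	SD_GET_XBUF(cmd_bp)	-> cmd_xp, the original command's xbuf
 *	SD_GET_PKTP(cmd_bp)	-> cmd_pktp, with FLAG_SENSING set
 *				   while the REQUEST SENSE is pending
 *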
15349 * 15350 * Arguments: un - ptr to associated softstate 15351 * sense_bp - ptr to buf(9S) for the RQS command 15352 * sense_xp - ptr to the sd_xbuf for the RQS command 15353 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15354 * 15355 * Context: May be called under interrupt context 15356 */ 15357 15358 static void 15359 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15360 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15361 { 15362 struct buf *cmd_bp; /* buf for the original command */ 15363 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15364 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15365 size_t actual_len; /* actual sense data length */ 15366 15367 ASSERT(un != NULL); 15368 ASSERT(mutex_owned(SD_MUTEX(un))); 15369 ASSERT(sense_bp != NULL); 15370 ASSERT(sense_xp != NULL); 15371 ASSERT(sense_pktp != NULL); 15372 15373 /* 15374 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15375 * RQS command and not the original command. 15376 */ 15377 ASSERT(sense_pktp == un->un_rqs_pktp); 15378 ASSERT(sense_bp == un->un_rqs_bp); 15379 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15380 (FLAG_SENSING | FLAG_HEAD)); 15381 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15382 FLAG_SENSING) == FLAG_SENSING); 15383 15384 /* These are the bp, xp, and pktp for the original command */ 15385 cmd_bp = sense_xp->xb_sense_bp; 15386 cmd_xp = SD_GET_XBUF(cmd_bp); 15387 cmd_pktp = SD_GET_PKTP(cmd_bp); 15388 15389 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15390 /* 15391 * The REQUEST SENSE command failed. Release the REQUEST 15392 * SENSE command for re-use, get back the bp for the original 15393 * command, and attempt to re-try the original command if 15394 * FLAG_DIAGNOSE is not set in the original packet. 15395 */ 15396 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15397 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15398 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15399 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15400 NULL, NULL, EIO, (clock_t)0, NULL); 15401 return; 15402 } 15403 } 15404 15405 /* 15406 * Save the relevant sense info into the xp for the original cmd. 15407 * 15408 * Note: if the request sense failed the state info will be zero 15409 * as set in sd_mark_rqs_busy() 15410 */ 15411 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15412 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15413 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 15414 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 15415 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 15416 SENSE_LENGTH)) { 15417 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15418 MAX_SENSE_LENGTH); 15419 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15420 } else { 15421 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15422 SENSE_LENGTH); 15423 if (actual_len < SENSE_LENGTH) { 15424 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 15425 } else { 15426 cmd_xp->xb_sense_resid = 0; 15427 } 15428 } 15429 15430 /* 15431 * Free up the RQS command.... 15432 * NOTE: 15433 * Must do this BEFORE calling sd_validate_sense_data! 15434 * sd_validate_sense_data may return the original command in 15435 * which case the pkt will be freed and the flags can no 15436 * longer be touched. 15437 * SD_MUTEX is held through this process until the command 15438 * is dispatched based upon the sense data, so there are 15439 * no race conditions. 
15440 */ 15441 (void) sd_mark_rqs_idle(un, sense_xp); 15442 15443 /* 15444 * For a retryable command see if we have valid sense data, if so then 15445 * turn it over to sd_decode_sense() to figure out the right course of 15446 * action. Just fail a non-retryable command. 15447 */ 15448 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15449 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 15450 SD_SENSE_DATA_IS_VALID) { 15451 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15452 } 15453 } else { 15454 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15455 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15456 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15457 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15458 sd_return_failed_command(un, cmd_bp, EIO); 15459 } 15460 } 15461 15462 15463 15464 15465 /* 15466 * Function: sd_handle_auto_request_sense 15467 * 15468 * Description: Processing for auto-request sense information. 15469 * 15470 * Arguments: un - ptr to associated softstate 15471 * bp - ptr to buf(9S) for the command 15472 * xp - ptr to the sd_xbuf for the command 15473 * pktp - ptr to the scsi_pkt(9S) for the command 15474 * 15475 * Context: May be called under interrupt context 15476 */ 15477 15478 static void 15479 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15480 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15481 { 15482 struct scsi_arq_status *asp; 15483 size_t actual_len; 15484 15485 ASSERT(un != NULL); 15486 ASSERT(mutex_owned(SD_MUTEX(un))); 15487 ASSERT(bp != NULL); 15488 ASSERT(xp != NULL); 15489 ASSERT(pktp != NULL); 15490 ASSERT(pktp != un->un_rqs_pktp); 15491 ASSERT(bp != un->un_rqs_bp); 15492 15493 /* 15494 * For auto-request sense, we get a scsi_arq_status back from 15495 * the HBA, with the sense data in the sts_sensedata member. 15496 * The pkt_scbp of the packet points to this scsi_arq_status. 15497 */ 15498 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15499 15500 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15501 /* 15502 * The auto REQUEST SENSE failed; see if we can re-try 15503 * the original command. 15504 */ 15505 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15506 "auto request sense failed (reason=%s)\n", 15507 scsi_rname(asp->sts_rqpkt_reason)); 15508 15509 sd_reset_target(un, pktp); 15510 15511 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15512 NULL, NULL, EIO, (clock_t)0, NULL); 15513 return; 15514 } 15515 15516 /* Save the relevant sense info into the xp for the original cmd. 
*/ 15517 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15518 xp->xb_sense_state = asp->sts_rqpkt_state; 15519 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15520 if (xp->xb_sense_state & STATE_XARQ_DONE) { 15521 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15522 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15523 MAX_SENSE_LENGTH); 15524 } else { 15525 if (xp->xb_sense_resid > SENSE_LENGTH) { 15526 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15527 } else { 15528 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 15529 } 15530 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15531 if ((((struct uscsi_cmd *) 15532 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 15533 xp->xb_sense_resid = (((struct uscsi_cmd *) 15534 (xp->xb_pktinfo))->uscsi_rqlen) - 15535 actual_len; 15536 } else { 15537 xp->xb_sense_resid = 0; 15538 } 15539 } 15540 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 15541 } 15542 15543 /* 15544 * See if we have valid sense data, if so then turn it over to 15545 * sd_decode_sense() to figure out the right course of action. 15546 */ 15547 if (sd_validate_sense_data(un, bp, xp, actual_len) == 15548 SD_SENSE_DATA_IS_VALID) { 15549 sd_decode_sense(un, bp, xp, pktp); 15550 } 15551 } 15552 15553 15554 /* 15555 * Function: sd_print_sense_failed_msg 15556 * 15557 * Description: Print log message when RQS has failed. 15558 * 15559 * Arguments: un - ptr to associated softstate 15560 * bp - ptr to buf(9S) for the command 15561 * arg - generic message string ptr 15562 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15563 * or SD_NO_RETRY_ISSUED 15564 * 15565 * Context: May be called from interrupt context 15566 */ 15567 15568 static void 15569 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15570 int code) 15571 { 15572 char *msgp = arg; 15573 15574 ASSERT(un != NULL); 15575 ASSERT(mutex_owned(SD_MUTEX(un))); 15576 ASSERT(bp != NULL); 15577 15578 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15579 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15580 } 15581 } 15582 15583 15584 /* 15585 * Function: sd_validate_sense_data 15586 * 15587 * Description: Check the given sense data for validity. 15588 * If the sense data is not valid, the command will 15589 * be either failed or retried! 15590 * 15591 * Return Code: SD_SENSE_DATA_IS_INVALID 15592 * SD_SENSE_DATA_IS_VALID 15593 * 15594 * Context: May be called from interrupt context 15595 */ 15596 15597 static int 15598 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15599 size_t actual_len) 15600 { 15601 struct scsi_extended_sense *esp; 15602 struct scsi_pkt *pktp; 15603 char *msgp = NULL; 15604 15605 ASSERT(un != NULL); 15606 ASSERT(mutex_owned(SD_MUTEX(un))); 15607 ASSERT(bp != NULL); 15608 ASSERT(bp != un->un_rqs_bp); 15609 ASSERT(xp != NULL); 15610 15611 pktp = SD_GET_PKTP(bp); 15612 ASSERT(pktp != NULL); 15613 15614 /* 15615 * Check the status of the RQS command (auto or manual). 
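 *
 * Overall, the checks below are (in order; summary added for
 * orientation):
 *
 *	1. The RQS status must be STATUS_GOOD (busy, qfull, etc. are
 *	   retried or failed without decoding the sense data).
 *	2. Sense data must actually have transferred, and at least
 *	   SUN_MIN_SENSE_LENGTH bytes of it.
 *	3. es_class must be CLASS_EXTENDED_SENSE (0x7).
 *	4. es_code must name a known sense-data format.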
15616 */ 15617 switch (xp->xb_sense_status & STATUS_MASK) { 15618 case STATUS_GOOD: 15619 break; 15620 15621 case STATUS_RESERVATION_CONFLICT: 15622 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15623 return (SD_SENSE_DATA_IS_INVALID); 15624 15625 case STATUS_BUSY: 15626 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15627 "Busy Status on REQUEST SENSE\n"); 15628 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15629 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15630 return (SD_SENSE_DATA_IS_INVALID); 15631 15632 case STATUS_QFULL: 15633 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15634 "QFULL Status on REQUEST SENSE\n"); 15635 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15636 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15637 return (SD_SENSE_DATA_IS_INVALID); 15638 15639 case STATUS_CHECK: 15640 case STATUS_TERMINATED: 15641 msgp = "Check Condition on REQUEST SENSE\n"; 15642 goto sense_failed; 15643 15644 default: 15645 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15646 goto sense_failed; 15647 } 15648 15649 /* 15650 * See if we got the minimum required amount of sense data. 15651 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15652 * or less. 15653 */ 15654 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15655 (actual_len == 0)) { 15656 msgp = "Request Sense couldn't get sense data\n"; 15657 goto sense_failed; 15658 } 15659 15660 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15661 msgp = "Not enough sense information\n"; 15662 goto sense_failed; 15663 } 15664 15665 /* 15666 * We require the extended sense data 15667 */ 15668 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15669 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15670 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15671 static char tmp[8]; 15672 static char buf[148]; 15673 char *p = (char *)(xp->xb_sense_data); 15674 int i; 15675 15676 mutex_enter(&sd_sense_mutex); 15677 (void) strcpy(buf, "undecodable sense information:"); 15678 for (i = 0; i < actual_len; i++) { 15679 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15680 (void) strcpy(&buf[strlen(buf)], tmp); 15681 } 15682 i = strlen(buf); 15683 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15684 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15685 mutex_exit(&sd_sense_mutex); 15686 } 15687 /* Note: Legacy behavior, fail the command with no retry */ 15688 sd_return_failed_command(un, bp, EIO); 15689 return (SD_SENSE_DATA_IS_INVALID); 15690 } 15691 15692 /* 15693 * Check that es_code is valid (es_class concatenated with es_code 15694 * make up the "response code" field.) es_class will always be 7, so 15695 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 15696 * format. 15697 */ 15698 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15699 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15700 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15701 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15702 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15703 goto sense_failed; 15704 } 15705 15706 return (SD_SENSE_DATA_IS_VALID); 15707 15708 sense_failed: 15709 /* 15710 * If the request sense failed (for whatever reason), attempt 15711 * to retry the original command. 15712 */ 15713 #if defined(__i386) || defined(__amd64) 15714 /* 15715 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15716 * sddef.h for the SPARC platform, while x86 uses one binary 15717 * for both SCSI and FC.
The SD_RETRY_DELAY value needs to be adjusted here 15719 * whenever SD_RETRY_DELAY changes in sddef.h. 15720 */ 15721 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15722 sd_print_sense_failed_msg, msgp, EIO, 15723 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15724 #else 15725 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15726 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15727 #endif 15728 15729 return (SD_SENSE_DATA_IS_INVALID); 15730 } 15731 15732 15733 15734 /* 15735 * Function: sd_decode_sense 15736 * 15737 * Description: Take recovery action(s) when SCSI Sense Data is received. 15738 * 15739 * Context: Interrupt context. 15740 */ 15741 15742 static void 15743 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15744 struct scsi_pkt *pktp) 15745 { 15746 uint8_t sense_key; 15747 15748 ASSERT(un != NULL); 15749 ASSERT(mutex_owned(SD_MUTEX(un))); 15750 ASSERT(bp != NULL); 15751 ASSERT(bp != un->un_rqs_bp); 15752 ASSERT(xp != NULL); 15753 ASSERT(pktp != NULL); 15754 15755 sense_key = scsi_sense_key(xp->xb_sense_data); 15756 15757 switch (sense_key) { 15758 case KEY_NO_SENSE: 15759 sd_sense_key_no_sense(un, bp, xp, pktp); 15760 break; 15761 case KEY_RECOVERABLE_ERROR: 15762 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15763 bp, xp, pktp); 15764 break; 15765 case KEY_NOT_READY: 15766 sd_sense_key_not_ready(un, xp->xb_sense_data, 15767 bp, xp, pktp); 15768 break; 15769 case KEY_MEDIUM_ERROR: 15770 case KEY_HARDWARE_ERROR: 15771 sd_sense_key_medium_or_hardware_error(un, 15772 xp->xb_sense_data, bp, xp, pktp); 15773 break; 15774 case KEY_ILLEGAL_REQUEST: 15775 sd_sense_key_illegal_request(un, bp, xp, pktp); 15776 break; 15777 case KEY_UNIT_ATTENTION: 15778 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15779 bp, xp, pktp); 15780 break; 15781 case KEY_WRITE_PROTECT: 15782 case KEY_VOLUME_OVERFLOW: 15783 case KEY_MISCOMPARE: 15784 sd_sense_key_fail_command(un, bp, xp, pktp); 15785 break; 15786 case KEY_BLANK_CHECK: 15787 sd_sense_key_blank_check(un, bp, xp, pktp); 15788 break; 15789 case KEY_ABORTED_COMMAND: 15790 sd_sense_key_aborted_command(un, bp, xp, pktp); 15791 break; 15792 case KEY_VENDOR_UNIQUE: 15793 case KEY_COPY_ABORTED: 15794 case KEY_EQUAL: 15795 case KEY_RESERVED: 15796 default: 15797 sd_sense_key_default(un, xp->xb_sense_data, 15798 bp, xp, pktp); 15799 break; 15800 } 15801 } 15802 15803 15804 /* 15805 * Function: sd_dump_memory 15806 * 15807 * Description: Debug logging routine to print the contents of a user provided 15808 * buffer. The output of the buffer is broken up into 256 byte 15809 * segments due to a size constraint of the scsi_log 15810 * implementation.
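 *
 * For example, a call such as (hypothetical values)
 *
 *	sd_dump_memory(un, SD_LOG_IO, "Sense Data", sensep,
 *	    SENSE_LENGTH, SD_LOG_HEX);
 *
 * would log output of essentially this form, with a fresh
 * "Sense Data:" prefix for each 256-byte chunk:
 *
 *	Sense Data: 0x70 0x00 0x05 0x00 ...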
15811 * 15812 * Arguments: un - ptr to softstate 15813 * comp - component mask 15814 * title - "title" string to precede data when printed 15815 * data - ptr to data block to be printed 15816 * len - size of data block to be printed 15817 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 15818 * 15819 * Context: May be called from interrupt context 15820 */ 15821 15822 #define SD_DUMP_MEMORY_BUF_SIZE 256 15823 15824 static char *sd_dump_format_string[] = { 15825 " 0x%02x", 15826 " %c" 15827 }; 15828 15829 static void 15830 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 15831 int len, int fmt) 15832 { 15833 int i, j; 15834 int avail_count; 15835 int start_offset; 15836 int end_offset; 15837 size_t entry_len; 15838 char *bufp; 15839 char *local_buf; 15840 char *format_string; 15841 15842 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 15843 15844 /* 15845 * In the debug version of the driver, this function is called from a 15846 * number of places which are NOPs in the release driver. 15847 * The debug driver therefore has additional methods of filtering 15848 * debug output. 15849 */ 15850 #ifdef SDDEBUG 15851 /* 15852 * In the debug version of the driver we can reduce the amount of debug 15853 * messages by setting sd_error_level to something other than 15854 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 15855 * sd_component_mask. 15856 */ 15857 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 15858 (sd_error_level != SCSI_ERR_ALL)) { 15859 return; 15860 } 15861 if (((sd_component_mask & comp) == 0) || 15862 (sd_error_level != SCSI_ERR_ALL)) { 15863 return; 15864 } 15865 #else 15866 if (sd_error_level != SCSI_ERR_ALL) { 15867 return; 15868 } 15869 #endif 15870 15871 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 15872 bufp = local_buf; 15873 /* 15874 * Available length is the length of local_buf[], minus the 15875 * length of the title string, minus one for the ":", minus 15876 * one for the newline, minus one for the NULL terminator. 15877 * This gives the #bytes available for holding the printed 15878 * values from the given data buffer. 15879 */ 15880 if (fmt == SD_LOG_HEX) { 15881 format_string = sd_dump_format_string[0]; 15882 } else /* SD_LOG_CHAR */ { 15883 format_string = sd_dump_format_string[1]; 15884 } 15885 /* 15886 * Available count is the number of elements from the given 15887 * data buffer that we can fit into the available length. 15888 * This is based upon the size of the format string used. 15889 * Make one entry and find its size. 15890 */ 15891 (void) sprintf(bufp, format_string, data[0]); 15892 entry_len = strlen(bufp); 15893 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 15894 15895 j = 0; 15896 while (j < len) { 15897 bufp = local_buf; 15898 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 15899 start_offset = j; 15900 15901 end_offset = start_offset + avail_count; 15902 15903 (void) sprintf(bufp, "%s:", title); 15904 bufp += strlen(bufp); 15905 for (i = start_offset; ((i < end_offset) && (j < len)); 15906 i++, j++) { 15907 (void) sprintf(bufp, format_string, data[i]); 15908 bufp += entry_len; 15909 } 15910 (void) sprintf(bufp, "\n"); 15911 15912 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 15913 } 15914 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 15915 } 15916 15917 /* 15918 * Function: sd_print_sense_msg 15919 * 15920 * Description: Log a message based upon the given sense data.
15921 * 15922 * Arguments: un - ptr to associated softstate 15923 * bp - ptr to buf(9S) for the command 15924 * arg - ptr to associated sd_sense_info struct 15925 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15926 * or SD_NO_RETRY_ISSUED 15927 * 15928 * Context: May be called from interrupt context 15929 */ 15930 15931 static void 15932 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15933 { 15934 struct sd_xbuf *xp; 15935 struct scsi_pkt *pktp; 15936 uint8_t *sensep; 15937 daddr_t request_blkno; 15938 diskaddr_t err_blkno; 15939 int severity; 15940 int pfa_flag; 15941 extern struct scsi_key_strings scsi_cmds[]; 15942 15943 ASSERT(un != NULL); 15944 ASSERT(mutex_owned(SD_MUTEX(un))); 15945 ASSERT(bp != NULL); 15946 xp = SD_GET_XBUF(bp); 15947 ASSERT(xp != NULL); 15948 pktp = SD_GET_PKTP(bp); 15949 ASSERT(pktp != NULL); 15950 ASSERT(arg != NULL); 15951 15952 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 15953 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 15954 15955 if ((code == SD_DELAYED_RETRY_ISSUED) || 15956 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 15957 severity = SCSI_ERR_RETRYABLE; 15958 } 15959 15960 /* Use absolute block number for the request block number */ 15961 request_blkno = xp->xb_blkno; 15962 15963 /* 15964 * Now try to get the error block number from the sense data 15965 */ 15966 sensep = xp->xb_sense_data; 15967 15968 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 15969 (uint64_t *)&err_blkno)) { 15970 /* 15971 * We retrieved the error block number from the information 15972 * portion of the sense data. 15973 * 15974 * For USCSI commands we are better off using the error 15975 * block no. as the requested block no. (This is the best 15976 * we can estimate.) 15977 */ 15978 if ((SD_IS_BUFIO(xp) == FALSE) && 15979 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 15980 request_blkno = err_blkno; 15981 } 15982 } else { 15983 /* 15984 * Without the es_valid bit set (for fixed format) or an 15985 * information descriptor (for descriptor format) we cannot 15986 * be certain of the error blkno, so just use the 15987 * request_blkno. 15988 */ 15989 err_blkno = (diskaddr_t)request_blkno; 15990 } 15991 15992 /* 15993 * The following will log the buffer contents for the release driver 15994 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 15995 * level is set to verbose.
15996 */ 15997 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15998 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15999 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 16000 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 16001 16002 if (pfa_flag == FALSE) { 16003 /* This is normally only set for USCSI */ 16004 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 16005 return; 16006 } 16007 16008 if ((SD_IS_BUFIO(xp) == TRUE) && 16009 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 16010 (severity < sd_error_level))) { 16011 return; 16012 } 16013 } 16014 16015 /* 16016 * Check for Sonoma Failover and keep a count of how many failed I/O's 16017 */ 16018 if ((SD_IS_LSI(un)) && 16019 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 16020 (scsi_sense_asc(sensep) == 0x94) && 16021 (scsi_sense_ascq(sensep) == 0x01)) { 16022 un->un_sonoma_failure_count++; 16023 if (un->un_sonoma_failure_count > 1) { 16024 return; 16025 } 16026 } 16027 16028 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 16029 request_blkno, err_blkno, scsi_cmds, 16030 (struct scsi_extended_sense *)sensep, 16031 un->un_additional_codes, NULL); 16032 } 16033 16034 /* 16035 * Function: sd_sense_key_no_sense 16036 * 16037 * Description: Recovery action when sense data was not received. 16038 * 16039 * Context: May be called from interrupt context 16040 */ 16041 16042 static void 16043 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 16044 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16045 { 16046 struct sd_sense_info si; 16047 16048 ASSERT(un != NULL); 16049 ASSERT(mutex_owned(SD_MUTEX(un))); 16050 ASSERT(bp != NULL); 16051 ASSERT(xp != NULL); 16052 ASSERT(pktp != NULL); 16053 16054 si.ssi_severity = SCSI_ERR_FATAL; 16055 si.ssi_pfa_flag = FALSE; 16056 16057 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16058 16059 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16060 &si, EIO, (clock_t)0, NULL); 16061 } 16062 16063 16064 /* 16065 * Function: sd_sense_key_recoverable_error 16066 * 16067 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 16068 * 16069 * Context: May be called from interrupt context 16070 */ 16071 16072 static void 16073 sd_sense_key_recoverable_error(struct sd_lun *un, 16074 uint8_t *sense_datap, 16075 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16076 { 16077 struct sd_sense_info si; 16078 uint8_t asc = scsi_sense_asc(sense_datap); 16079 16080 ASSERT(un != NULL); 16081 ASSERT(mutex_owned(SD_MUTEX(un))); 16082 ASSERT(bp != NULL); 16083 ASSERT(xp != NULL); 16084 ASSERT(pktp != NULL); 16085 16086 /* 16087 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 16088 */ 16089 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 16090 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16091 si.ssi_severity = SCSI_ERR_INFO; 16092 si.ssi_pfa_flag = TRUE; 16093 } else { 16094 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16095 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 16096 si.ssi_severity = SCSI_ERR_RECOVERED; 16097 si.ssi_pfa_flag = FALSE; 16098 } 16099 16100 if (pktp->pkt_resid == 0) { 16101 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16102 sd_return_command(un, bp); 16103 return; 16104 } 16105 16106 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16107 &si, EIO, (clock_t)0, NULL); 16108 } 16109 16110 16111 16112 16113 /* 16114 * Function: sd_sense_key_not_ready 16115 * 16116 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
16117 * 16118 * Context: May be called from interrupt context 16119 */ 16120 16121 static void 16122 sd_sense_key_not_ready(struct sd_lun *un, 16123 uint8_t *sense_datap, 16124 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16125 { 16126 struct sd_sense_info si; 16127 uint8_t asc = scsi_sense_asc(sense_datap); 16128 uint8_t ascq = scsi_sense_ascq(sense_datap); 16129 16130 ASSERT(un != NULL); 16131 ASSERT(mutex_owned(SD_MUTEX(un))); 16132 ASSERT(bp != NULL); 16133 ASSERT(xp != NULL); 16134 ASSERT(pktp != NULL); 16135 16136 si.ssi_severity = SCSI_ERR_FATAL; 16137 si.ssi_pfa_flag = FALSE; 16138 16139 /* 16140 * Update error stats after first NOT READY error. Disks may have 16141 * been powered down and may need to be restarted. For CDROMs, 16142 * report NOT READY errors only if media is present. 16143 */ 16144 if ((ISCD(un) && (asc == 0x3A)) || 16145 (xp->xb_nr_retry_count > 0)) { 16146 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16147 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 16148 } 16149 16150 /* 16151 * Just fail if the "not ready" retry limit has been reached. 16152 */ 16153 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 16154 /* Special check for error message printing for removables. */ 16155 if (un->un_f_has_removable_media && (asc == 0x04) && 16156 (ascq >= 0x04)) { 16157 si.ssi_severity = SCSI_ERR_ALL; 16158 } 16159 goto fail_command; 16160 } 16161 16162 /* 16163 * Check the ASC and ASCQ in the sense data as needed, to determine 16164 * what to do. 16165 */ 16166 switch (asc) { 16167 case 0x04: /* LOGICAL UNIT NOT READY */ 16168 /* 16169 * Disk drives that don't spin up result in a very long delay 16170 * in format without warning messages. We will log a message 16171 * if the error level is set to verbose. 16172 */ 16173 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16174 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16175 "logical unit not ready, resetting disk\n"); 16176 } 16177 16178 /* 16179 * There are different requirements for CDROMs and disks for 16180 * the number of retries. If a CD-ROM is reporting this, it is 16181 * probably reading the TOC and is in the process of getting 16182 * ready, so we should keep on trying for a long time to make 16183 * sure that all types of media are taken into account (for 16184 * some media the drive takes a long time to read the TOC). For 16185 * disks we do not want to retry this too many times as this 16186 * can cause a long hang in format when the drive refuses to 16187 * spin up (a very common failure). 16188 */ 16189 switch (ascq) { 16190 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 16191 /* 16192 * Disk drives frequently refuse to spin up which 16193 * results in a very long hang in format without 16194 * warning messages. 16195 * 16196 * Note: This code preserves the legacy behavior of 16197 * comparing xb_nr_retry_count against zero for fibre 16198 * channel targets instead of comparing against the 16199 * un_reset_retry_count value. The reason for this 16200 * discrepancy has been so utterly lost beneath the 16201 * Sands of Time that even Indiana Jones could not 16202 * find it.
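 *
 * Restated from the code below for clarity: fibre channel targets
 * are reset as soon as the first "not ready" retry has been used
 * (xb_nr_retry_count > 0), while other targets are reset only once
 * xb_nr_retry_count exceeds un_reset_retry_count; setting
 * SD_LOGMASK_DIAG in sd_level_mask forces the reset in either case.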
*/ 16204 if (un->un_f_is_fibre == TRUE) { 16205 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16206 (xp->xb_nr_retry_count > 0)) && 16207 (un->un_startstop_timeid == NULL)) { 16208 scsi_log(SD_DEVINFO(un), sd_label, 16209 CE_WARN, "logical unit not ready, " 16210 "resetting disk\n"); 16211 sd_reset_target(un, pktp); 16212 } 16213 } else { 16214 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16215 (xp->xb_nr_retry_count > 16216 un->un_reset_retry_count)) && 16217 (un->un_startstop_timeid == NULL)) { 16218 scsi_log(SD_DEVINFO(un), sd_label, 16219 CE_WARN, "logical unit not ready, " 16220 "resetting disk\n"); 16221 sd_reset_target(un, pktp); 16222 } 16223 } 16224 break; 16225 16226 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 16227 /* 16228 * If the target is in the process of becoming 16229 * ready, just proceed with the retry. This can 16230 * happen with CD-ROMs that take a long time to 16231 * read TOC after a power cycle or reset. 16232 */ 16233 goto do_retry; 16234 16235 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 16236 break; 16237 16238 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 16239 /* 16240 * Retries cannot help here so just fail right away. 16241 */ 16242 goto fail_command; 16243 16244 case 0x88: 16245 /* 16246 * Vendor-unique code for T3/T4: it indicates a 16247 * path problem in a multipathed config, but as far as 16248 * the target driver is concerned it equates to a fatal 16249 * error, so we should just fail the command right away 16250 * (without printing anything to the console). If this 16251 * is not a T3/T4, fall thru to the default recovery 16252 * action. 16253 * T3/T4 is FC only, don't need to check is_fibre 16254 */ 16255 if (SD_IS_T3(un) || SD_IS_T4(un)) { 16256 sd_return_failed_command(un, bp, EIO); 16257 return; 16258 } 16259 /* FALLTHRU */ 16260 16261 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 16262 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 16263 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 16264 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 16265 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 16266 default: /* Possible future codes in SCSI spec? */ 16267 /* 16268 * For removable-media devices, do not retry if 16269 * ASCQ > 2 as these result mostly from USCSI commands 16270 * on MMC devices issued to check status of an 16271 * operation initiated in immediate mode. Also for 16272 * ASCQ >= 4 do not print console messages as these 16273 * mainly represent a user-initiated operation 16274 * instead of a system failure. 16275 */ 16276 if (un->un_f_has_removable_media) { 16277 si.ssi_severity = SCSI_ERR_ALL; 16278 goto fail_command; 16279 } 16280 break; 16281 } 16282 16283 /* 16284 * As part of our recovery attempt for the NOT READY 16285 * condition, we issue a START STOP UNIT command. However, 16286 * we want to wait for a short delay before attempting this 16287 * as there may still be more commands coming back from the 16288 * target with the check condition. To do this we use 16289 * timeout(9F) to call sd_start_stop_unit_callback() after 16290 * the delay interval expires. (sd_start_stop_unit_callback() 16291 * dispatches sd_start_stop_unit_task(), which will issue 16292 * the actual START STOP UNIT command.) The delay interval 16293 * is one-half of the delay that we will use to retry the 16294 * command that generated the NOT READY condition.
16295 * 16296 * Note that we could just dispatch sd_start_stop_unit_task() 16297 * from here and allow it to sleep for the delay interval, 16298 * but then we would be tying up the taskq thread 16299 * unnecessarily for the duration of the delay. 16300 * 16301 * Do not issue the START STOP UNIT if the current command 16302 * is already a START STOP UNIT. 16303 */ 16304 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 16305 break; 16306 } 16307 16308 /* 16309 * Do not schedule the timeout if one is already pending. 16310 */ 16311 if (un->un_startstop_timeid != NULL) { 16312 SD_INFO(SD_LOG_ERROR, un, 16313 "sd_sense_key_not_ready: restart already issued to" 16314 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 16315 ddi_get_instance(SD_DEVINFO(un))); 16316 break; 16317 } 16318 16319 /* 16320 * Schedule the START STOP UNIT command, then queue the command 16321 * for a retry. 16322 * 16323 * Note: A timeout is not scheduled for this retry because we 16324 * want the retry to be serial with the START_STOP_UNIT. The 16325 * retry will be started when the START_STOP_UNIT is completed 16326 * in sd_start_stop_unit_task. 16327 */ 16328 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 16329 un, SD_BSY_TIMEOUT / 2); 16330 xp->xb_nr_retry_count++; 16331 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 16332 return; 16333 16334 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 16335 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16336 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16337 "unit does not respond to selection\n"); 16338 } 16339 break; 16340 16341 case 0x3A: /* MEDIUM NOT PRESENT */ 16342 if (sd_error_level >= SCSI_ERR_FATAL) { 16343 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16344 "Caddy not inserted in drive\n"); 16345 } 16346 16347 sr_ejected(un); 16348 un->un_mediastate = DKIO_EJECTED; 16349 /* The state has changed, inform the media watch routines */ 16350 cv_broadcast(&un->un_state_cv); 16351 /* Just fail if no media is present in the drive. */ 16352 goto fail_command; 16353 16354 default: 16355 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16356 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 16357 "Unit not Ready. Additional sense code 0x%x\n", 16358 asc); 16359 } 16360 break; 16361 } 16362 16363 do_retry: 16364 16365 /* 16366 * Retry the command, as some targets may report NOT READY for 16367 * several seconds after being reset. 16368 */ 16369 xp->xb_nr_retry_count++; 16370 si.ssi_severity = SCSI_ERR_RETRYABLE; 16371 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 16372 &si, EIO, SD_BSY_TIMEOUT, NULL); 16373 16374 return; 16375 16376 fail_command: 16377 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16378 sd_return_failed_command(un, bp, EIO); 16379 } 16380 16381 16382 16383 /* 16384 * Function: sd_sense_key_medium_or_hardware_error 16385 * 16386 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 16387 * sense key.
16388 * 16389 * Context: May be called from interrupt context 16390 */ 16391 16392 static void 16393 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16394 uint8_t *sense_datap, 16395 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16396 { 16397 struct sd_sense_info si; 16398 uint8_t sense_key = scsi_sense_key(sense_datap); 16399 uint8_t asc = scsi_sense_asc(sense_datap); 16400 16401 ASSERT(un != NULL); 16402 ASSERT(mutex_owned(SD_MUTEX(un))); 16403 ASSERT(bp != NULL); 16404 ASSERT(xp != NULL); 16405 ASSERT(pktp != NULL); 16406 16407 si.ssi_severity = SCSI_ERR_FATAL; 16408 si.ssi_pfa_flag = FALSE; 16409 16410 if (sense_key == KEY_MEDIUM_ERROR) { 16411 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16412 } 16413 16414 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16415 16416 if ((un->un_reset_retry_count != 0) && 16417 (xp->xb_retry_count == un->un_reset_retry_count)) { 16418 mutex_exit(SD_MUTEX(un)); 16419 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16420 if (un->un_f_allow_bus_device_reset == TRUE) { 16421 16422 boolean_t try_resetting_target = B_TRUE; 16423 16424 /* 16425 * We need to be able to handle specific ASC values when 16426 * we are handling a KEY_HARDWARE_ERROR. In particular, 16427 * taking the default action of resetting the target may 16428 * not be the appropriate way to attempt recovery. 16429 * Resetting a target because of a single LUN failure 16430 * victimizes all LUNs on that target. 16431 * 16432 * This is true for the LSI arrays: if an LSI 16433 * array controller returns an ASC of 0x84 (LUN Dead), we 16434 * should trust it. 16435 */ 16436 16437 if (sense_key == KEY_HARDWARE_ERROR) { 16438 switch (asc) { 16439 case 0x84: 16440 if (SD_IS_LSI(un)) { 16441 try_resetting_target = B_FALSE; 16442 } 16443 break; 16444 default: 16445 break; 16446 } 16447 } 16448 16449 if (try_resetting_target == B_TRUE) { 16450 int reset_retval = 0; 16451 if (un->un_f_lun_reset_enabled == TRUE) { 16452 SD_TRACE(SD_LOG_IO_CORE, un, 16453 "sd_sense_key_medium_or_hardware_" 16454 "error: issuing RESET_LUN\n"); 16455 reset_retval = 16456 scsi_reset(SD_ADDRESS(un), 16457 RESET_LUN); 16458 } 16459 if (reset_retval == 0) { 16460 SD_TRACE(SD_LOG_IO_CORE, un, 16461 "sd_sense_key_medium_or_hardware_" 16462 "error: issuing RESET_TARGET\n"); 16463 (void) scsi_reset(SD_ADDRESS(un), 16464 RESET_TARGET); 16465 } 16466 } 16467 } 16468 mutex_enter(SD_MUTEX(un)); 16469 } 16470 16471 /* 16472 * This really ought to be a fatal error, but we will retry anyway 16473 * as some drives report this as a spurious error. 16474 */ 16475 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16476 &si, EIO, (clock_t)0, NULL); 16477 } 16478 16479 16480 16481 /* 16482 * Function: sd_sense_key_illegal_request 16483 * 16484 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
16485 * 16486 * Context: May be called from interrupt context 16487 */ 16488 16489 static void 16490 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16491 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16492 { 16493 struct sd_sense_info si; 16494 16495 ASSERT(un != NULL); 16496 ASSERT(mutex_owned(SD_MUTEX(un))); 16497 ASSERT(bp != NULL); 16498 ASSERT(xp != NULL); 16499 ASSERT(pktp != NULL); 16500 16501 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16502 16503 si.ssi_severity = SCSI_ERR_INFO; 16504 si.ssi_pfa_flag = FALSE; 16505 16506 /* Pointless to retry if the target thinks it's an illegal request */ 16507 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16508 sd_return_failed_command(un, bp, EIO); 16509 } 16510 16511 16512 16513 16514 /* 16515 * Function: sd_sense_key_unit_attention 16516 * 16517 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16518 * 16519 * Context: May be called from interrupt context 16520 */ 16521 16522 static void 16523 sd_sense_key_unit_attention(struct sd_lun *un, 16524 uint8_t *sense_datap, 16525 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16526 { 16527 /* 16528 * For UNIT ATTENTION we allow retries for one minute. Devices 16529 * like Sonoma can return UNIT ATTENTION for close to a minute 16530 * under certain conditions. 16531 */ 16532 int retry_check_flag = SD_RETRIES_UA; 16533 boolean_t kstat_updated = B_FALSE; 16534 struct sd_sense_info si; 16535 uint8_t asc = scsi_sense_asc(sense_datap); 16536 uint8_t ascq = scsi_sense_ascq(sense_datap); 16537 16538 ASSERT(un != NULL); 16539 ASSERT(mutex_owned(SD_MUTEX(un))); 16540 ASSERT(bp != NULL); 16541 ASSERT(xp != NULL); 16542 ASSERT(pktp != NULL); 16543 16544 si.ssi_severity = SCSI_ERR_INFO; 16545 si.ssi_pfa_flag = FALSE; 16546 16547 16548 switch (asc) { 16549 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16550 if (sd_report_pfa != 0) { 16551 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16552 si.ssi_pfa_flag = TRUE; 16553 retry_check_flag = SD_RETRIES_STANDARD; 16554 goto do_retry; 16555 } 16556 16557 break; 16558 16559 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16560 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16561 un->un_resvd_status |= 16562 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16563 } 16564 #ifdef _LP64 16565 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16566 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16567 un, KM_NOSLEEP) == 0) { 16568 /* 16569 * If we can't dispatch the task we'll just 16570 * live without descriptor sense. We can 16571 * try again on the next "unit attention". 16572 */ 16573 SD_ERROR(SD_LOG_ERROR, un, 16574 "sd_sense_key_unit_attention: " 16575 "Could not dispatch " 16576 "sd_reenable_dsense_task\n"); 16577 } 16578 } 16579 #endif /* _LP64 */ 16580 /* FALLTHRU */ 16581 16582 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16583 if (!un->un_f_has_removable_media) { 16584 break; 16585 } 16586 16587 /* 16588 * When we get a unit attention from a removable-media device, 16589 * it may be in a state that will take a long time to recover 16590 * (e.g., from a reset). Since we are executing in interrupt 16591 * context here, we cannot wait around for the device to come 16592 * back. So hand this command off to sd_media_change_task() 16593 * for deferred processing under taskq thread context. (Note 16594 * that the command still may be failed if a problem is 16595 * encountered at a later time.)
*/ 16597 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16598 KM_NOSLEEP) == 0) { 16599 /* 16600 * Cannot dispatch the request so fail the command. 16601 */ 16602 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16603 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16604 si.ssi_severity = SCSI_ERR_FATAL; 16605 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16606 sd_return_failed_command(un, bp, EIO); 16607 } 16608 16609 /* 16610 * If we failed to dispatch sd_media_change_task(), the kstat was 16611 * already updated above. If the dispatch succeeded, the task will 16612 * update the kstat later if it encounters an error. Either way, 16613 * set the kstat_updated flag here. 16614 */ 16615 kstat_updated = B_TRUE; 16616 16617 /* 16618 * Either the command has been successfully dispatched to a 16619 * taskq for retrying, or the dispatch failed. In either case 16620 * do NOT retry again by calling sd_retry_command. This sets up 16621 * two retries of the same command and when one completes and 16622 * frees the resources the other will access freed memory, 16623 * a bad thing. 16624 */ 16625 return; 16626 16627 default: 16628 break; 16629 } 16630 16631 /* 16632 * ASC ASCQ 16633 * 2A 09 Capacity data has changed 16634 * 2A 01 Mode parameters changed 16635 * 3F 0E Reported luns data has changed 16636 * Arrays that support logical unit expansion should report 16637 * capacity changes (2Ah/09). "Mode parameters changed" and 16638 * "reported luns data has changed" serve as approximations. 16639 */ 16640 if (((asc == 0x2a) && (ascq == 0x09)) || 16641 ((asc == 0x2a) && (ascq == 0x01)) || 16642 ((asc == 0x3f) && (ascq == 0x0e))) { 16643 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 16644 KM_NOSLEEP) == 0) { 16645 SD_ERROR(SD_LOG_ERROR, un, 16646 "sd_sense_key_unit_attention: " 16647 "Could not dispatch sd_target_change_task\n"); 16648 } 16649 } 16650 16651 /* 16652 * Update kstat if we haven't done that. 16653 */ 16654 if (!kstat_updated) { 16655 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16656 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16657 } 16658 16659 do_retry: 16660 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16661 EIO, SD_UA_RETRY_DELAY, NULL); 16662 } 16663 16664 16665 16666 /* 16667 * Function: sd_sense_key_fail_command 16668 * 16669 * Description: Used to fail a command when we don't like the sense key that 16670 * was returned. 16671 * 16672 * Context: May be called from interrupt context 16673 */ 16674 16675 static void 16676 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16677 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16678 { 16679 struct sd_sense_info si; 16680 16681 ASSERT(un != NULL); 16682 ASSERT(mutex_owned(SD_MUTEX(un))); 16683 ASSERT(bp != NULL); 16684 ASSERT(xp != NULL); 16685 ASSERT(pktp != NULL); 16686 16687 si.ssi_severity = SCSI_ERR_FATAL; 16688 si.ssi_pfa_flag = FALSE; 16689 16690 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16691 sd_return_failed_command(un, bp, EIO); 16692 } 16693 16694 16695 16696 /* 16697 * Function: sd_sense_key_blank_check 16698 * 16699 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16700 * Has no monetary connotation.
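 *		(A BLANK CHECK is typically raised when a read reaches
 *		blank or unwritten media on WORM/optical devices.)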
16701 * 16702 * Context: May be called from interrupt context 16703 */ 16704 16705 static void 16706 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16707 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16708 { 16709 struct sd_sense_info si; 16710 16711 ASSERT(un != NULL); 16712 ASSERT(mutex_owned(SD_MUTEX(un))); 16713 ASSERT(bp != NULL); 16714 ASSERT(xp != NULL); 16715 ASSERT(pktp != NULL); 16716 16717 /* 16718 * Blank check is not fatal for removable devices, therefore 16719 * it does not require a console message. 16720 */ 16721 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16722 SCSI_ERR_FATAL; 16723 si.ssi_pfa_flag = FALSE; 16724 16725 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16726 sd_return_failed_command(un, bp, EIO); 16727 } 16728 16729 16730 16731 16732 /* 16733 * Function: sd_sense_key_aborted_command 16734 * 16735 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 16736 * 16737 * Context: May be called from interrupt context 16738 */ 16739 16740 static void 16741 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16742 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16743 { 16744 struct sd_sense_info si; 16745 16746 ASSERT(un != NULL); 16747 ASSERT(mutex_owned(SD_MUTEX(un))); 16748 ASSERT(bp != NULL); 16749 ASSERT(xp != NULL); 16750 ASSERT(pktp != NULL); 16751 16752 si.ssi_severity = SCSI_ERR_FATAL; 16753 si.ssi_pfa_flag = FALSE; 16754 16755 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16756 16757 /* 16758 * This really ought to be a fatal error, but we will retry anyway 16759 * as some drives report this as a spurious error. 16760 */ 16761 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16762 &si, EIO, drv_usectohz(100000), NULL); 16763 } 16764 16765 16766 16767 /* 16768 * Function: sd_sense_key_default 16769 * 16770 * Description: Default recovery action for several SCSI sense keys (basically 16771 * attempts a retry). 16772 * 16773 * Context: May be called from interrupt context 16774 */ 16775 16776 static void 16777 sd_sense_key_default(struct sd_lun *un, 16778 uint8_t *sense_datap, 16779 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16780 { 16781 struct sd_sense_info si; 16782 uint8_t sense_key = scsi_sense_key(sense_datap); 16783 16784 ASSERT(un != NULL); 16785 ASSERT(mutex_owned(SD_MUTEX(un))); 16786 ASSERT(bp != NULL); 16787 ASSERT(xp != NULL); 16788 ASSERT(pktp != NULL); 16789 16790 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16791 16792 /* 16793 * Undecoded sense key. Attempt retries and hope that will fix 16794 * the problem. Otherwise, we're dead. 16795 */ 16796 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16797 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16798 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16799 } 16800 16801 si.ssi_severity = SCSI_ERR_FATAL; 16802 si.ssi_pfa_flag = FALSE; 16803 16804 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16805 &si, EIO, (clock_t)0, NULL); 16806 } 16807 16808 16809 16810 /* 16811 * Function: sd_print_retry_msg 16812 * 16813 * Description: Print a message indicating the retry action being taken. 16814 * 16815 * Arguments: un - ptr to associated softstate 16816 * bp - ptr to buf(9S) for the command 16817 * arg - not used. 
16818 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16819 * or SD_NO_RETRY_ISSUED 16820 * 16821 * Context: May be called from interrupt context 16822 */ 16823 /* ARGSUSED */ 16824 static void 16825 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16826 { 16827 struct sd_xbuf *xp; 16828 struct scsi_pkt *pktp; 16829 char *reasonp; 16830 char *msgp; 16831 16832 ASSERT(un != NULL); 16833 ASSERT(mutex_owned(SD_MUTEX(un))); 16834 ASSERT(bp != NULL); 16835 pktp = SD_GET_PKTP(bp); 16836 ASSERT(pktp != NULL); 16837 xp = SD_GET_XBUF(bp); 16838 ASSERT(xp != NULL); 16839 16840 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16841 mutex_enter(&un->un_pm_mutex); 16842 if ((un->un_state == SD_STATE_SUSPENDED) || 16843 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16844 (pktp->pkt_flags & FLAG_SILENT)) { 16845 mutex_exit(&un->un_pm_mutex); 16846 goto update_pkt_reason; 16847 } 16848 mutex_exit(&un->un_pm_mutex); 16849 16850 /* 16851 * Suppress messages if they are all the same pkt_reason; with 16852 * TQ, many (up to 256) are returned with the same pkt_reason. 16853 * If we are in panic, then suppress the retry messages. 16854 */ 16855 switch (flag) { 16856 case SD_NO_RETRY_ISSUED: 16857 msgp = "giving up"; 16858 break; 16859 case SD_IMMEDIATE_RETRY_ISSUED: 16860 case SD_DELAYED_RETRY_ISSUED: 16861 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16862 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16863 (sd_error_level != SCSI_ERR_ALL))) { 16864 return; 16865 } 16866 msgp = "retrying command"; 16867 break; 16868 default: 16869 goto update_pkt_reason; 16870 } 16871 16872 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16873 scsi_rname(pktp->pkt_reason)); 16874 16875 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16876 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16877 16878 update_pkt_reason: 16879 /* 16880 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16881 * This is to prevent multiple console messages for the same failure 16882 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16883 * when the command is retried successfully because there still may be 16884 * more commands coming back with the same value of pktp->pkt_reason. 16885 */ 16886 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16887 un->un_last_pkt_reason = pktp->pkt_reason; 16888 } 16889 } 16890 16891 16892 /* 16893 * Function: sd_print_cmd_incomplete_msg 16894 * 16895 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16896 * 16897 * Arguments: un - ptr to associated softstate 16898 * bp - ptr to buf(9S) for the command 16899 * arg - passed to sd_print_retry_msg() 16900 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16901 * or SD_NO_RETRY_ISSUED 16902 * 16903 * Context: May be called from interrupt context 16904 */ 16905 16906 static void 16907 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16908 int code) 16909 { 16910 dev_info_t *dip; 16911 16912 ASSERT(un != NULL); 16913 ASSERT(mutex_owned(SD_MUTEX(un))); 16914 ASSERT(bp != NULL); 16915 16916 switch (code) { 16917 case SD_NO_RETRY_ISSUED: 16918 /* Command was failed. Someone turned off this target? 
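 * If so, the code below logs the failure once (unless the device
 * is being detached after removal) and moves the softstate to
 * SD_STATE_OFFLINE via New_state().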
*/ 16919 if (un->un_state != SD_STATE_OFFLINE) { 16920 /* 16921 * Suppress message if we are detaching and 16922 * device has been disconnected 16923 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16924 * private interface and not part of the DDI 16925 */ 16926 dip = un->un_sd->sd_dev; 16927 if (!(DEVI_IS_DETACHING(dip) && 16928 DEVI_IS_DEVICE_REMOVED(dip))) { 16929 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16930 "disk not responding to selection\n"); 16931 } 16932 New_state(un, SD_STATE_OFFLINE); 16933 } 16934 break; 16935 16936 case SD_DELAYED_RETRY_ISSUED: 16937 case SD_IMMEDIATE_RETRY_ISSUED: 16938 default: 16939 /* Command was successfully queued for retry */ 16940 sd_print_retry_msg(un, bp, arg, code); 16941 break; 16942 } 16943 } 16944 16945 16946 /* 16947 * Function: sd_pkt_reason_cmd_incomplete 16948 * 16949 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 16950 * 16951 * Context: May be called from interrupt context 16952 */ 16953 16954 static void 16955 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16956 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16957 { 16958 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16959 16960 ASSERT(un != NULL); 16961 ASSERT(mutex_owned(SD_MUTEX(un))); 16962 ASSERT(bp != NULL); 16963 ASSERT(xp != NULL); 16964 ASSERT(pktp != NULL); 16965 16966 /* Do not do a reset if selection did not complete */ 16967 /* Note: Should this not just check the bit? */ 16968 if (pktp->pkt_state != STATE_GOT_BUS) { 16969 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16970 sd_reset_target(un, pktp); 16971 } 16972 16973 /* 16974 * If the target was not successfully selected, then set 16975 * SD_RETRIES_FAILFAST to indicate that we lost communication 16976 * with the target, and further retries and/or commands are 16977 * likely to take a long time. 16978 */ 16979 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16980 flag |= SD_RETRIES_FAILFAST; 16981 } 16982 16983 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16984 16985 sd_retry_command(un, bp, flag, 16986 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16987 } 16988 16989 16990 16991 /* 16992 * Function: sd_pkt_reason_cmd_tran_err 16993 * 16994 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16995 * 16996 * Context: May be called from interrupt context 16997 */ 16998 16999 static void 17000 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 17001 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17002 { 17003 ASSERT(un != NULL); 17004 ASSERT(mutex_owned(SD_MUTEX(un))); 17005 ASSERT(bp != NULL); 17006 ASSERT(xp != NULL); 17007 ASSERT(pktp != NULL); 17008 17009 /* 17010 * Do not reset if we got a parity error, or if 17011 * selection did not complete. 17012 */ 17013 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17014 /* Note: Should this not just check the bit for pkt_state? */ 17015 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 17016 (pktp->pkt_state != STATE_GOT_BUS)) { 17017 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17018 sd_reset_target(un, pktp); 17019 } 17020 17021 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17022 17023 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17024 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17025 } 17026 17027 17028 17029 /* 17030 * Function: sd_pkt_reason_cmd_reset 17031 * 17032 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
17033 * 17034 * Context: May be called from interrupt context 17035 */ 17036 17037 static void 17038 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 17039 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17040 { 17041 ASSERT(un != NULL); 17042 ASSERT(mutex_owned(SD_MUTEX(un))); 17043 ASSERT(bp != NULL); 17044 ASSERT(xp != NULL); 17045 ASSERT(pktp != NULL); 17046 17047 /* The target may still be running the command, so try to reset. */ 17048 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17049 sd_reset_target(un, pktp); 17050 17051 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17052 17053 /* 17054 * If pkt_reason is CMD_RESET chances are that this pkt got 17055 * reset because another target on this bus caused it. The target 17056 * that caused it should get CMD_TIMEOUT with pkt_statistics 17057 * of STAT_TIMEOUT/STAT_DEV_RESET. 17058 */ 17059 17060 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17061 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17062 } 17063 17064 17065 17066 17067 /* 17068 * Function: sd_pkt_reason_cmd_aborted 17069 * 17070 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 17071 * 17072 * Context: May be called from interrupt context 17073 */ 17074 17075 static void 17076 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 17077 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17078 { 17079 ASSERT(un != NULL); 17080 ASSERT(mutex_owned(SD_MUTEX(un))); 17081 ASSERT(bp != NULL); 17082 ASSERT(xp != NULL); 17083 ASSERT(pktp != NULL); 17084 17085 /* The target may still be running the command, so try to reset. */ 17086 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17087 sd_reset_target(un, pktp); 17088 17089 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17090 17091 /* 17092 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17093 * aborted because another target on this bus caused it. The target 17094 * that caused it should get CMD_TIMEOUT with pkt_statistics 17095 * of STAT_TIMEOUT/STAT_DEV_RESET. 17096 */ 17097 17098 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17099 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17100 } 17101 17102 17103 17104 /* 17105 * Function: sd_pkt_reason_cmd_timeout 17106 * 17107 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17108 * 17109 * Context: May be called from interrupt context 17110 */ 17111 17112 static void 17113 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17114 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17115 { 17116 ASSERT(un != NULL); 17117 ASSERT(mutex_owned(SD_MUTEX(un))); 17118 ASSERT(bp != NULL); 17119 ASSERT(xp != NULL); 17120 ASSERT(pktp != NULL); 17121 17122 17123 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17124 sd_reset_target(un, pktp); 17125 17126 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17127 17128 /* 17129 * A command timeout indicates that we could not establish 17130 * communication with the target, so set SD_RETRIES_FAILFAST 17131 * as further retries/commands are likely to take a long time. 17132 */ 17133 sd_retry_command(un, bp, 17134 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17135 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17136 } 17137 17138 17139 17140 /* 17141 * Function: sd_pkt_reason_cmd_unx_bus_free 17142 * 17143 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
17144 * 17145 * Context: May be called from interrupt context 17146 */ 17147 17148 static void 17149 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17150 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17151 { 17152 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17153 17154 ASSERT(un != NULL); 17155 ASSERT(mutex_owned(SD_MUTEX(un))); 17156 ASSERT(bp != NULL); 17157 ASSERT(xp != NULL); 17158 ASSERT(pktp != NULL); 17159 17160 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17161 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17162 17163 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17164 sd_print_retry_msg : NULL; 17165 17166 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17167 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17168 } 17169 17170 17171 /* 17172 * Function: sd_pkt_reason_cmd_tag_reject 17173 * 17174 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 17175 * 17176 * Context: May be called from interrupt context 17177 */ 17178 17179 static void 17180 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17181 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17182 { 17183 ASSERT(un != NULL); 17184 ASSERT(mutex_owned(SD_MUTEX(un))); 17185 ASSERT(bp != NULL); 17186 ASSERT(xp != NULL); 17187 ASSERT(pktp != NULL); 17188 17189 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17190 pktp->pkt_flags = 0; 17191 un->un_tagflags = 0; 17192 if (un->un_f_opt_queueing == TRUE) { 17193 un->un_throttle = min(un->un_throttle, 3); 17194 } else { 17195 un->un_throttle = 1; 17196 } 17197 mutex_exit(SD_MUTEX(un)); 17198 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17199 mutex_enter(SD_MUTEX(un)); 17200 17201 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17202 17203 /* Legacy behavior not to check retry counts here. */ 17204 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17205 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17206 } 17207 17208 17209 /* 17210 * Function: sd_pkt_reason_default 17211 * 17212 * Description: Default recovery actions for SCSA pkt_reason values that 17213 * do not have more explicit recovery actions. 17214 * 17215 * Context: May be called from interrupt context 17216 */ 17217 17218 static void 17219 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17220 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17221 { 17222 ASSERT(un != NULL); 17223 ASSERT(mutex_owned(SD_MUTEX(un))); 17224 ASSERT(bp != NULL); 17225 ASSERT(xp != NULL); 17226 ASSERT(pktp != NULL); 17227 17228 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17229 sd_reset_target(un, pktp); 17230 17231 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17232 17233 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17234 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17235 } 17236 17237 17238 17239 /* 17240 * Function: sd_pkt_status_check_condition 17241 * 17242 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
17243 * 17244 * Context: May be called from interrupt context 17245 */ 17246 17247 static void 17248 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17249 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17250 { 17251 ASSERT(un != NULL); 17252 ASSERT(mutex_owned(SD_MUTEX(un))); 17253 ASSERT(bp != NULL); 17254 ASSERT(xp != NULL); 17255 ASSERT(pktp != NULL); 17256 17257 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17258 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17259 17260 /* 17261 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17262 * command will be retried after the request sense). Otherwise, retry 17263 * the command. Note: we are issuing the request sense even though the 17264 * retry limit may have been reached for the failed command. 17265 */ 17266 if (un->un_f_arq_enabled == FALSE) { 17267 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17268 "no ARQ, sending request sense command\n"); 17269 sd_send_request_sense_command(un, bp, pktp); 17270 } else { 17271 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17272 "ARQ, retrying request sense command\n"); 17273 #if defined(__i386) || defined(__amd64) 17274 /* 17275 * The SD_RETRY_DELAY value needs to be adjusted here 17276 * whenever SD_RETRY_DELAY changes in sddef.h. 17277 */ 17278 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17279 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 17280 NULL); 17281 #else 17282 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 17283 EIO, SD_RETRY_DELAY, NULL); 17284 #endif 17285 } 17286 17287 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 17288 } 17289 17290 17291 /* 17292 * Function: sd_pkt_status_busy 17293 * 17294 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 17295 * 17296 * Context: May be called from interrupt context 17297 */ 17298 17299 static void 17300 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17301 struct scsi_pkt *pktp) 17302 { 17303 ASSERT(un != NULL); 17304 ASSERT(mutex_owned(SD_MUTEX(un))); 17305 ASSERT(bp != NULL); 17306 ASSERT(xp != NULL); 17307 ASSERT(pktp != NULL); 17308 17309 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17310 "sd_pkt_status_busy: entry\n"); 17311 17312 /* If retries are exhausted, just fail the command. */ 17313 if (xp->xb_retry_count >= un->un_busy_retry_count) { 17314 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17315 "device busy too long\n"); 17316 sd_return_failed_command(un, bp, EIO); 17317 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17318 "sd_pkt_status_busy: exit\n"); 17319 return; 17320 } 17321 xp->xb_retry_count++; 17322 17323 /* 17324 * Try to reset the target. However, we do not want to perform 17325 * more than one reset if the device continues to fail. The reset 17326 * will be performed when the retry count reaches the reset 17327 * threshold. This threshold should be set such that at least 17328 * one retry is issued before the reset is performed. 17329 */ 17330 if (xp->xb_retry_count == 17331 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 17332 int rval = 0; 17333 mutex_exit(SD_MUTEX(un)); 17334 if (un->un_f_allow_bus_device_reset == TRUE) { 17335 /* 17336 * First try to reset the LUN; if we cannot then 17337 * try to reset the target.
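 * If both fail, RESET_ALL is attempted below as a last resort.
 *
 * Worked example of the reset threshold above: with
 * un_reset_retry_count set to 1, the clamp
 * ((un_reset_retry_count < 2) ? 2 : un_reset_retry_count)
 * evaluates to 2, so the reset is attempted on the second retry
 * and at least one plain retry always precedes it.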
17338 */ 17339 if (un->un_f_lun_reset_enabled == TRUE) { 17340 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17341 "sd_pkt_status_busy: RESET_LUN\n"); 17342 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17343 } 17344 if (rval == 0) { 17345 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17346 "sd_pkt_status_busy: RESET_TARGET\n"); 17347 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17348 } 17349 } 17350 if (rval == 0) { 17351 /* 17352 * If the RESET_LUN and/or RESET_TARGET failed, 17353 * try RESET_ALL 17354 */ 17355 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17356 "sd_pkt_status_busy: RESET_ALL\n"); 17357 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 17358 } 17359 mutex_enter(SD_MUTEX(un)); 17360 if (rval == 0) { 17361 /* 17362 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 17363 * At this point we give up & fail the command. 17364 */ 17365 sd_return_failed_command(un, bp, EIO); 17366 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17367 "sd_pkt_status_busy: exit (failed cmd)\n"); 17368 return; 17369 } 17370 } 17371 17372 /* 17373 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17374 * we have already checked the retry counts above. 17375 */ 17376 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17377 EIO, SD_BSY_TIMEOUT, NULL); 17378 17379 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17380 "sd_pkt_status_busy: exit\n"); 17381 } 17382 17383 17384 /* 17385 * Function: sd_pkt_status_reservation_conflict 17386 * 17387 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17388 * command status. 17389 * 17390 * Context: May be called from interrupt context 17391 */ 17392 17393 static void 17394 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17395 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17396 { 17397 ASSERT(un != NULL); 17398 ASSERT(mutex_owned(SD_MUTEX(un))); 17399 ASSERT(bp != NULL); 17400 ASSERT(xp != NULL); 17401 ASSERT(pktp != NULL); 17402 17403 /* 17404 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17405 * conflict could be due to various reasons like incorrect keys, not 17406 * registered or not reserved etc. So, we return EACCES to the caller. 17407 */ 17408 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17409 int cmd = SD_GET_PKT_OPCODE(pktp); 17410 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17411 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17412 sd_return_failed_command(un, bp, EACCES); 17413 return; 17414 } 17415 } 17416 17417 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17418 17419 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17420 if (sd_failfast_enable != 0) { 17421 /* By definition, we must panic here.... */ 17422 sd_panic_for_res_conflict(un); 17423 /*NOTREACHED*/ 17424 } 17425 SD_ERROR(SD_LOG_IO, un, 17426 "sd_handle_resv_conflict: Disk Reserved\n"); 17427 sd_return_failed_command(un, bp, EACCES); 17428 return; 17429 } 17430 17431 /* 17432 * 1147670: retry only if sd_retry_on_reservation_conflict 17433 * property is set (default is 1). Retries will not succeed 17434 * on a disk reserved by another initiator. HA systems 17435 * may reset this via sd.conf to avoid these retries. 17436 * 17437 * Note: The legacy return code for this failure is EIO, however EACCES 17438 * seems more appropriate for a reservation conflict. 17439 */ 17440 if (sd_retry_on_reservation_conflict == 0) { 17441 SD_ERROR(SD_LOG_IO, un, 17442 "sd_handle_resv_conflict: Device Reserved\n"); 17443 sd_return_failed_command(un, bp, EIO); 17444 return; 17445 } 17446 17447 /* 17448 * Retry the command if we can. 
17449 * 17450 * Note: The legacy return code for this failure is EIO, however EACCES 17451 * seems more appropriate for a reservation conflict. 17452 */ 17453 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17454 (clock_t)2, NULL); 17455 } 17456 17457 17458 17459 /* 17460 * Function: sd_pkt_status_qfull 17461 * 17462 * Description: Handle a QUEUE FULL condition from the target. This can 17463 * occur if the HBA does not handle the queue full condition. 17464 * (Basically this means third-party HBAs as Sun HBAs will 17465 * handle the queue full condition.) Note that if there are 17466 * some commands already in the transport, then the queue full 17467 * has occurred because the queue for this nexus is actually 17468 * full. If there are no commands in the transport, then the 17469 * queue full is resulting from some other initiator or lun 17470 * consuming all the resources at the target. 17471 * 17472 * Context: May be called from interrupt context 17473 */ 17474 17475 static void 17476 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17477 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17478 { 17479 ASSERT(un != NULL); 17480 ASSERT(mutex_owned(SD_MUTEX(un))); 17481 ASSERT(bp != NULL); 17482 ASSERT(xp != NULL); 17483 ASSERT(pktp != NULL); 17484 17485 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17486 "sd_pkt_status_qfull: entry\n"); 17487 17488 /* 17489 * Just lower the QFULL throttle and retry the command. Note that 17490 * we do not limit the number of retries here. 17491 */ 17492 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17493 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17494 SD_RESTART_TIMEOUT, NULL); 17495 17496 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17497 "sd_pkt_status_qfull: exit\n"); 17498 } 17499 17500 17501 /* 17502 * Function: sd_reset_target 17503 * 17504 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17505 * RESET_TARGET, or RESET_ALL. 17506 * 17507 * Context: May be called under interrupt context. 17508 */ 17509 17510 static void 17511 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17512 { 17513 int rval = 0; 17514 17515 ASSERT(un != NULL); 17516 ASSERT(mutex_owned(SD_MUTEX(un))); 17517 ASSERT(pktp != NULL); 17518 17519 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17520 17521 /* 17522 * No need to reset if the transport layer has already done so. 
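 * (STAT_BUS_RESET, STAT_DEV_RESET and STAT_ABORTED in
 * pkt_statistics each indicate that the HBA or transport layer
 * has already intervened, so another reset from here would only
 * add disruption.)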
17523 */ 17524 if ((pktp->pkt_statistics & 17525 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17526 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17527 "sd_reset_target: no reset\n"); 17528 return; 17529 } 17530 17531 mutex_exit(SD_MUTEX(un)); 17532 17533 if (un->un_f_allow_bus_device_reset == TRUE) { 17534 if (un->un_f_lun_reset_enabled == TRUE) { 17535 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17536 "sd_reset_target: RESET_LUN\n"); 17537 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17538 } 17539 if (rval == 0) { 17540 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17541 "sd_reset_target: RESET_TARGET\n"); 17542 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17543 } 17544 } 17545 17546 if (rval == 0) { 17547 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17548 "sd_reset_target: RESET_ALL\n"); 17549 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17550 } 17551 17552 mutex_enter(SD_MUTEX(un)); 17553 17554 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17555 } 17556 17557 /* 17558 * Function: sd_target_change_task 17559 * 17560 * Description: Handle dynamic target change 17561 * 17562 * Context: Executes in a taskq() thread context 17563 */ 17564 static void 17565 sd_target_change_task(void *arg) 17566 { 17567 struct sd_lun *un = arg; 17568 uint64_t capacity; 17569 diskaddr_t label_cap; 17570 uint_t lbasize; 17571 17572 ASSERT(un != NULL); 17573 ASSERT(!mutex_owned(SD_MUTEX(un))); 17574 17575 if ((un->un_f_blockcount_is_valid == FALSE) || 17576 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 17577 return; 17578 } 17579 17580 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 17581 &lbasize, SD_PATH_DIRECT) != 0) { 17582 SD_ERROR(SD_LOG_ERROR, un, 17583 "sd_target_change_task: fail to read capacity\n"); 17584 return; 17585 } 17586 17587 mutex_enter(SD_MUTEX(un)); 17588 if (capacity <= un->un_blockcount) { 17589 mutex_exit(SD_MUTEX(un)); 17590 return; 17591 } 17592 17593 sd_update_block_info(un, lbasize, capacity); 17594 mutex_exit(SD_MUTEX(un)); 17595 17596 /* 17597 * If lun is EFI labeled and lun capacity is greater than the 17598 * capacity contained in the label, log a sys event. 17599 */ 17600 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 17601 (void*)SD_PATH_DIRECT) == 0) { 17602 mutex_enter(SD_MUTEX(un)); 17603 if (un->un_f_blockcount_is_valid && 17604 un->un_blockcount > label_cap) { 17605 mutex_exit(SD_MUTEX(un)); 17606 sd_log_lun_expansion_event(un, KM_SLEEP); 17607 } else { 17608 mutex_exit(SD_MUTEX(un)); 17609 } 17610 } 17611 } 17612 17613 /* 17614 * Function: sd_log_lun_expansion_event 17615 * 17616 * Description: Log lun expansion sys event 17617 * 17618 * Context: Never called from interrupt context 17619 */ 17620 static void 17621 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 17622 { 17623 int err; 17624 char *path; 17625 nvlist_t *dle_attr_list; 17626 17627 /* Allocate and build sysevent attribute list */ 17628 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 17629 if (err != 0) { 17630 SD_ERROR(SD_LOG_ERROR, un, 17631 "sd_log_lun_expansion_event: fail to allocate space\n"); 17632 return; 17633 } 17634 17635 path = kmem_alloc(MAXPATHLEN, km_flag); 17636 if (path == NULL) { 17637 nvlist_free(dle_attr_list); 17638 SD_ERROR(SD_LOG_ERROR, un, 17639 "sd_log_lun_expansion_event: fail to allocate space\n"); 17640 return; 17641 } 17642 /* 17643 * Add path attribute to identify the lun. 17644 * We are using minor node 'a' as the sysevent attribute. 
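 * The assembled value ends up looking like the following (a
 * purely illustrative path, not taken from a real system):
 *
 *	/devices/pci@0,0/pci1000,30@10/sd@1,0:a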
17645 */ 17646 (void) snprintf(path, MAXPATHLEN, "/devices"); 17647 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 17648 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 17649 ":a"); 17650 17651 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 17652 if (err != 0) { 17653 nvlist_free(dle_attr_list); 17654 kmem_free(path, MAXPATHLEN); 17655 SD_ERROR(SD_LOG_ERROR, un, 17656 "sd_log_lun_expansion_event: fail to add attribute\n"); 17657 return; 17658 } 17659 17660 /* Log dynamic lun expansion sysevent */ 17661 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 17662 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 17663 if (err != DDI_SUCCESS) { 17664 SD_ERROR(SD_LOG_ERROR, un, 17665 "sd_log_lun_expansion_event: fail to log sysevent\n"); 17666 } 17667 17668 nvlist_free(dle_attr_list); 17669 kmem_free(path, MAXPATHLEN); 17670 } 17671 17672 /* 17673 * Function: sd_media_change_task 17674 * 17675 * Description: Recovery action for CDROM to become available. 17676 * 17677 * Context: Executes in a taskq() thread context 17678 */ 17679 17680 static void 17681 sd_media_change_task(void *arg) 17682 { 17683 struct scsi_pkt *pktp = arg; 17684 struct sd_lun *un; 17685 struct buf *bp; 17686 struct sd_xbuf *xp; 17687 int err = 0; 17688 int retry_count = 0; 17689 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17690 struct sd_sense_info si; 17691 17692 ASSERT(pktp != NULL); 17693 bp = (struct buf *)pktp->pkt_private; 17694 ASSERT(bp != NULL); 17695 xp = SD_GET_XBUF(bp); 17696 ASSERT(xp != NULL); 17697 un = SD_GET_UN(bp); 17698 ASSERT(un != NULL); 17699 ASSERT(!mutex_owned(SD_MUTEX(un))); 17700 ASSERT(un->un_f_monitor_media_state); 17701 17702 si.ssi_severity = SCSI_ERR_INFO; 17703 si.ssi_pfa_flag = FALSE; 17704 17705 /* 17706 * When a reset is issued on a CDROM, it takes a long time to 17707 * recover. First few attempts to read capacity and other things 17708 * related to handling unit attention fail (with a ASC 0x4 and 17709 * ASCQ 0x1). In that case we want to do enough retries and we want 17710 * to limit the retries in other cases of genuine failures like 17711 * no media in drive. 17712 */ 17713 while (retry_count++ < retry_limit) { 17714 if ((err = sd_handle_mchange(un)) == 0) { 17715 break; 17716 } 17717 if (err == EAGAIN) { 17718 retry_limit = SD_UNIT_ATTENTION_RETRY; 17719 } 17720 /* Sleep for 0.5 sec. & try again */ 17721 delay(drv_usectohz(500000)); 17722 } 17723 17724 /* 17725 * Dispatch (retry or fail) the original command here, 17726 * along with appropriate console messages.... 17727 * 17728 * Must grab the mutex before calling sd_retry_command, 17729 * sd_print_sense_msg and sd_return_failed_command. 17730 */ 17731 mutex_enter(SD_MUTEX(un)); 17732 if (err != SD_CMD_SUCCESS) { 17733 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17734 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17735 si.ssi_severity = SCSI_ERR_FATAL; 17736 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17737 sd_return_failed_command(un, bp, EIO); 17738 } else { 17739 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17740 &si, EIO, (clock_t)0, NULL); 17741 } 17742 mutex_exit(SD_MUTEX(un)); 17743 } 17744 17745 17746 17747 /* 17748 * Function: sd_handle_mchange 17749 * 17750 * Description: Perform geometry validation & other recovery when CDROM 17751 * has been removed from drive. 
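 *
 *		Recovery sequence (descriptive summary of the code
 *		below): re-read the capacity, update the block info and
 *		the capacity kstat, check whether CD media is writable,
 *		invalidate and re-validate the cmlb label, then re-lock
 *		the door with DOORLOCK.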
17752 * 17753 * Return Code: 0 for success 17754 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17755 * sd_send_scsi_READ_CAPACITY() 17756 * 17757 * Context: Executes in a taskq() thread context 17758 */ 17759 17760 static int 17761 sd_handle_mchange(struct sd_lun *un) 17762 { 17763 uint64_t capacity; 17764 uint32_t lbasize; 17765 int rval; 17766 17767 ASSERT(!mutex_owned(SD_MUTEX(un))); 17768 ASSERT(un->un_f_monitor_media_state); 17769 17770 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17771 SD_PATH_DIRECT_PRIORITY)) != 0) { 17772 return (rval); 17773 } 17774 17775 mutex_enter(SD_MUTEX(un)); 17776 sd_update_block_info(un, lbasize, capacity); 17777 17778 if (un->un_errstats != NULL) { 17779 struct sd_errstats *stp = 17780 (struct sd_errstats *)un->un_errstats->ks_data; 17781 stp->sd_capacity.value.ui64 = (uint64_t) 17782 ((uint64_t)un->un_blockcount * 17783 (uint64_t)un->un_tgt_blocksize); 17784 } 17785 17786 17787 /* 17788 * Check if the media in the device is writable or not 17789 */ 17790 if (ISCD(un)) 17791 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17792 17793 /* 17794 * Note: Maybe let the strategy/partitioning chain worry about getting 17795 * valid geometry. 17796 */ 17797 mutex_exit(SD_MUTEX(un)); 17798 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17799 17800 17801 if (cmlb_validate(un->un_cmlbhandle, 0, 17802 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17803 return (EIO); 17804 } else { 17805 if (un->un_f_pkstats_enabled) { 17806 sd_set_pstats(un); 17807 SD_TRACE(SD_LOG_IO_PARTITION, un, 17808 "sd_handle_mchange: un:0x%p pstats created and " 17809 "set\n", un); 17810 } 17811 } 17812 17813 17814 /* 17815 * Try to lock the door 17816 */ 17817 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17818 SD_PATH_DIRECT_PRIORITY)); 17819 } 17820 17821 17822 /* 17823 * Function: sd_send_scsi_DOORLOCK 17824 * 17825 * Description: Issue the scsi DOOR LOCK command 17826 * 17827 * Arguments: un - pointer to driver soft state (unit) structure for 17828 * this target. 17829 * flag - SD_REMOVAL_ALLOW 17830 * SD_REMOVAL_PREVENT 17831 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17832 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17833 * to use the USCSI "direct" chain and bypass the normal 17834 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17835 * command is issued as part of an error recovery action. 17836 * 17837 * Return Code: 0 - Success 17838 * errno return code from sd_send_scsi_cmd() 17839 * 17840 * Context: Can sleep. 17841 */ 17842 17843 static int 17844 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17845 { 17846 union scsi_cdb cdb; 17847 struct uscsi_cmd ucmd_buf; 17848 struct scsi_extended_sense sense_buf; 17849 int status; 17850 17851 ASSERT(un != NULL); 17852 ASSERT(!mutex_owned(SD_MUTEX(un))); 17853 17854 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17855 17856 /* already determined doorlock is not supported, fake success */ 17857 if (un->un_f_doorlock_supported == FALSE) { 17858 return (0); 17859 } 17860 17861 /* 17862 * If we are ejecting and see an SD_REMOVAL_PREVENT 17863 * ignore the command so we can complete the eject 17864 * operation. 
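 *
 * (For reference: SCMD_DOORLOCK below is the 6-byte PREVENT ALLOW
 * MEDIUM REMOVAL CDB, opcode 0x1E, with CDB byte 4 carrying the
 * prevent/allow value; that is why cdb_opaque[4] is set from the
 * "flag" argument.)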
*/ 17866 if (flag == SD_REMOVAL_PREVENT) { 17867 mutex_enter(SD_MUTEX(un)); 17868 if (un->un_f_ejecting == TRUE) { 17869 mutex_exit(SD_MUTEX(un)); 17870 return (EAGAIN); 17871 } 17872 mutex_exit(SD_MUTEX(un)); 17873 } 17874 17875 bzero(&cdb, sizeof (cdb)); 17876 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17877 17878 cdb.scc_cmd = SCMD_DOORLOCK; 17879 cdb.cdb_opaque[4] = (uchar_t)flag; 17880 17881 ucmd_buf.uscsi_cdb = (char *)&cdb; 17882 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17883 ucmd_buf.uscsi_bufaddr = NULL; 17884 ucmd_buf.uscsi_buflen = 0; 17885 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17886 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17887 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17888 ucmd_buf.uscsi_timeout = 15; 17889 17890 SD_TRACE(SD_LOG_IO, un, 17891 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 17892 17893 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17894 UIO_SYSSPACE, path_flag); 17895 17896 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 17897 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17898 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 17899 /* fake success and skip subsequent doorlock commands */ 17900 un->un_f_doorlock_supported = FALSE; 17901 return (0); 17902 } 17903 17904 return (status); 17905 } 17906 17907 /* 17908 * Function: sd_send_scsi_READ_CAPACITY 17909 * 17910 * Description: This routine uses the scsi READ CAPACITY command to determine 17911 * the device capacity in number of blocks and the device native 17912 * block size. If this function returns a failure, then the 17913 * values in *capp and *lbap are undefined. If the capacity 17914 * returned is 0xffffffff then the lun is too large for a 17915 * normal READ CAPACITY command and the results of a 17916 * READ CAPACITY 16 will be used instead. 17917 * 17918 * Arguments: un - ptr to soft state struct for the target 17919 * capp - ptr to unsigned 64-bit variable to receive the 17920 * capacity value from the command. 17921 * lbap - ptr to unsigned 32-bit variable to receive the 17922 * block size value from the command 17923 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17924 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17925 * to use the USCSI "direct" chain and bypass the normal 17926 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17927 * command is issued as part of an error recovery action. 17928 * 17929 * Return Code: 0 - Success 17930 * EIO - IO error 17931 * EACCES - Reservation conflict detected 17932 * EAGAIN - Device is becoming ready 17933 * errno return code from sd_send_scsi_cmd() 17934 * 17935 * Context: Can sleep. Blocks until command completes. 17936 */ 17937 17938 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 17939 17940 static int 17941 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 17942 int path_flag) 17943 { 17944 struct scsi_extended_sense sense_buf; 17945 struct uscsi_cmd ucmd_buf; 17946 union scsi_cdb cdb; 17947 uint32_t *capacity_buf; 17948 uint64_t capacity; 17949 uint32_t lbasize; 17950 int status; 17951 17952 ASSERT(un != NULL); 17953 ASSERT(!mutex_owned(SD_MUTEX(un))); 17954 ASSERT(capp != NULL); 17955 ASSERT(lbap != NULL); 17956 17957 SD_TRACE(SD_LOG_IO, un, 17958 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17959 17960 /* 17961 * First send a READ_CAPACITY command to the target. 17962 * (This command is mandatory under SCSI-2.) 17963 * 17964 * Set up the CDB for the READ_CAPACITY command.
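 * (Sketch of the resulting 10-byte CDB, assuming the LBA and PMI
 * fields stay zero as they do here: byte 0 = opcode 0x25, bytes
 * 2-5 = logical block address, byte 8 bit 0 = PMI.)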
The Partial 17965 * Medium Indicator bit is cleared. The address field must be 17966 * zero if the PMI bit is zero. 17967 */ 17968 bzero(&cdb, sizeof (cdb)); 17969 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17970 17971 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17972 17973 cdb.scc_cmd = SCMD_READ_CAPACITY; 17974 17975 ucmd_buf.uscsi_cdb = (char *)&cdb; 17976 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17977 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17978 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17979 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17980 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17981 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17982 ucmd_buf.uscsi_timeout = 60; 17983 17984 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17985 UIO_SYSSPACE, path_flag); 17986 17987 switch (status) { 17988 case 0: 17989 /* Return failure if we did not get valid capacity data. */ 17990 if (ucmd_buf.uscsi_resid != 0) { 17991 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17992 return (EIO); 17993 } 17994 17995 /* 17996 * Read capacity and block size from the READ CAPACITY 10 data. 17997 * This data may be adjusted later due to device specific 17998 * issues. 17999 * 18000 * According to the SCSI spec, the READ CAPACITY 10 18001 * command returns the following: 18002 * 18003 * bytes 0-3: Maximum logical block address available. 18004 * (MSB in byte:0 & LSB in byte:3) 18005 * 18006 * bytes 4-7: Block length in bytes 18007 * (MSB in byte:4 & LSB in byte:7) 18008 * 18009 */ 18010 capacity = BE_32(capacity_buf[0]); 18011 lbasize = BE_32(capacity_buf[1]); 18012 18013 /* 18014 * Done with capacity_buf 18015 */ 18016 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18017 18018 /* 18019 * if the reported capacity is set to all 0xf's, then 18020 * this disk is too large and requires SBC-2 commands. 18021 * Reissue the request using READ CAPACITY 16. 18022 */ 18023 if (capacity == 0xffffffff) { 18024 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 18025 &lbasize, path_flag); 18026 if (status != 0) { 18027 return (status); 18028 } 18029 } 18030 break; /* Success! */ 18031 case EIO: 18032 switch (ucmd_buf.uscsi_status) { 18033 case STATUS_RESERVATION_CONFLICT: 18034 status = EACCES; 18035 break; 18036 case STATUS_CHECK: 18037 /* 18038 * Check condition; look for ASC/ASCQ of 0x04/0x01 18039 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18040 */ 18041 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18042 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 18043 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 18044 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18045 return (EAGAIN); 18046 } 18047 break; 18048 default: 18049 break; 18050 } 18051 /* FALLTHRU */ 18052 default: 18053 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18054 return (status); 18055 } 18056 18057 /* 18058 * Some ATAPI CD-ROM drives report inaccurate LBA size values 18059 * (2352 and 0 are common) so for these devices always force the value 18060 * to 2048 as required by the ATAPI specs. 18061 */ 18062 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 18063 lbasize = 2048; 18064 } 18065 18066 /* 18067 * Get the maximum LBA value from the READ CAPACITY data. 18068 * Here we assume that the Partial Medium Indicator (PMI) bit 18069 * was cleared when issuing the command. This means that the LBA 18070 * returned from the device is the LBA of the last logical block 18071 * on the logical unit. The actual logical block count will be 18072 * this value plus one. 
18073 *
18074 * Currently the capacity is saved in terms of un->un_sys_blocksize,
18075 * so scale the capacity value to reflect this.
18076 */
18077 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
18078
18079 /*
18080 * Copy the values from the READ CAPACITY command into the space
18081 * provided by the caller.
18082 */
18083 *capp = capacity;
18084 *lbap = lbasize;
18085
18086 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
18087 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18088
18089 /*
18090 * Both the lbasize and capacity from the device must be nonzero,
18091 * otherwise we assume that the values are not valid and return
18092 * failure to the caller. (4203735)
18093 */
18094 if ((capacity == 0) || (lbasize == 0)) {
18095 return (EIO);
18096 }
18097
18098 return (0);
18099 }
18100
18101 /*
18102 * Function: sd_send_scsi_READ_CAPACITY_16
18103 *
18104 * Description: This routine uses the scsi READ CAPACITY 16 command to
18105 * determine the device capacity in number of blocks and the
18106 * device native block size. If this function returns a failure,
18107 * then the values in *capp and *lbap are undefined.
18108 * This routine should always be called by
18109 * sd_send_scsi_READ_CAPACITY which will apply any device
18110 * specific adjustments to capacity and lbasize.
18111 *
18112 * Arguments: un - ptr to soft state struct for the target
18113 * capp - ptr to unsigned 64-bit variable to receive the
18114 * capacity value from the command.
18115 * lbap - ptr to unsigned 32-bit variable to receive the
18116 * block size value from the command
18117 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18118 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18119 * to use the USCSI "direct" chain and bypass the normal
18120 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
18121 * this command is issued as part of an error recovery
18122 * action.
18123 *
18124 * Return Code: 0 - Success
18125 * EIO - IO error
18126 * EACCES - Reservation conflict detected
18127 * EAGAIN - Device is becoming ready
18128 * errno return code from sd_send_scsi_cmd()
18129 *
18130 * Context: Can sleep. Blocks until command completes.
18131 */
18132
18133 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
18134
18135 static int
18136 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
18137 uint32_t *lbap, int path_flag)
18138 {
18139 struct scsi_extended_sense sense_buf;
18140 struct uscsi_cmd ucmd_buf;
18141 union scsi_cdb cdb;
18142 uint64_t *capacity16_buf;
18143 uint64_t capacity;
18144 uint32_t lbasize;
18145 int status;
18146
18147 ASSERT(un != NULL);
18148 ASSERT(!mutex_owned(SD_MUTEX(un)));
18149 ASSERT(capp != NULL);
18150 ASSERT(lbap != NULL);
18151
18152 SD_TRACE(SD_LOG_IO, un,
18153 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
18154
18155 /*
18156 * First send a READ_CAPACITY_16 command to the target.
18157 *
18158 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
18159 * Medium Indicator bit is cleared. The address field must be
18160 * zero if the PMI bit is zero.
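 *
 * For reference (a sketch based on the SBC definition, not on this
 * code): the 16-byte CDB built below carries 0x9E (SERVICE ACTION
 * IN(16)) in byte 0, the READ CAPACITY(16) service action in the low
 * five bits of byte 1, and the allocation length in bytes 10-13,
 * which FORMG4COUNT() fills in below.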
18161 */
18162 bzero(&cdb, sizeof (cdb));
18163 bzero(&ucmd_buf, sizeof (ucmd_buf));
18164
18165 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
18166
18167 ucmd_buf.uscsi_cdb = (char *)&cdb;
18168 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
18169 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
18170 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
18171 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18172 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
18173 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18174 ucmd_buf.uscsi_timeout = 60;
18175
18176 /*
18177 * Read Capacity (16) is a Service Action In command. One
18178 * command byte (0x9E) is overloaded for multiple operations,
18179 * with the second CDB byte specifying the desired operation.
18180 */
18181 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
18182 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
18183
18184 /*
18185 * Fill in allocation length field
18186 */
18187 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
18188
18189 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18190 UIO_SYSSPACE, path_flag);
18191
18192 switch (status) {
18193 case 0:
18194 /* Return failure if we did not get valid capacity data. */
18195 if (ucmd_buf.uscsi_resid > 20) {
18196 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18197 return (EIO);
18198 }
18199
18200 /*
18201 * Read capacity and block size from the READ CAPACITY 16 data.
18202 * This data may be adjusted later due to device specific
18203 * issues.
18204 *
18205 * According to the SCSI spec, the READ CAPACITY 16
18206 * command returns the following:
18207 *
18208 * bytes 0-7: Maximum logical block address available.
18209 * (MSB in byte:0 & LSB in byte:7)
18210 *
18211 * bytes 8-11: Block length in bytes
18212 * (MSB in byte:8 & LSB in byte:11)
18213 *
18214 */
18215 capacity = BE_64(capacity16_buf[0]);
18216 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
18217
18218 /*
18219 * Done with capacity16_buf
18220 */
18221 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18222
18223 /*
18224 * if the reported capacity is set to all 0xf's, then
18225 * this disk is too large. This could only happen with
18226 * a device that supports LBAs larger than 64 bits which
18227 * are not defined by any current T10 standards.
18228 */
18229 if (capacity == 0xffffffffffffffff) {
18230 return (EIO);
18231 }
18232 break; /* Success! */
18233 case EIO:
18234 switch (ucmd_buf.uscsi_status) {
18235 case STATUS_RESERVATION_CONFLICT:
18236 status = EACCES;
18237 break;
18238 case STATUS_CHECK:
18239 /*
18240 * Check condition; look for ASC/ASCQ of 0x04/0x01
18241 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
18242 */
18243 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18244 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
18245 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
18246 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18247 return (EAGAIN);
18248 }
18249 break;
18250 default:
18251 break;
18252 }
18253 /* FALLTHRU */
18254 default:
18255 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18256 return (status);
18257 }
18258
18259 *capp = capacity;
18260 *lbap = lbasize;
18261
18262 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
18263 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18264
18265 return (0);
18266 }
18267
18268
18269 /*
18270 * Function: sd_send_scsi_START_STOP_UNIT
18271 *
18272 * Description: Issue a scsi START STOP UNIT command to the target.
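 *		The flag value is placed directly into byte 4 of the
 *		CDB. As a reference note (per the SCSI spec, not derived
 *		from this file): bit 0 of that byte is START and bit 1
 *		is LOEJ, so a stop is 0x00, a start is 0x01, and an
 *		eject is LOEJ set with START clear.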
18273 * 18274 * Arguments: un - pointer to driver soft state (unit) structure for 18275 * this target. 18276 * flag - SD_TARGET_START 18277 * SD_TARGET_STOP 18278 * SD_TARGET_EJECT 18279 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18280 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18281 * to use the USCSI "direct" chain and bypass the normal 18282 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18283 * command is issued as part of an error recovery action. 18284 * 18285 * Return Code: 0 - Success 18286 * EIO - IO error 18287 * EACCES - Reservation conflict detected 18288 * ENXIO - Not Ready, medium not present 18289 * errno return code from sd_send_scsi_cmd() 18290 * 18291 * Context: Can sleep. 18292 */ 18293 18294 static int 18295 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18296 { 18297 struct scsi_extended_sense sense_buf; 18298 union scsi_cdb cdb; 18299 struct uscsi_cmd ucmd_buf; 18300 int status; 18301 18302 ASSERT(un != NULL); 18303 ASSERT(!mutex_owned(SD_MUTEX(un))); 18304 18305 SD_TRACE(SD_LOG_IO, un, 18306 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18307 18308 if (un->un_f_check_start_stop && 18309 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18310 (un->un_f_start_stop_supported != TRUE)) { 18311 return (0); 18312 } 18313 18314 /* 18315 * If we are performing an eject operation and 18316 * we receive any command other than SD_TARGET_EJECT 18317 * we should immediately return. 18318 */ 18319 if (flag != SD_TARGET_EJECT) { 18320 mutex_enter(SD_MUTEX(un)); 18321 if (un->un_f_ejecting == TRUE) { 18322 mutex_exit(SD_MUTEX(un)); 18323 return (EAGAIN); 18324 } 18325 mutex_exit(SD_MUTEX(un)); 18326 } 18327 18328 bzero(&cdb, sizeof (cdb)); 18329 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18330 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18331 18332 cdb.scc_cmd = SCMD_START_STOP; 18333 cdb.cdb_opaque[4] = (uchar_t)flag; 18334 18335 ucmd_buf.uscsi_cdb = (char *)&cdb; 18336 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18337 ucmd_buf.uscsi_bufaddr = NULL; 18338 ucmd_buf.uscsi_buflen = 0; 18339 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18340 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18341 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18342 ucmd_buf.uscsi_timeout = 200; 18343 18344 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18345 UIO_SYSSPACE, path_flag); 18346 18347 switch (status) { 18348 case 0: 18349 break; /* Success! */ 18350 case EIO: 18351 switch (ucmd_buf.uscsi_status) { 18352 case STATUS_RESERVATION_CONFLICT: 18353 status = EACCES; 18354 break; 18355 case STATUS_CHECK: 18356 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18357 switch (scsi_sense_key( 18358 (uint8_t *)&sense_buf)) { 18359 case KEY_ILLEGAL_REQUEST: 18360 status = ENOTSUP; 18361 break; 18362 case KEY_NOT_READY: 18363 if (scsi_sense_asc( 18364 (uint8_t *)&sense_buf) 18365 == 0x3A) { 18366 status = ENXIO; 18367 } 18368 break; 18369 default: 18370 break; 18371 } 18372 } 18373 break; 18374 default: 18375 break; 18376 } 18377 break; 18378 default: 18379 break; 18380 } 18381 18382 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18383 18384 return (status); 18385 } 18386 18387 18388 /* 18389 * Function: sd_start_stop_unit_callback 18390 * 18391 * Description: timeout(9F) callback to begin recovery process for a 18392 * device that has spun down. 18393 * 18394 * Arguments: arg - pointer to associated softstate struct. 
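 *
 * Note: the taskq_dispatch() below is issued with KM_NOSLEEP and its
 * return value is discarded, so under severe memory pressure the
 * recovery task may silently fail to be scheduled.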
18395 * 18396 * Context: Executes in a timeout(9F) thread context 18397 */ 18398 18399 static void 18400 sd_start_stop_unit_callback(void *arg) 18401 { 18402 struct sd_lun *un = arg; 18403 ASSERT(un != NULL); 18404 ASSERT(!mutex_owned(SD_MUTEX(un))); 18405 18406 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 18407 18408 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 18409 } 18410 18411 18412 /* 18413 * Function: sd_start_stop_unit_task 18414 * 18415 * Description: Recovery procedure when a drive is spun down. 18416 * 18417 * Arguments: arg - pointer to associated softstate struct. 18418 * 18419 * Context: Executes in a taskq() thread context 18420 */ 18421 18422 static void 18423 sd_start_stop_unit_task(void *arg) 18424 { 18425 struct sd_lun *un = arg; 18426 18427 ASSERT(un != NULL); 18428 ASSERT(!mutex_owned(SD_MUTEX(un))); 18429 18430 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 18431 18432 /* 18433 * Some unformatted drives report not ready error, no need to 18434 * restart if format has been initiated. 18435 */ 18436 mutex_enter(SD_MUTEX(un)); 18437 if (un->un_f_format_in_progress == TRUE) { 18438 mutex_exit(SD_MUTEX(un)); 18439 return; 18440 } 18441 mutex_exit(SD_MUTEX(un)); 18442 18443 /* 18444 * When a START STOP command is issued from here, it is part of a 18445 * failure recovery operation and must be issued before any other 18446 * commands, including any pending retries. Thus it must be sent 18447 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 18448 * succeeds or not, we will start I/O after the attempt. 18449 */ 18450 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 18451 SD_PATH_DIRECT_PRIORITY); 18452 18453 /* 18454 * The above call blocks until the START_STOP_UNIT command completes. 18455 * Now that it has completed, we must re-try the original IO that 18456 * received the NOT READY condition in the first place. There are 18457 * three possible conditions here: 18458 * 18459 * (1) The original IO is on un_retry_bp. 18460 * (2) The original IO is on the regular wait queue, and un_retry_bp 18461 * is NULL. 18462 * (3) The original IO is on the regular wait queue, and un_retry_bp 18463 * points to some other, unrelated bp. 18464 * 18465 * For each case, we must call sd_start_cmds() with un_retry_bp 18466 * as the argument. If un_retry_bp is NULL, this will initiate 18467 * processing of the regular wait queue. If un_retry_bp is not NULL, 18468 * then this will process the bp on un_retry_bp. That may or may not 18469 * be the original IO, but that does not matter: the important thing 18470 * is to keep the IO processing going at this point. 18471 * 18472 * Note: This is a very specific error recovery sequence associated 18473 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18474 * serialize the I/O with completion of the spin-up. 18475 */ 18476 mutex_enter(SD_MUTEX(un)); 18477 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18478 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18479 un, un->un_retry_bp); 18480 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18481 sd_start_cmds(un, un->un_retry_bp); 18482 mutex_exit(SD_MUTEX(un)); 18483 18484 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18485 } 18486 18487 18488 /* 18489 * Function: sd_send_scsi_INQUIRY 18490 * 18491 * Description: Issue the scsi INQUIRY command. 
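 *		When evpd is nonzero the EVPD bit is set and page_code
 *		selects a Vital Product Data page (for example page 0x80,
 *		unit serial number, or page 0x83, device identification);
 *		when evpd is zero, standard INQUIRY data is returned and
 *		page_code must also be zero. (A reference note per SPC,
 *		not specific to this driver.)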
18492 *
18493 * Arguments: un - ptr to soft state struct for the target
18494 * bufaddr - buffer to receive the INQUIRY data
18495 * buflen - length of bufaddr, in bytes
18496 * evpd - EVPD bit to set in byte 1 of the CDB
18497 * page_code - page code to set in byte 2 of the CDB
18498 * residp - optional ptr to receive the command residual (may be NULL)
18499 *
18500 * Return Code: 0 - Success
18501 * errno return code from sd_send_scsi_cmd()
18502 *
18503 * Context: Can sleep. Does not return until command is completed.
18504 */
18505
18506 static int
18507 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
18508 uchar_t evpd, uchar_t page_code, size_t *residp)
18509 {
18510 union scsi_cdb cdb;
18511 struct uscsi_cmd ucmd_buf;
18512 int status;
18513
18514 ASSERT(un != NULL);
18515 ASSERT(!mutex_owned(SD_MUTEX(un)));
18516 ASSERT(bufaddr != NULL);
18517
18518 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
18519
18520 bzero(&cdb, sizeof (cdb));
18521 bzero(&ucmd_buf, sizeof (ucmd_buf));
18522 bzero(bufaddr, buflen);
18523
18524 cdb.scc_cmd = SCMD_INQUIRY;
18525 cdb.cdb_opaque[1] = evpd;
18526 cdb.cdb_opaque[2] = page_code;
18527 FORMG0COUNT(&cdb, buflen);
18528
18529 ucmd_buf.uscsi_cdb = (char *)&cdb;
18530 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
18531 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
18532 ucmd_buf.uscsi_buflen = buflen;
18533 ucmd_buf.uscsi_rqbuf = NULL;
18534 ucmd_buf.uscsi_rqlen = 0;
18535 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
18536 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
18537
18538 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18539 UIO_SYSSPACE, SD_PATH_DIRECT);
18540
18541 if ((status == 0) && (residp != NULL)) {
18542 *residp = ucmd_buf.uscsi_resid;
18543 }
18544
18545 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
18546
18547 return (status);
18548 }
18549
18550
18551 /*
18552 * Function: sd_send_scsi_TEST_UNIT_READY
18553 *
18554 * Description: Issue the scsi TEST UNIT READY command.
18555 * This routine can be told to set the flag USCSI_DIAGNOSE to
18556 * prevent retrying failed commands. Use this when the intent
18557 * is either to check for device readiness, to clear a Unit
18558 * Attention, or to clear any outstanding sense data.
18559 * However under specific conditions the expected behavior
18560 * is for retries to bring a device ready, so use the flag
18561 * with caution.
18562 *
18563 * Arguments: un
18564 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
18565 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
18566 * 0: don't check for media present; do retries on cmd.
18567 *
18568 * Return Code: 0 - Success
18569 * EIO - IO error
18570 * EACCES - Reservation conflict detected
18571 * ENXIO - Not Ready, medium not present
18572 * errno return code from sd_send_scsi_cmd()
18573 *
18574 * Context: Can sleep. Does not return until command is completed.
18575 */
18576
18577 static int
18578 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
18579 {
18580 struct scsi_extended_sense sense_buf;
18581 union scsi_cdb cdb;
18582 struct uscsi_cmd ucmd_buf;
18583 int status;
18584
18585 ASSERT(un != NULL);
18586 ASSERT(!mutex_owned(SD_MUTEX(un)));
18587
18588 SD_TRACE(SD_LOG_IO, un,
18589 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
18590
18591 /*
18592 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
18593 * timeouts when they receive a TUR and the queue is not empty. Check
18594 * the configuration flag set during attach (indicating the drive has
18595 * this firmware bug) and un_ncmds_in_transport before issuing the
18596 * TUR. If there are
18597 * pending commands, return success; this is a bit arbitrary but is ok
18598 * for non-removables (i.e.
the eliteI disks) and non-clustering 18599 * configurations. 18600 */ 18601 if (un->un_f_cfg_tur_check == TRUE) { 18602 mutex_enter(SD_MUTEX(un)); 18603 if (un->un_ncmds_in_transport != 0) { 18604 mutex_exit(SD_MUTEX(un)); 18605 return (0); 18606 } 18607 mutex_exit(SD_MUTEX(un)); 18608 } 18609 18610 bzero(&cdb, sizeof (cdb)); 18611 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18612 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18613 18614 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18615 18616 ucmd_buf.uscsi_cdb = (char *)&cdb; 18617 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18618 ucmd_buf.uscsi_bufaddr = NULL; 18619 ucmd_buf.uscsi_buflen = 0; 18620 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18621 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18622 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18623 18624 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18625 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18626 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18627 } 18628 ucmd_buf.uscsi_timeout = 60; 18629 18630 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18631 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18632 SD_PATH_STANDARD)); 18633 18634 switch (status) { 18635 case 0: 18636 break; /* Success! */ 18637 case EIO: 18638 switch (ucmd_buf.uscsi_status) { 18639 case STATUS_RESERVATION_CONFLICT: 18640 status = EACCES; 18641 break; 18642 case STATUS_CHECK: 18643 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18644 break; 18645 } 18646 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18647 (scsi_sense_key((uint8_t *)&sense_buf) == 18648 KEY_NOT_READY) && 18649 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18650 status = ENXIO; 18651 } 18652 break; 18653 default: 18654 break; 18655 } 18656 break; 18657 default: 18658 break; 18659 } 18660 18661 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18662 18663 return (status); 18664 } 18665 18666 18667 /* 18668 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18669 * 18670 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18671 * 18672 * Arguments: un 18673 * 18674 * Return Code: 0 - Success 18675 * EACCES 18676 * ENOTSUP 18677 * errno return code from sd_send_scsi_cmd() 18678 * 18679 * Context: Can sleep. Does not return until command is completed. 
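 *
 * A reference note (per SPC-3, not specific to this driver): usr_cmd
 * selects the PERSISTENT RESERVE IN service action (READ KEYS or READ
 * RESERVATION), and the returned data begins with an 8-byte header of
 * PRGENERATION (4 bytes) and ADDITIONAL LENGTH (4 bytes), followed by
 * the registered keys or reservation descriptors.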
18680 */
18681
18682 static int
18683 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd,
18684 uint16_t data_len, uchar_t *data_bufp)
18685 {
18686 struct scsi_extended_sense sense_buf;
18687 union scsi_cdb cdb;
18688 struct uscsi_cmd ucmd_buf;
18689 int status;
18690 int no_caller_buf = FALSE;
18691
18692 ASSERT(un != NULL);
18693 ASSERT(!mutex_owned(SD_MUTEX(un)));
18694 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
18695
18696 SD_TRACE(SD_LOG_IO, un,
18697 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
18698
18699 bzero(&cdb, sizeof (cdb));
18700 bzero(&ucmd_buf, sizeof (ucmd_buf));
18701 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
18702 if (data_bufp == NULL) {
18703 /* Allocate a default buf if the caller did not give one */
18704 ASSERT(data_len == 0);
18705 data_len = MHIOC_RESV_KEY_SIZE;
18706 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
18707 no_caller_buf = TRUE;
18708 }
18709
18710 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
18711 cdb.cdb_opaque[1] = usr_cmd;
18712 FORMG1COUNT(&cdb, data_len);
18713
18714 ucmd_buf.uscsi_cdb = (char *)&cdb;
18715 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
18716 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
18717 ucmd_buf.uscsi_buflen = data_len;
18718 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18719 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
18720 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18721 ucmd_buf.uscsi_timeout = 60;
18722
18723 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18724 UIO_SYSSPACE, SD_PATH_STANDARD);
18725
18726 switch (status) {
18727 case 0:
18728 break; /* Success! */
18729 case EIO:
18730 switch (ucmd_buf.uscsi_status) {
18731 case STATUS_RESERVATION_CONFLICT:
18732 status = EACCES;
18733 break;
18734 case STATUS_CHECK:
18735 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18736 (scsi_sense_key((uint8_t *)&sense_buf) ==
18737 KEY_ILLEGAL_REQUEST)) {
18738 status = ENOTSUP;
18739 }
18740 break;
18741 default:
18742 break;
18743 }
18744 break;
18745 default:
18746 break;
18747 }
18748
18749 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
18750
18751 if (no_caller_buf == TRUE) {
18752 kmem_free(data_bufp, data_len);
18753 }
18754
18755 return (status);
18756 }
18757
18758
18759 /*
18760 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
18761 *
18762 * Description: This routine is the driver entry point for handling
18763 * multi-host persistent reservation requests (MHIOCGRP_REGISTER,
18764 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT and
18765 * MHIOCGRP_REGISTERANDIGNOREKEY) by sending SCSI-3 PROUT commands to the device.
18766 *
18767 * Arguments: un - Pointer to soft state struct for the target.
18768 * usr_cmd SCSI-3 reservation facility command (one of
18769 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
18770 * SD_SCSI3_PREEMPTANDABORT, or SD_SCSI3_REGISTERANDIGNOREKEY)
18771 * usr_bufp - user provided pointer to a register, reserve descriptor, or
18772 * preempt and abort structure (mhioc_register_t,
18773 * mhioc_resv_desc_t, mhioc_preemptandabort_t)
18774 *
18775 * Return Code: 0 - Success
18776 * EACCES
18777 * ENOTSUP
18778 * errno return code from sd_send_scsi_cmd()
18779 *
18780 * Context: Can sleep. Does not return until command is completed.
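 *
 * A reference note (per SPC-3, not specific to this driver): the
 * 24-byte parameter list built below carries the reservation key in
 * bytes 0-7, the service action reservation key in bytes 8-15, the
 * scope-specific address in bytes 16-19, and the APTPL bit in bit 0
 * of byte 20; this matches the sd_prout_t fields filled in below.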
18781 */ 18782 18783 static int 18784 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18785 uchar_t *usr_bufp) 18786 { 18787 struct scsi_extended_sense sense_buf; 18788 union scsi_cdb cdb; 18789 struct uscsi_cmd ucmd_buf; 18790 int status; 18791 uchar_t data_len = sizeof (sd_prout_t); 18792 sd_prout_t *prp; 18793 18794 ASSERT(un != NULL); 18795 ASSERT(!mutex_owned(SD_MUTEX(un))); 18796 ASSERT(data_len == 24); /* required by scsi spec */ 18797 18798 SD_TRACE(SD_LOG_IO, un, 18799 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18800 18801 if (usr_bufp == NULL) { 18802 return (EINVAL); 18803 } 18804 18805 bzero(&cdb, sizeof (cdb)); 18806 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18807 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18808 prp = kmem_zalloc(data_len, KM_SLEEP); 18809 18810 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18811 cdb.cdb_opaque[1] = usr_cmd; 18812 FORMG1COUNT(&cdb, data_len); 18813 18814 ucmd_buf.uscsi_cdb = (char *)&cdb; 18815 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18816 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18817 ucmd_buf.uscsi_buflen = data_len; 18818 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18819 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18820 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18821 ucmd_buf.uscsi_timeout = 60; 18822 18823 switch (usr_cmd) { 18824 case SD_SCSI3_REGISTER: { 18825 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18826 18827 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18828 bcopy(ptr->newkey.key, prp->service_key, 18829 MHIOC_RESV_KEY_SIZE); 18830 prp->aptpl = ptr->aptpl; 18831 break; 18832 } 18833 case SD_SCSI3_RESERVE: 18834 case SD_SCSI3_RELEASE: { 18835 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18836 18837 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18838 prp->scope_address = BE_32(ptr->scope_specific_addr); 18839 cdb.cdb_opaque[2] = ptr->type; 18840 break; 18841 } 18842 case SD_SCSI3_PREEMPTANDABORT: { 18843 mhioc_preemptandabort_t *ptr = 18844 (mhioc_preemptandabort_t *)usr_bufp; 18845 18846 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18847 bcopy(ptr->victim_key.key, prp->service_key, 18848 MHIOC_RESV_KEY_SIZE); 18849 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18850 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18851 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18852 break; 18853 } 18854 case SD_SCSI3_REGISTERANDIGNOREKEY: 18855 { 18856 mhioc_registerandignorekey_t *ptr; 18857 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18858 bcopy(ptr->newkey.key, 18859 prp->service_key, MHIOC_RESV_KEY_SIZE); 18860 prp->aptpl = ptr->aptpl; 18861 break; 18862 } 18863 default: 18864 ASSERT(FALSE); 18865 break; 18866 } 18867 18868 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18869 UIO_SYSSPACE, SD_PATH_STANDARD); 18870 18871 switch (status) { 18872 case 0: 18873 break; /* Success! 
*/ 18874 case EIO: 18875 switch (ucmd_buf.uscsi_status) { 18876 case STATUS_RESERVATION_CONFLICT: 18877 status = EACCES; 18878 break; 18879 case STATUS_CHECK: 18880 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18881 (scsi_sense_key((uint8_t *)&sense_buf) == 18882 KEY_ILLEGAL_REQUEST)) { 18883 status = ENOTSUP; 18884 } 18885 break; 18886 default: 18887 break; 18888 } 18889 break; 18890 default: 18891 break; 18892 } 18893 18894 kmem_free(prp, data_len); 18895 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18896 return (status); 18897 } 18898 18899 18900 /* 18901 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18902 * 18903 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18904 * 18905 * Arguments: un - pointer to the target's soft state struct 18906 * dkc - pointer to the callback structure 18907 * 18908 * Return Code: 0 - success 18909 * errno-type error code 18910 * 18911 * Context: kernel thread context only. 18912 * 18913 * _______________________________________________________________ 18914 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 18915 * |FLUSH_VOLATILE| | operation | 18916 * |______________|______________|_________________________________| 18917 * | 0 | NULL | Synchronous flush on both | 18918 * | | | volatile and non-volatile cache | 18919 * |______________|______________|_________________________________| 18920 * | 1 | NULL | Synchronous flush on volatile | 18921 * | | | cache; disk drivers may suppress| 18922 * | | | flush if disk table indicates | 18923 * | | | non-volatile cache | 18924 * |______________|______________|_________________________________| 18925 * | 0 | !NULL | Asynchronous flush on both | 18926 * | | | volatile and non-volatile cache;| 18927 * |______________|______________|_________________________________| 18928 * | 1 | !NULL | Asynchronous flush on volatile | 18929 * | | | cache; disk drivers may suppress| 18930 * | | | flush if disk table indicates | 18931 * | | | non-volatile cache | 18932 * |______________|______________|_________________________________| 18933 * 18934 */ 18935 18936 static int 18937 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18938 { 18939 struct sd_uscsi_info *uip; 18940 struct uscsi_cmd *uscmd; 18941 union scsi_cdb *cdb; 18942 struct buf *bp; 18943 int rval = 0; 18944 int is_async; 18945 18946 SD_TRACE(SD_LOG_IO, un, 18947 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18948 18949 ASSERT(un != NULL); 18950 ASSERT(!mutex_owned(SD_MUTEX(un))); 18951 18952 if (dkc == NULL || dkc->dkc_callback == NULL) { 18953 is_async = FALSE; 18954 } else { 18955 is_async = TRUE; 18956 } 18957 18958 mutex_enter(SD_MUTEX(un)); 18959 /* check whether cache flush should be suppressed */ 18960 if (un->un_f_suppress_cache_flush == TRUE) { 18961 mutex_exit(SD_MUTEX(un)); 18962 /* 18963 * suppress the cache flush if the device is told to do 18964 * so by sd.conf or disk table 18965 */ 18966 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 18967 skip the cache flush since suppress_cache_flush is %d!\n", 18968 un->un_f_suppress_cache_flush); 18969 18970 if (is_async == TRUE) { 18971 /* invoke callback for asynchronous flush */ 18972 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 18973 } 18974 return (rval); 18975 } 18976 mutex_exit(SD_MUTEX(un)); 18977 18978 /* 18979 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 18980 * set properly 18981 */ 18982 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18983 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18984 18985 
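	/*
	 * Reference note (per SBC, not specific to this driver): SYNC_NV
	 * is bit 2 of byte 1 in the SYNCHRONIZE CACHE(10) CDB. When it is
	 * set, the device is only required to flush its volatile cache,
	 * which is what the FLUSH_VOLATILE rows of the table above request.
	 */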
mutex_enter(SD_MUTEX(un)); 18986 if (dkc != NULL && un->un_f_sync_nv_supported && 18987 (dkc->dkc_flag & FLUSH_VOLATILE)) { 18988 /* 18989 * if the device supports SYNC_NV bit, turn on 18990 * the SYNC_NV bit to only flush volatile cache 18991 */ 18992 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 18993 } 18994 mutex_exit(SD_MUTEX(un)); 18995 18996 /* 18997 * First get some memory for the uscsi_cmd struct and cdb 18998 * and initialize for SYNCHRONIZE_CACHE cmd. 18999 */ 19000 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 19001 uscmd->uscsi_cdblen = CDB_GROUP1; 19002 uscmd->uscsi_cdb = (caddr_t)cdb; 19003 uscmd->uscsi_bufaddr = NULL; 19004 uscmd->uscsi_buflen = 0; 19005 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 19006 uscmd->uscsi_rqlen = SENSE_LENGTH; 19007 uscmd->uscsi_rqresid = SENSE_LENGTH; 19008 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19009 uscmd->uscsi_timeout = sd_io_time; 19010 19011 /* 19012 * Allocate an sd_uscsi_info struct and fill it with the info 19013 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 19014 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 19015 * since we allocate the buf here in this function, we do not 19016 * need to preserve the prior contents of b_private. 19017 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 19018 */ 19019 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 19020 uip->ui_flags = SD_PATH_DIRECT; 19021 uip->ui_cmdp = uscmd; 19022 19023 bp = getrbuf(KM_SLEEP); 19024 bp->b_private = uip; 19025 19026 /* 19027 * Setup buffer to carry uscsi request. 19028 */ 19029 bp->b_flags = B_BUSY; 19030 bp->b_bcount = 0; 19031 bp->b_blkno = 0; 19032 19033 if (is_async == TRUE) { 19034 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 19035 uip->ui_dkc = *dkc; 19036 } 19037 19038 bp->b_edev = SD_GET_DEV(un); 19039 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 19040 19041 (void) sd_uscsi_strategy(bp); 19042 19043 /* 19044 * If synchronous request, wait for completion 19045 * If async just return and let b_iodone callback 19046 * cleanup. 19047 * NOTE: On return, u_ncmds_in_driver will be decremented, 19048 * but it was also incremented in sd_uscsi_strategy(), so 19049 * we should be ok. 19050 */ 19051 if (is_async == FALSE) { 19052 (void) biowait(bp); 19053 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 19054 } 19055 19056 return (rval); 19057 } 19058 19059 19060 static int 19061 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 19062 { 19063 struct sd_uscsi_info *uip; 19064 struct uscsi_cmd *uscmd; 19065 uint8_t *sense_buf; 19066 struct sd_lun *un; 19067 int status; 19068 union scsi_cdb *cdb; 19069 19070 uip = (struct sd_uscsi_info *)(bp->b_private); 19071 ASSERT(uip != NULL); 19072 19073 uscmd = uip->ui_cmdp; 19074 ASSERT(uscmd != NULL); 19075 19076 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 19077 ASSERT(sense_buf != NULL); 19078 19079 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 19080 ASSERT(un != NULL); 19081 19082 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 19083 19084 status = geterror(bp); 19085 switch (status) { 19086 case 0: 19087 break; /* Success! 
*/
19088 case EIO:
19089 switch (uscmd->uscsi_status) {
19090 case STATUS_RESERVATION_CONFLICT:
19091 /* Ignore reservation conflict */
19092 status = 0;
19093 goto done;
19094
19095 case STATUS_CHECK:
19096 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
19097 (scsi_sense_key(sense_buf) ==
19098 KEY_ILLEGAL_REQUEST)) {
19099 /* Ignore Illegal Request error */
19100 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) {
19101 mutex_enter(SD_MUTEX(un));
19102 un->un_f_sync_nv_supported = FALSE;
19103 mutex_exit(SD_MUTEX(un));
19104 status = 0;
19105 SD_TRACE(SD_LOG_IO, un,
19106 "un_f_sync_nv_supported \
19107 is set to false.\n");
19108 goto done;
19109 }
19110
19111 mutex_enter(SD_MUTEX(un));
19112 un->un_f_sync_cache_supported = FALSE;
19113 mutex_exit(SD_MUTEX(un));
19114 SD_TRACE(SD_LOG_IO, un,
19115 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
19116 un_f_sync_cache_supported set to false \
19117 with asc = %x, ascq = %x\n",
19118 scsi_sense_asc(sense_buf),
19119 scsi_sense_ascq(sense_buf));
19120 status = ENOTSUP;
19121 goto done;
19122 }
19123 break;
19124 default:
19125 break;
19126 }
19127 /* FALLTHRU */
19128 default:
19129 /*
19130 * Don't log an error message if this device
19131 * has removable media.
19132 */
19133 if (!un->un_f_has_removable_media) {
19134 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19135 "SYNCHRONIZE CACHE command failed (%d)\n", status);
19136 }
19137 break;
19138 }
19139
19140 done:
19141 if (uip->ui_dkc.dkc_callback != NULL) {
19142 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
19143 }
19144
19145 ASSERT((bp->b_flags & B_REMAPPED) == 0);
19146 freerbuf(bp);
19147 kmem_free(uip, sizeof (struct sd_uscsi_info));
19148 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
19149 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
19150 kmem_free(uscmd, sizeof (struct uscsi_cmd));
19151
19152 return (status);
19153 }
19154
19155
19156 /*
19157 * Function: sd_send_scsi_GET_CONFIGURATION
19158 *
19159 * Description: Issues the get configuration command to the device.
19160 * Called from sd_check_for_writable_cd & sd_get_media_info;
19161 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
19162 * Arguments: un
19163 * ucmdbuf
19164 * rqbuf
19165 * rqbuflen
19166 * bufaddr
19167 * buflen
19168 * path_flag
19169 *
19170 * Return Code: 0 - Success
19171 * errno return code from sd_send_scsi_cmd()
19172 *
19173 * Context: Can sleep. Does not return until command is completed.
19174 *
19175 */
19176
19177 static int
19178 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
19179 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
19180 int path_flag)
19181 {
19182 char cdb[CDB_GROUP1];
19183 int status;
19184
19185 ASSERT(un != NULL);
19186 ASSERT(!mutex_owned(SD_MUTEX(un)));
19187 ASSERT(bufaddr != NULL);
19188 ASSERT(ucmdbuf != NULL);
19189 ASSERT(rqbuf != NULL);
19190
19191 SD_TRACE(SD_LOG_IO, un,
19192 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
19193
19194 bzero(cdb, sizeof (cdb));
19195 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
19196 bzero(rqbuf, rqbuflen);
19197 bzero(bufaddr, buflen);
19198
19199 /*
19200 * Set up cdb field for the get configuration command.
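 *
 * For reference (an informal sketch per MMC, not taken from this
 * code): cdb[1] = 0x02 sets the RT field to return the header plus
 * only the single feature named by the starting-feature-number field
 * (bytes 2-3, left at zero here), and the allocation length lives in
 * bytes 7-8, of which only cdb[8] needs to be set for these small
 * lengths.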
19201 */ 19202 cdb[0] = SCMD_GET_CONFIGURATION; 19203 cdb[1] = 0x02; /* Requested Type */ 19204 cdb[8] = SD_PROFILE_HEADER_LEN; 19205 ucmdbuf->uscsi_cdb = cdb; 19206 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19207 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19208 ucmdbuf->uscsi_buflen = buflen; 19209 ucmdbuf->uscsi_timeout = sd_io_time; 19210 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19211 ucmdbuf->uscsi_rqlen = rqbuflen; 19212 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19213 19214 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19215 UIO_SYSSPACE, path_flag); 19216 19217 switch (status) { 19218 case 0: 19219 break; /* Success! */ 19220 case EIO: 19221 switch (ucmdbuf->uscsi_status) { 19222 case STATUS_RESERVATION_CONFLICT: 19223 status = EACCES; 19224 break; 19225 default: 19226 break; 19227 } 19228 break; 19229 default: 19230 break; 19231 } 19232 19233 if (status == 0) { 19234 SD_DUMP_MEMORY(un, SD_LOG_IO, 19235 "sd_send_scsi_GET_CONFIGURATION: data", 19236 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19237 } 19238 19239 SD_TRACE(SD_LOG_IO, un, 19240 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19241 19242 return (status); 19243 } 19244 19245 /* 19246 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19247 * 19248 * Description: Issues the get configuration command to the device to 19249 * retrieve a specific feature. Called from 19250 * sd_check_for_writable_cd & sd_set_mmc_caps. 19251 * Arguments: un 19252 * ucmdbuf 19253 * rqbuf 19254 * rqbuflen 19255 * bufaddr 19256 * buflen 19257 * feature 19258 * 19259 * Return Code: 0 - Success 19260 * errno return code from sd_send_scsi_cmd() 19261 * 19262 * Context: Can sleep. Does not return until command is completed. 19263 * 19264 */ 19265 static int 19266 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19267 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19268 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19269 { 19270 char cdb[CDB_GROUP1]; 19271 int status; 19272 19273 ASSERT(un != NULL); 19274 ASSERT(!mutex_owned(SD_MUTEX(un))); 19275 ASSERT(bufaddr != NULL); 19276 ASSERT(ucmdbuf != NULL); 19277 ASSERT(rqbuf != NULL); 19278 19279 SD_TRACE(SD_LOG_IO, un, 19280 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19281 19282 bzero(cdb, sizeof (cdb)); 19283 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19284 bzero(rqbuf, rqbuflen); 19285 bzero(bufaddr, buflen); 19286 19287 /* 19288 * Set up cdb field for the get configuration command. 19289 */ 19290 cdb[0] = SCMD_GET_CONFIGURATION; 19291 cdb[1] = 0x02; /* Requested Type */ 19292 cdb[3] = feature; 19293 cdb[8] = buflen; 19294 ucmdbuf->uscsi_cdb = cdb; 19295 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19296 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19297 ucmdbuf->uscsi_buflen = buflen; 19298 ucmdbuf->uscsi_timeout = sd_io_time; 19299 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19300 ucmdbuf->uscsi_rqlen = rqbuflen; 19301 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19302 19303 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19304 UIO_SYSSPACE, path_flag); 19305 19306 switch (status) { 19307 case 0: 19308 break; /* Success! 
*/
19309 case EIO:
19310 switch (ucmdbuf->uscsi_status) {
19311 case STATUS_RESERVATION_CONFLICT:
19312 status = EACCES;
19313 break;
19314 default:
19315 break;
19316 }
19317 break;
19318 default:
19319 break;
19320 }
19321
19322 if (status == 0) {
19323 SD_DUMP_MEMORY(un, SD_LOG_IO,
19324 "sd_send_scsi_feature_GET_CONFIGURATION: data",
19325 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
19326 }
19327
19328 SD_TRACE(SD_LOG_IO, un,
19329 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
19330
19331 return (status);
19332 }
19333
19334
19335 /*
19336 * Function: sd_send_scsi_MODE_SENSE
19337 *
19338 * Description: Utility function for issuing a scsi MODE SENSE command.
19339 * Note: This routine uses a consistent implementation for Group0,
19340 * Group1, and Group2 commands across all platforms. ATAPI devices
19341 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19342 *
19343 * Arguments: un - pointer to the softstate struct for the target.
19344 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
19345 * CDB_GROUP[1|2] (10 byte)).
19346 * bufaddr - buffer for page data retrieved from the target.
19347 * buflen - size of page to be retrieved.
19348 * page_code - page code of data to be retrieved from the target.
19349 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19350 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19351 * to use the USCSI "direct" chain and bypass the normal
19352 * command waitq.
19353 *
19354 * Return Code: 0 - Success
19355 * errno return code from sd_send_scsi_cmd()
19356 *
19357 * Context: Can sleep. Does not return until command is completed.
19358 */
19359
19360 static int
19361 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19362 size_t buflen, uchar_t page_code, int path_flag)
19363 {
19364 struct scsi_extended_sense sense_buf;
19365 union scsi_cdb cdb;
19366 struct uscsi_cmd ucmd_buf;
19367 int status;
19368 int headlen;
19369
19370 ASSERT(un != NULL);
19371 ASSERT(!mutex_owned(SD_MUTEX(un)));
19372 ASSERT(bufaddr != NULL);
19373 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19374 (cdbsize == CDB_GROUP2));
19375
19376 SD_TRACE(SD_LOG_IO, un,
19377 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
19378
19379 bzero(&cdb, sizeof (cdb));
19380 bzero(&ucmd_buf, sizeof (ucmd_buf));
19381 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19382 bzero(bufaddr, buflen);
19383
19384 if (cdbsize == CDB_GROUP0) {
19385 cdb.scc_cmd = SCMD_MODE_SENSE;
19386 cdb.cdb_opaque[2] = page_code;
19387 FORMG0COUNT(&cdb, buflen);
19388 headlen = MODE_HEADER_LENGTH;
19389 } else {
19390 cdb.scc_cmd = SCMD_MODE_SENSE_G1;
19391 cdb.cdb_opaque[2] = page_code;
19392 FORMG1COUNT(&cdb, buflen);
19393 headlen = MODE_HEADER_LENGTH_GRP2;
19394 }
19395
19396 ASSERT(headlen <= buflen);
19397 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19398
19399 ucmd_buf.uscsi_cdb = (char *)&cdb;
19400 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19401 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19402 ucmd_buf.uscsi_buflen = buflen;
19403 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19404 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19405 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19406 ucmd_buf.uscsi_timeout = 60;
19407
19408 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19409 UIO_SYSSPACE, path_flag);
19410
19411 switch (status) {
19412 case 0:
19413 /*
19414 * sr_check_wp() uses 0x3f page code and checks the header of
19415 * the mode page to determine if the target device is
write-protected.
19416 * But some USB devices return 0 bytes for 0x3f page code. For
19417 * this case, make sure that at least the mode page header is
19418 * returned.
19419 */
19420 if (buflen - ucmd_buf.uscsi_resid < headlen)
19421 status = EIO;
19422 break; /* Success! */
19423 case EIO:
19424 switch (ucmd_buf.uscsi_status) {
19425 case STATUS_RESERVATION_CONFLICT:
19426 status = EACCES;
19427 break;
19428 default:
19429 break;
19430 }
19431 break;
19432 default:
19433 break;
19434 }
19435
19436 if (status == 0) {
19437 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
19438 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19439 }
19440 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
19441
19442 return (status);
19443 }
19444
19445
19446 /*
19447 * Function: sd_send_scsi_MODE_SELECT
19448 *
19449 * Description: Utility function for issuing a scsi MODE SELECT command.
19450 * Note: This routine uses a consistent implementation for Group0,
19451 * Group1, and Group2 commands across all platforms. ATAPI devices
19452 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19453 *
19454 * Arguments: un - pointer to the softstate struct for the target.
19455 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
19456 * CDB_GROUP[1|2] (10 byte)).
19457 * bufaddr - buffer containing the page data to be sent to the target.
19458 * buflen - size of the page to be sent.
19459 * save_page - boolean to determine if SP bit should be set.
19460 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19461 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19462 * to use the USCSI "direct" chain and bypass the normal
19463 * command waitq.
19464 *
19465 * Return Code: 0 - Success
19466 * errno return code from sd_send_scsi_cmd()
19467 *
19468 * Context: Can sleep. Does not return until command is completed.
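 *
 * Typical usage (an informal sketch, not lifted from a particular
 * caller): fetch the current settings with sd_send_scsi_MODE_SENSE(),
 * modify the fields of interest in the returned page, zero the mode
 * data length field in the header, and pass the same buffer back
 * through this routine, with save_page set to SD_SAVE_PAGE if the
 * change should persist across power cycles.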
19469 */
19470
19471 static int
19472 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19473 size_t buflen, uchar_t save_page, int path_flag)
19474 {
19475 struct scsi_extended_sense sense_buf;
19476 union scsi_cdb cdb;
19477 struct uscsi_cmd ucmd_buf;
19478 int status;
19479
19480 ASSERT(un != NULL);
19481 ASSERT(!mutex_owned(SD_MUTEX(un)));
19482 ASSERT(bufaddr != NULL);
19483 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19484 (cdbsize == CDB_GROUP2));
19485
19486 SD_TRACE(SD_LOG_IO, un,
19487 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
19488
19489 bzero(&cdb, sizeof (cdb));
19490 bzero(&ucmd_buf, sizeof (ucmd_buf));
19491 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19492
19493 /* Set the PF bit for many third party drives */
19494 cdb.cdb_opaque[1] = 0x10;
19495
19496 /* Set the savepage(SP) bit if given */
19497 if (save_page == SD_SAVE_PAGE) {
19498 cdb.cdb_opaque[1] |= 0x01;
19499 }
19500
19501 if (cdbsize == CDB_GROUP0) {
19502 cdb.scc_cmd = SCMD_MODE_SELECT;
19503 FORMG0COUNT(&cdb, buflen);
19504 } else {
19505 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
19506 FORMG1COUNT(&cdb, buflen);
19507 }
19508
19509 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19510
19511 ucmd_buf.uscsi_cdb = (char *)&cdb;
19512 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19513 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19514 ucmd_buf.uscsi_buflen = buflen;
19515 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19516 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19517 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
19518 ucmd_buf.uscsi_timeout = 60;
19519
19520 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19521 UIO_SYSSPACE, path_flag);
19522
19523 switch (status) {
19524 case 0:
19525 break; /* Success! */
19526 case EIO:
19527 switch (ucmd_buf.uscsi_status) {
19528 case STATUS_RESERVATION_CONFLICT:
19529 status = EACCES;
19530 break;
19531 default:
19532 break;
19533 }
19534 break;
19535 default:
19536 break;
19537 }
19538
19539 if (status == 0) {
19540 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
19541 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19542 }
19543 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
19544
19545 return (status);
19546 }
19547
19548
19549 /*
19550 * Function: sd_send_scsi_RDWR
19551 *
19552 * Description: Issue a scsi READ or WRITE command with the given parameters.
19553 *
19554 * Arguments: un: Pointer to the sd_lun struct for the target.
19555 * cmd: SCMD_READ or SCMD_WRITE
19556 * bufaddr: Address of caller's buffer to receive the RDWR data
19557 * buflen: Length of caller's buffer to receive the RDWR data.
19558 * start_block: Block number for the start of the RDWR operation.
19559 * (Assumes target-native block size.)
19560 * (Note: unlike some of the other sd_send_scsi_* routines, this
19561 * one does not return a residual count.)
19562 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19563 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19564 * to use the USCSI "direct" chain and bypass the normal
19565 * command waitq.
19566 *
19567 * Return Code: 0 - Success
19568 * errno return code from sd_send_scsi_cmd()
19569 *
19570 * Context: Can sleep. Does not return until command is completed.
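 *
 * A reference note on the CDB-size selection done below (per SBC, not
 * from this code): 6-byte group 0 CDBs carry only a 21-bit LBA and an
 * 8-bit block count -- hence the 0xFFE00000 mask test -- while 10-byte
 * group 1 CDBs extend this to a 32-bit LBA with a 16-bit count, and
 * 16-byte group 4 CDBs to a 64-bit LBA with a 32-bit count.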
19571 */ 19572 19573 static int 19574 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19575 size_t buflen, daddr_t start_block, int path_flag) 19576 { 19577 struct scsi_extended_sense sense_buf; 19578 union scsi_cdb cdb; 19579 struct uscsi_cmd ucmd_buf; 19580 uint32_t block_count; 19581 int status; 19582 int cdbsize; 19583 uchar_t flag; 19584 19585 ASSERT(un != NULL); 19586 ASSERT(!mutex_owned(SD_MUTEX(un))); 19587 ASSERT(bufaddr != NULL); 19588 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19589 19590 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19591 19592 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19593 return (EINVAL); 19594 } 19595 19596 mutex_enter(SD_MUTEX(un)); 19597 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19598 mutex_exit(SD_MUTEX(un)); 19599 19600 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19601 19602 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19603 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19604 bufaddr, buflen, start_block, block_count); 19605 19606 bzero(&cdb, sizeof (cdb)); 19607 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19608 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19609 19610 /* Compute CDB size to use */ 19611 if (start_block > 0xffffffff) 19612 cdbsize = CDB_GROUP4; 19613 else if ((start_block & 0xFFE00000) || 19614 (un->un_f_cfg_is_atapi == TRUE)) 19615 cdbsize = CDB_GROUP1; 19616 else 19617 cdbsize = CDB_GROUP0; 19618 19619 switch (cdbsize) { 19620 case CDB_GROUP0: /* 6-byte CDBs */ 19621 cdb.scc_cmd = cmd; 19622 FORMG0ADDR(&cdb, start_block); 19623 FORMG0COUNT(&cdb, block_count); 19624 break; 19625 case CDB_GROUP1: /* 10-byte CDBs */ 19626 cdb.scc_cmd = cmd | SCMD_GROUP1; 19627 FORMG1ADDR(&cdb, start_block); 19628 FORMG1COUNT(&cdb, block_count); 19629 break; 19630 case CDB_GROUP4: /* 16-byte CDBs */ 19631 cdb.scc_cmd = cmd | SCMD_GROUP4; 19632 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19633 FORMG4COUNT(&cdb, block_count); 19634 break; 19635 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19636 default: 19637 /* All others reserved */ 19638 return (EINVAL); 19639 } 19640 19641 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19642 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19643 19644 ucmd_buf.uscsi_cdb = (char *)&cdb; 19645 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19646 ucmd_buf.uscsi_bufaddr = bufaddr; 19647 ucmd_buf.uscsi_buflen = buflen; 19648 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19649 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19650 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19651 ucmd_buf.uscsi_timeout = 60; 19652 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19653 UIO_SYSSPACE, path_flag); 19654 switch (status) { 19655 case 0: 19656 break; /* Success! */ 19657 case EIO: 19658 switch (ucmd_buf.uscsi_status) { 19659 case STATUS_RESERVATION_CONFLICT: 19660 status = EACCES; 19661 break; 19662 default: 19663 break; 19664 } 19665 break; 19666 default: 19667 break; 19668 } 19669 19670 if (status == 0) { 19671 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19672 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19673 } 19674 19675 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19676 19677 return (status); 19678 } 19679 19680 19681 /* 19682 * Function: sd_send_scsi_LOG_SENSE 19683 * 19684 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19685 * 19686 * Arguments: un: Pointer to the sd_lun struct for the target. 
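 *		(The remaining arguments are summarized here from the
 *		function signature and the CDB setup below:)
 *		bufaddr - buffer to receive the log page data
 *		buflen - length of bufaddr, in bytes
 *		page_code - log page to be retrieved
 *		page_control - page control field (shifted into the
 *		upper two bits of CDB byte 2 below)
 *		param_ptr - parameter pointer: first parameter code to
 *		be returned (CDB bytes 5-6)
 *		path_flag - SD_PATH_* chain selection, as for the other
 *		sd_send_scsi_* routines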
19687 * 19688 * Return Code: 0 - Success 19689 * errno return code from sd_send_scsi_cmd() 19690 * 19691 * Context: Can sleep. Does not return until command is completed. 19692 */ 19693 19694 static int 19695 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19696 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19697 int path_flag) 19698 19699 { 19700 struct scsi_extended_sense sense_buf; 19701 union scsi_cdb cdb; 19702 struct uscsi_cmd ucmd_buf; 19703 int status; 19704 19705 ASSERT(un != NULL); 19706 ASSERT(!mutex_owned(SD_MUTEX(un))); 19707 19708 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19709 19710 bzero(&cdb, sizeof (cdb)); 19711 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19712 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19713 19714 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19715 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19716 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19717 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19718 FORMG1COUNT(&cdb, buflen); 19719 19720 ucmd_buf.uscsi_cdb = (char *)&cdb; 19721 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19722 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19723 ucmd_buf.uscsi_buflen = buflen; 19724 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19725 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19726 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19727 ucmd_buf.uscsi_timeout = 60; 19728 19729 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19730 UIO_SYSSPACE, path_flag); 19731 19732 switch (status) { 19733 case 0: 19734 break; 19735 case EIO: 19736 switch (ucmd_buf.uscsi_status) { 19737 case STATUS_RESERVATION_CONFLICT: 19738 status = EACCES; 19739 break; 19740 case STATUS_CHECK: 19741 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19742 (scsi_sense_key((uint8_t *)&sense_buf) == 19743 KEY_ILLEGAL_REQUEST) && 19744 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19745 /* 19746 * ASC 0x24: INVALID FIELD IN CDB 19747 */ 19748 switch (page_code) { 19749 case START_STOP_CYCLE_PAGE: 19750 /* 19751 * The start stop cycle counter is 19752 * implemented as page 0x31 in earlier 19753 * generation disks. In new generation 19754 * disks the start stop cycle counter is 19755 * implemented as page 0xE. To properly 19756 * handle this case if an attempt for 19757 * log page 0xE is made and fails we 19758 * will try again using page 0x31. 19759 * 19760 * Network storage BU committed to 19761 * maintain the page 0x31 for this 19762 * purpose and will not have any other 19763 * page implemented with page code 0x31 19764 * until all disks transition to the 19765 * standard page. 
19766 */ 19767 mutex_enter(SD_MUTEX(un)); 19768 un->un_start_stop_cycle_page = 19769 START_STOP_CYCLE_VU_PAGE; 19770 cdb.cdb_opaque[2] = 19771 (char)(page_control << 6) | 19772 un->un_start_stop_cycle_page; 19773 mutex_exit(SD_MUTEX(un)); 19774 status = sd_send_scsi_cmd( 19775 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19776 UIO_SYSSPACE, path_flag); 19777 19778 break; 19779 case TEMPERATURE_PAGE: 19780 status = ENOTTY; 19781 break; 19782 default: 19783 break; 19784 } 19785 } 19786 break; 19787 default: 19788 break; 19789 } 19790 break; 19791 default: 19792 break; 19793 } 19794 19795 if (status == 0) { 19796 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19797 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19798 } 19799 19800 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19801 19802 return (status); 19803 } 19804 19805 19806 /* 19807 * Function: sdioctl 19808 * 19809 * Description: Driver's ioctl(9e) entry point function. 19810 * 19811 * Arguments: dev - device number 19812 * cmd - ioctl operation to be performed 19813 * arg - user argument, contains data to be set or reference 19814 * parameter for get 19815 * flag - bit flag, indicating open settings, 32/64 bit type 19816 * cred_p - user credential pointer 19817 * rval_p - calling process return value (OPT) 19818 * 19819 * Return Code: EINVAL 19820 * ENOTTY 19821 * ENXIO 19822 * EIO 19823 * EFAULT 19824 * ENOTSUP 19825 * EPERM 19826 * 19827 * Context: Called from the device switch at normal priority. 19828 */ 19829 19830 static int 19831 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19832 { 19833 struct sd_lun *un = NULL; 19834 int err = 0; 19835 int i = 0; 19836 cred_t *cr; 19837 int tmprval = EINVAL; 19838 int is_valid; 19839 19840 /* 19841 * All device accesses go thru sdstrategy where we check on suspend 19842 * status 19843 */ 19844 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19845 return (ENXIO); 19846 } 19847 19848 ASSERT(!mutex_owned(SD_MUTEX(un))); 19849 19850 19851 is_valid = SD_IS_VALID_LABEL(un); 19852 19853 /* 19854 * Moved this wait from sd_uscsi_strategy to here for 19855 * reasons of deadlock prevention. Internal driver commands, 19856 * specifically those to change a devices power level, result 19857 * in a call to sd_uscsi_strategy. 19858 */ 19859 mutex_enter(SD_MUTEX(un)); 19860 while ((un->un_state == SD_STATE_SUSPENDED) || 19861 (un->un_state == SD_STATE_PM_CHANGING)) { 19862 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19863 } 19864 /* 19865 * Twiddling the counter here protects commands from now 19866 * through to the top of sd_uscsi_strategy. Without the 19867 * counter inc. a power down, for example, could get in 19868 * after the above check for state is made and before 19869 * execution gets to the top of sd_uscsi_strategy. 19870 * That would cause problems. 
19871 */ 19872 un->un_ncmds_in_driver++; 19873 19874 if (!is_valid && 19875 (flag & (FNDELAY | FNONBLOCK))) { 19876 switch (cmd) { 19877 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19878 case DKIOCGVTOC: 19879 case DKIOCGAPART: 19880 case DKIOCPARTINFO: 19881 case DKIOCSGEOM: 19882 case DKIOCSAPART: 19883 case DKIOCGETEFI: 19884 case DKIOCPARTITION: 19885 case DKIOCSVTOC: 19886 case DKIOCSETEFI: 19887 case DKIOCGMBOOT: 19888 case DKIOCSMBOOT: 19889 case DKIOCG_PHYGEOM: 19890 case DKIOCG_VIRTGEOM: 19891 /* let cmlb handle it */ 19892 goto skip_ready_valid; 19893 19894 case CDROMPAUSE: 19895 case CDROMRESUME: 19896 case CDROMPLAYMSF: 19897 case CDROMPLAYTRKIND: 19898 case CDROMREADTOCHDR: 19899 case CDROMREADTOCENTRY: 19900 case CDROMSTOP: 19901 case CDROMSTART: 19902 case CDROMVOLCTRL: 19903 case CDROMSUBCHNL: 19904 case CDROMREADMODE2: 19905 case CDROMREADMODE1: 19906 case CDROMREADOFFSET: 19907 case CDROMSBLKMODE: 19908 case CDROMGBLKMODE: 19909 case CDROMGDRVSPEED: 19910 case CDROMSDRVSPEED: 19911 case CDROMCDDA: 19912 case CDROMCDXA: 19913 case CDROMSUBCODE: 19914 if (!ISCD(un)) { 19915 un->un_ncmds_in_driver--; 19916 ASSERT(un->un_ncmds_in_driver >= 0); 19917 mutex_exit(SD_MUTEX(un)); 19918 return (ENOTTY); 19919 } 19920 break; 19921 case FDEJECT: 19922 case DKIOCEJECT: 19923 case CDROMEJECT: 19924 if (!un->un_f_eject_media_supported) { 19925 un->un_ncmds_in_driver--; 19926 ASSERT(un->un_ncmds_in_driver >= 0); 19927 mutex_exit(SD_MUTEX(un)); 19928 return (ENOTTY); 19929 } 19930 break; 19931 case DKIOCFLUSHWRITECACHE: 19932 mutex_exit(SD_MUTEX(un)); 19933 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19934 if (err != 0) { 19935 mutex_enter(SD_MUTEX(un)); 19936 un->un_ncmds_in_driver--; 19937 ASSERT(un->un_ncmds_in_driver >= 0); 19938 mutex_exit(SD_MUTEX(un)); 19939 return (EIO); 19940 } 19941 mutex_enter(SD_MUTEX(un)); 19942 /* FALLTHROUGH */ 19943 case DKIOCREMOVABLE: 19944 case DKIOCHOTPLUGGABLE: 19945 case DKIOCINFO: 19946 case DKIOCGMEDIAINFO: 19947 case MHIOCENFAILFAST: 19948 case MHIOCSTATUS: 19949 case MHIOCTKOWN: 19950 case MHIOCRELEASE: 19951 case MHIOCGRP_INKEYS: 19952 case MHIOCGRP_INRESV: 19953 case MHIOCGRP_REGISTER: 19954 case MHIOCGRP_RESERVE: 19955 case MHIOCGRP_PREEMPTANDABORT: 19956 case MHIOCGRP_REGISTERANDIGNOREKEY: 19957 case CDROMCLOSETRAY: 19958 case USCSICMD: 19959 goto skip_ready_valid; 19960 default: 19961 break; 19962 } 19963 19964 mutex_exit(SD_MUTEX(un)); 19965 err = sd_ready_and_valid(un); 19966 mutex_enter(SD_MUTEX(un)); 19967 19968 if (err != SD_READY_VALID) { 19969 switch (cmd) { 19970 case DKIOCSTATE: 19971 case CDROMGDRVSPEED: 19972 case CDROMSDRVSPEED: 19973 case FDEJECT: /* for eject command */ 19974 case DKIOCEJECT: 19975 case CDROMEJECT: 19976 case DKIOCREMOVABLE: 19977 case DKIOCHOTPLUGGABLE: 19978 break; 19979 default: 19980 if (un->un_f_has_removable_media) { 19981 err = ENXIO; 19982 } else { 19983 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19984 if (err == SD_RESERVED_BY_OTHERS) { 19985 err = EACCES; 19986 } else { 19987 err = EIO; 19988 } 19989 } 19990 un->un_ncmds_in_driver--; 19991 ASSERT(un->un_ncmds_in_driver >= 0); 19992 mutex_exit(SD_MUTEX(un)); 19993 return (err); 19994 } 19995 } 19996 } 19997 19998 skip_ready_valid: 19999 mutex_exit(SD_MUTEX(un)); 20000 20001 switch (cmd) { 20002 case DKIOCINFO: 20003 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20004 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20005 break; 20006 20007 case DKIOCGMEDIAINFO: 20008 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20009 err = sd_get_media_info(dev, 
		break;

	case DKIOCGGEOM:
	case DKIOCGVTOC:
	case DKIOCGAPART:
	case DKIOCPARTINFO:
	case DKIOCSGEOM:
	case DKIOCSAPART:
	case DKIOCGETEFI:
	case DKIOCPARTITION:
	case DKIOCSVTOC:
	case DKIOCSETEFI:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);

		/* TUR should spin up */

		if (un->un_f_has_removable_media)
			err = sd_send_scsi_TEST_UNIT_READY(un,
			    SD_CHECK_FOR_MEDIA);
		else
			err = sd_send_scsi_TEST_UNIT_READY(un, 0);

		if (err != 0)
			break;

		err = cmlb_ioctl(un->un_cmlbhandle, dev,
		    cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);

		if ((err == 0) &&
		    ((cmd == DKIOCSETEFI) ||
		    ((un->un_f_pkstats_enabled) &&
		    (cmd == DKIOCSAPART || cmd == DKIOCSVTOC)))) {

			tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
			    (void *)SD_PATH_DIRECT);
			if ((tmprval == 0) && un->un_f_pkstats_enabled) {
				sd_set_pstats(un);
				SD_TRACE(SD_LOG_IO_PARTITION, un,
				    "sd_ioctl: un:0x%p pstats created and "
				    "set\n", un);
			}
		}

		if ((cmd == DKIOCSVTOC) ||
		    ((cmd == DKIOCSETEFI) && (tmprval == 0))) {

			mutex_enter(SD_MUTEX(un));
			if (un->un_f_devid_supported &&
			    (un->un_f_opt_fab_devid == TRUE)) {
				if (un->un_devid == NULL) {
					sd_register_devid(un, SD_DEVINFO(un),
					    SD_TARGET_IS_UNRESERVED);
				} else {
					/*
					 * The device id for this disk
					 * has been fabricated. The
					 * device id must be preserved
					 * by writing it back out to
					 * disk.
					 */
					if (sd_write_deviceid(un) != 0) {
						ddi_devid_free(un->un_devid);
						un->un_devid = NULL;
					}
				}
			}
			mutex_exit(SD_MUTEX(un));
		}

		break;

	case DKIOCLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
		err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
		    SD_PATH_STANDARD);
		break;

	case DKIOCUNLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
		err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW,
		    SD_PATH_STANDARD);
		break;

	case DKIOCSTATE: {
		enum dkio_state	state;
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = sd_check_media(dev, state);
			if (err == 0) {
				if (ddi_copyout(&un->un_mediastate, (void *)arg,
				    sizeof (int), flag) != 0)
					err = EFAULT;
			}
		}
		break;
	}

	case DKIOCREMOVABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
		i = un->un_f_has_removable_media ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCHOTPLUGGABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
		i = un->un_f_is_hotpluggable ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCGTEMPERATURE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
		err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
		break;

	case MHIOCENFAILFAST:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCTKOWN:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCRELEASE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_release(dev);
		}
		break;

	case MHIOCSTATUS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
		if ((err = drv_priv(cred_p)) == 0) {
			switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) {
			case 0:
				err = 0;
				break;
			case EACCES:
				*rval_p = 1;
				err = 0;
				break;
			default:
				err = EIO;
				break;
			}
		}
		break;

	case MHIOCQRESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_reserve_release(dev, SD_RESERVE);
		}
		break;

	case MHIOCREREGISTERDEVID:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
		if (drv_priv(cred_p) == EPERM) {
			err = EPERM;
		} else if (!un->un_f_devid_supported) {
			err = ENOTTY;
		} else {
			err = sd_mhdioc_register_devid(dev);
		}
		break;

	case MHIOCGRP_INKEYS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
				    flag);
			}
		}
		break;

	case MHIOCGRP_INRESV:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
			}
		}
		break;

	case MHIOCGRP_REGISTER:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_register_t reg;
				if (ddi_copyin((void *)arg, &reg,
				    sizeof (mhioc_register_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    un, SD_SCSI3_REGISTER,
					    (uchar_t *)&reg);
				}
			}
		}
		break;

	case MHIOCGRP_RESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_resv_desc_t resv_desc;
				if (ddi_copyin((void *)arg, &resv_desc,
				    sizeof (mhioc_resv_desc_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    un, SD_SCSI3_RESERVE,
					    (uchar_t *)&resv_desc);
				}
			}
		}
		break;

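	/*
	 * A typical (hypothetical) userland sequence for the MHIOCGRP_*
	 * ioctls handled here, with setup and error handling omitted:
	 *
	 *	mhioc_register_t reg = { ... };
	 *	(void) ioctl(fd, MHIOCGRP_REGISTER, &reg);
	 *	mhioc_resv_desc_t rd = { ... };
	 *	(void) ioctl(fd, MHIOCGRP_RESERVE, &rd);
	 *
	 * During failover, a surviving host preempts a failed host's
	 * registration with MHIOCGRP_PREEMPTANDABORT (handled below).
	 */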
	case MHIOCGRP_PREEMPTANDABORT:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_preemptandabort_t preempt_abort;
				if (ddi_copyin((void *)arg, &preempt_abort,
				    sizeof (mhioc_preemptandabort_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    un, SD_SCSI3_PREEMPTANDABORT,
					    (uchar_t *)&preempt_abort);
				}
			}
		}
		break;

	case MHIOCGRP_REGISTERANDIGNOREKEY:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_registerandignorekey_t r_and_i;
				if (ddi_copyin((void *)arg, (void *)&r_and_i,
				    sizeof (mhioc_registerandignorekey_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    un, SD_SCSI3_REGISTERANDIGNOREKEY,
					    (uchar_t *)&r_and_i);
				}
			}
		}
		break;

	case USCSICMD:
		SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
		cr = ddi_get_cred();
		if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
			err = EPERM;
		} else {
			enum uio_seg uioseg;
			uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
			    UIO_USERSPACE;
			if (un->un_f_format_in_progress == TRUE) {
				err = EAGAIN;
				break;
			}
			err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg,
			    flag, uioseg, SD_PATH_STANDARD);
		}
		break;

	case CDROMPAUSE:
	case CDROMRESUME:
		SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_pause_resume(dev, cmd);
		}
		break;

	case CDROMPLAYMSF:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_play_msf(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMPLAYTRKIND:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
#else
		if (!ISCD(un)) {
#endif
			err = ENOTTY;
		} else {
			err = sr_play_trkind(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCHDR:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tochdr(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCENTRY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tocentry(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSTOP:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP,
			    SD_PATH_STANDARD);
		}
		break;

	case CDROMSTART:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
			    SD_PATH_STANDARD);
		}
		break;
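
	/*
	 * Note that CDROMSTOP and CDROMSTART above, and CDROMCLOSETRAY
	 * below, all map onto the SCSI START STOP UNIT command; only the
	 * SD_TARGET_* operation code passed to
	 * sd_send_scsi_START_STOP_UNIT() differs.
	 */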

	case CDROMCLOSETRAY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE,
			    SD_PATH_STANDARD);
		}
		break;

	case FDEJECT:	/* for eject command */
	case DKIOCEJECT:
	case CDROMEJECT:
		SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
		if (!un->un_f_eject_media_supported) {
			err = ENOTTY;
		} else {
			err = sr_eject(dev);
		}
		break;

	case CDROMVOLCTRL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCHNL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_subchannel(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE2:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			/*
			 * If the drive supports READ CD, use that instead of
			 * switching the LBA size via a MODE SELECT
			 * Block Descriptor.
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * ATAPI drives provide no means of changing the block size,
		 * so return ENOTTY if the drive type is ATAPI.
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {

			/*
			 * MMC devices do not support changing the
			 * logical block size.
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1 << SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive, so return the target block
			 * size for ATAPI drives, since we cannot change the
			 * blocksize on ATAPI drives. Used primarily to detect
			 * if an ATAPI cdrom is present.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;

	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * Note: In the future the driver implementation
			 * for getting and setting cd speed should entail:
			 * 1) If non-mmc try the Toshiba mode page
			 *    (sr_change_speed)
			 * 2) If mmc but no support for Real Time Streaming try
			 *    the SET CD SPEED (0xBB) command
			 *    (sr_atapi_change_speed)
			 * 3) If mmc and support for Real Time Streaming
			 *    try the GET PERFORMANCE and SET STREAMING
			 *    commands (not yet implemented, 4380808)
			 */
			/*
			 * As per the recent MMC spec, CD-ROM speed is variable
			 * and changes with LBA. Since there is no such thing
			 * as drive speed now, fail this ioctl.
			 *
			 * Note: EINVAL is returned for consistency with the
			 * original implementation, which included support for
			 * getting the drive speed of mmc devices but not
			 * setting the drive speed. Thus EINVAL would be
			 * returned if a set request was made for an mmc
			 * device. We no longer support get or set speed for
			 * mmc but need to remain consistent with regard
			 * to the error code returned.
			 */
			err = EINVAL;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			err = sr_atapi_change_speed(dev, cmd, arg, flag);
		} else {
			err = sr_change_speed(dev, cmd, arg, flag);
		}
		break;

	case CDROMCDDA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdda(dev, (void *)arg, flag);
		}
		break;

	case CDROMCDXA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdxa(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
		}
		break;


#ifdef SDDEBUG
	/* RESET/ABORTS testing ioctls */
	case DKIOCRESET: {
		int	reset_level;

		if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
			err = EFAULT;
		} else {
			SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
			    "reset_level = 0x%x\n", reset_level);
			if (scsi_reset(SD_ADDRESS(un), reset_level)) {
				err = 0;
			} else {
				err = EIO;
			}
		}
		break;
	}

	case DKIOCABORT:
		SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
		if (scsi_abort(SD_ADDRESS(un), NULL)) {
			err = 0;
		} else {
			err = EIO;
		}
		break;
#endif

#ifdef SD_FAULT_INJECTION
	/* SDIOC FaultInjection testing ioctls */
	case SDIOCSTART:
	case SDIOCSTOP:
	case SDIOCINSERTPKT:
	case SDIOCINSERTXB:
	case SDIOCINSERTUN:
	case SDIOCINSERTARQ:
	case SDIOCPUSH:
	case SDIOCRETRIEVE:
	case SDIOCRUN:
		SD_INFO(SD_LOG_SDTEST, un, "sdioctl: "
		    "SDIOC detected cmd:0x%X:\n", cmd);
		/* call error generator */
		sd_faultinjection_ioctl(cmd, arg, un);
		err = 0;
		break;

#endif /* SD_FAULT_INJECTION */

	case DKIOCFLUSHWRITECACHE:
		{
			struct dk_callback *dkc = (struct dk_callback *)arg;

			mutex_enter(SD_MUTEX(un));
			if (!un->un_f_sync_cache_supported ||
			    !un->un_f_write_cache_enabled) {
				err = un->un_f_sync_cache_supported ?
				    0 : ENOTSUP;
				mutex_exit(SD_MUTEX(un));
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				break;
			}
			mutex_exit(SD_MUTEX(un));

			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				/* async SYNC CACHE request */
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
			} else {
				/* synchronous SYNC CACHE request */
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
			}
		}
		break;

	case DKIOCGETWCE: {

		int wce;

		if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) {
			break;
		}

		if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
			err = EFAULT;
		}
		break;
	}

	case DKIOCSETWCE: {

		int wce, sync_supported;

		if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
			err = EFAULT;
			break;
		}

		/*
		 * Synchronize multiple threads trying to enable
		 * or disable the cache via the un_f_wcc_cv
		 * condition variable.
		 */
		mutex_enter(SD_MUTEX(un));

		/*
		 * Don't allow the cache to be enabled if the
		 * config file has it disabled.
		 */
		if (un->un_f_opt_disable_cache && wce) {
			mutex_exit(SD_MUTEX(un));
			err = EINVAL;
			break;
		}

		/*
		 * Wait for the write cache change in progress
		 * bit to be clear before proceeding.
		 */
		while (un->un_f_wcc_inprog)
			cv_wait(&un->un_wcc_cv, SD_MUTEX(un));

		un->un_f_wcc_inprog = 1;

		if (un->un_f_write_cache_enabled && wce == 0) {
			/*
			 * Disable the write cache. Don't clear
			 * un_f_write_cache_enabled until after
			 * the mode select and flush are complete.
			 */
			sync_supported = un->un_f_sync_cache_supported;

			/*
			 * If cache flush is suppressed, we assume that the
			 * controller firmware will take care of managing the
			 * write cache for us: no need to explicitly
			 * disable it.
			 */
			if (!un->un_f_suppress_cache_flush) {
				mutex_exit(SD_MUTEX(un));
				if ((err = sd_cache_control(un,
				    SD_CACHE_NOCHANGE,
				    SD_CACHE_DISABLE)) == 0 &&
				    sync_supported) {
					err = sd_send_scsi_SYNCHRONIZE_CACHE(un,
					    NULL);
				}
			} else {
				mutex_exit(SD_MUTEX(un));
			}

			mutex_enter(SD_MUTEX(un));
			if (err == 0) {
				un->un_f_write_cache_enabled = 0;
			}

		} else if (!un->un_f_write_cache_enabled && wce != 0) {
			/*
			 * Set un_f_write_cache_enabled first, so there is
			 * no window where the cache is enabled, but the
			 * bit says it isn't.
			 */
			un->un_f_write_cache_enabled = 1;

			/*
			 * If cache flush is suppressed, we assume that the
			 * controller firmware will take care of managing the
			 * write cache for us: no need to explicitly
			 * enable it.
			 */
			if (!un->un_f_suppress_cache_flush) {
				mutex_exit(SD_MUTEX(un));
				err = sd_cache_control(un, SD_CACHE_NOCHANGE,
				    SD_CACHE_ENABLE);
			} else {
				mutex_exit(SD_MUTEX(un));
			}

			mutex_enter(SD_MUTEX(un));

			if (err) {
				un->un_f_write_cache_enabled = 0;
			}
		}

		un->un_f_wcc_inprog = 0;
		cv_broadcast(&un->un_wcc_cv);
		mutex_exit(SD_MUTEX(un));
		break;
	}

	default:
		err = ENOTTY;
		break;
	}
	mutex_enter(SD_MUTEX(un));
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
	return (err);
}


/*
 * Function: sd_dkio_ctrl_info
 *
 * Description: This routine is the driver entry point for handling controller
 *		information ioctl requests (DKIOCINFO).
 *
 * Arguments: dev - the device number
 *		arg - pointer to user provided dk_cinfo structure
 *			specifying the controller type and attributes.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	struct dk_cinfo	*info;
	dev_info_t	*pdip;
	int		lun, tgt;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	info = (struct dk_cinfo *)
	    kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);

	switch (un->un_ctype) {
	case CTYPE_CDROM:
		info->dki_ctype = DKC_CDROM;
		break;
	default:
		info->dki_ctype = DKC_SCSI_CCS;
		break;
	}
	pdip = ddi_get_parent(SD_DEVINFO(un));
	info->dki_cnum = ddi_get_instance(pdip);
	if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
		(void) strcpy(info->dki_cname, ddi_get_name(pdip));
	} else {
		(void) strncpy(info->dki_cname, ddi_node_name(pdip),
		    DK_DEVLEN - 1);
	}

	lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);

	/* Unit Information */
	info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
	info->dki_slave = ((tgt << 3) | lun);
	(void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
	    DK_DEVLEN - 1);
	info->dki_flags = DKI_FMTVOL;
	info->dki_partition = SDPART(dev);

	/* Max Transfer size of this device in blocks */
	info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
	info->dki_addr = 0;
	info->dki_space = 0;
	info->dki_prio = 0;
	info->dki_vec = 0;

	if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (EFAULT);
	} else {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (0);
	}
}


/*
 * Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
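 *
 *		A minimal (hypothetical) userland sketch of the ioctl this
 *		routine services:
 *
 *			struct dk_minfo mi;
 *
 *			if (ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0)
 *				(void) printf("%u-byte blocks\n",
 *				    mi.dki_lbsize);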
 *
 * Arguments: dev	- the device number
 *		arg	- pointer to user provided dk_minfo structure
 *			  specifying the media type, logical block size and
 *			  drive capacity.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	struct dk_minfo		media_info;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	/* Issue a TUR to determine if the drive is ready with media present */
	rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		media_info.dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);

			if (rtn) {
				/*
				 * Failed for reasons other than an illegal
				 * request or command not supported
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto done;
					}
				}
			} else {
				/*
				 * The GET CONFIGURATION command succeeded
				 * so set the media type according to the
				 * returned data.
				 */
				media_info.dki_media_type = out_data[6];
				media_info.dki_media_type <<= 8;
				media_info.dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * The profile list is not available, so we attempt to identify
		 * the media type based on the inquiry data.
		 */
		sinq = un->un_sd->sd_inq;
		if ((sinq->inq_dtype == DTYPE_DIRECT) ||
		    (sinq->inq_dtype == DTYPE_OPTICAL)) {
			/* This is a direct access device or optical disk */
			media_info.dki_media_type = DK_FIXED_DISK;

			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					media_info.dki_media_type = DK_ZIP;
				} else if (
				    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					media_info.dki_media_type = DK_JAZ;
				}
			}
		} else {
			/*
			 * Not a CD, direct access or optical disk, so return
			 * unknown media.
			 */
			media_info.dki_media_type = DK_UNKNOWN;
		}
	}

	/* Now read the capacity so we can provide the lbasize and capacity */
	switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize,
	    SD_PATH_DIRECT)) {
	case 0:
		break;
	case EACCES:
		rval = EACCES;
		goto done;
	default:
		rval = EIO;
		goto done;
	}

	/*
	 * If the LUN is expanded dynamically, update the un structure.
	 */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_f_blockcount_is_valid == TRUE) &&
	    (un->un_f_tgt_blocksize_is_valid == TRUE) &&
	    (capacity > un->un_blockcount)) {
		sd_update_block_info(un, lbasize, capacity);
	}
	mutex_exit(SD_MUTEX(un));

	media_info.dki_lbsize = lbasize;
	media_capacity = capacity;

	/*
	 * sd_send_scsi_READ_CAPACITY() reports capacity in
	 * un->un_sys_blocksize chunks. So we need to convert it into
	 * cap.lbasize chunks.
	 */
	media_capacity *= un->un_sys_blocksize;
	media_capacity /= lbasize;
	media_info.dki_capacity = media_capacity;

	if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) {
		rval = EFAULT;
		/* Use a goto in case code is ever added below. */
		goto done;
	}
done:
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}


/*
 * Function: sd_check_media
 *
 * Description: This utility routine implements the functionality for the
 *		DKIOCSTATE ioctl. This ioctl blocks the user thread until the
 *		driver state changes from that specified by the user
 *		(inserted or ejected). For example, if the user specifies
 *		DKIO_EJECTED and the current media state is inserted this
 *		routine will immediately return DKIO_INSERTED. However, if the
 *		current media state is not inserted the user thread will be
 *		blocked until the drive state changes. If DKIO_NONE is specified
 *		the user thread will block until a drive state change occurs.
 *
 * Arguments: dev - the device number
 *		state - user pointer to a dkio_state, updated with the current
 *			drive state at return.
 *
 * Return Code: ENXIO
 *		EIO
 *		EAGAIN
 *		EINTR
 */

static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun		*un = NULL;
	enum dkio_state		prev_state;
	opaque_t		token = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	prev_state = un->un_mediastate;

	/* is there anything to do? */
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * submit the request to the scsi_watch service;
		 * scsi_media_watch_cb() does the real work
		 */
		mutex_exit(SD_MUTEX(un));

		/*
		 * This change handles the case where a scsi watch request is
		 * added to a device that is powered down. To accomplish this
		 * we power up the device before adding the scsi watch request,
		 * since the scsi watch sends a TUR directly to the device,
		 * which the device cannot handle if it is powered down.
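		 * The sd_pm_entry()/sd_pm_exit() pair below brackets the
		 * scsi_watch_request_submit() call for exactly that reason.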
		 */
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		token = scsi_watch_request_submit(SD_SCSI_DEVP(un),
		    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
		    (caddr_t)dev);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This is a special case IOCTL that doesn't return
		 * until the media state changes. Routine sdpower
		 * knows about and handles this so don't count it
		 * as an active cmd in the driver, which would
		 * keep the device busy to the pm framework.
		 * If the count isn't decremented the device can't
		 * be powered down.
		 */
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);

		/*
		 * if a prior request had been made, this will be the same
		 * token, as scsi_watch was designed that way.
		 */
		un->un_swr_token = token;
		un->un_specified_mediastate = state;

		/*
		 * now wait for media change
		 * we will not be signalled unless mediastate == state but it
		 * is still better to test for this condition, since there is
		 * a 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
		 */
		SD_TRACE(SD_LOG_COMMON, un,
		    "sd_check_media: waiting for media state change\n");
		while (un->un_mediastate == state) {
			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
				SD_TRACE(SD_LOG_COMMON, un,
				    "sd_check_media: waiting for media state "
				    "was interrupted\n");
				un->un_ncmds_in_driver++;
				rval = EINTR;
				goto done;
			}
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_media: received signal, state=%x\n",
			    un->un_mediastate);
		}
		/*
		 * Inc the counter to indicate the device once again
		 * has an active outstanding cmd.
		 */
		un->un_ncmds_in_driver++;
	}

	/* invalidate geometry */
	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
		sr_ejected(un);
	}

	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
		uint64_t	capacity;
		uint_t		lbasize;

		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
		mutex_exit(SD_MUTEX(un));
		/*
		 * Since the following routines use SD_PATH_DIRECT, we must
		 * call PM directly before the upcoming disk accesses. This
		 * may cause the disk to be powered up and spun up.
		 */

		if (sd_pm_entry(un) == DDI_SUCCESS) {
			rval = sd_send_scsi_READ_CAPACITY(un, &capacity,
			    &lbasize, SD_PATH_DIRECT);
			if (rval != 0) {
				sd_pm_exit(un);
				mutex_enter(SD_MUTEX(un));
				goto done;
			}
		} else {
			rval = EIO;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));

		sd_update_block_info(un, lbasize, capacity);

		/*
		 * Check if the media in the device is writable or not
		 */
		if (ISCD(un))
			sd_check_for_writable_cd(un, SD_PATH_DIRECT);

		mutex_exit(SD_MUTEX(un));
		cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
		if ((cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_check_media: un:0x%p pstats created and "
			    "set\n", un);
		}

		rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);
		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
	}
done:
	un->un_f_watcht_stopped = FALSE;
	if (un->un_swr_token) {
		/*
		 * Use of this local token and the mutex ensures that we avoid
		 * some race conditions associated with terminating the
		 * scsi watch.
		 */
		token = un->un_swr_token;
		un->un_swr_token = (opaque_t)NULL;
		mutex_exit(SD_MUTEX(un));
		(void) scsi_watch_request_terminate(token,
		    SCSI_WATCH_TERMINATE_WAIT);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Update the capacity kstat value if there was no media previously
	 * (capacity kstat is 0) and media has now been inserted
	 * (un_f_blockcount_is_valid == TRUE).
	 */
	if (un->un_errstats) {
		struct sd_errstats *stp = NULL;

		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}
	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
	return (rval);
}


/*
 * Function: sd_delayed_cv_broadcast
 *
 * Description: Delayed cv_broadcast to allow for target to recover from media
 *		insertion.
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_delayed_cv_broadcast(void *arg)
{
	struct sd_lun *un = arg;

	SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");

	mutex_enter(SD_MUTEX(un));
	un->un_dcvb_timeid = NULL;
	cv_broadcast(&un->un_state_cv);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_media_watch_cb
 *
 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
 *		routine processes the TUR sense data and updates the driver
 *		state if a transition has occurred. The user thread
 *		(sd_check_media) is then signalled.
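 *
 *		In summary, the sense-data handling below maps:
 *
 *		    UNIT ATTENTION, ASC 0x28            -> DKIO_INSERTED
 *		    NOT READY, ASC 0x3a                 -> DKIO_EJECTED
 *		    NOT READY, ASC/ASCQ 04/02,07,08     -> DKIO_INSERTED
 *		    GOOD status, command complete       -> DKIO_INSERTED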
 *
 * Arguments: arg - the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share this callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			packet, status byte and sense data
 *
 * Return Code: 0 for success, -1 for failure
 */

static int
sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun		*un;
	struct scsi_status	*statusp = resultp->statusp;
	uint8_t			*sensep = (uint8_t *)resultp->sensep;
	enum dkio_state		state = DKIO_NONE;
	dev_t			dev = (dev_t)arg;
	uchar_t			actual_sense_length;
	uint8_t			skey, asc, ascq;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (-1);
	}
	actual_sense_length = resultp->actual_sense_length;

	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
	    *((char *)statusp), (void *)sensep, actual_sense_length);

	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
		un->un_mediastate = DKIO_DEV_GONE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));

		return (0);
	}

	/*
	 * If there was a check condition, sensep points to valid sense data.
	 * If the status was not a check condition but a reservation or busy
	 * status, then the new state is DKIO_NONE.
	 */
	if (sensep != NULL) {
		skey = scsi_sense_key(sensep);
		asc = scsi_sense_asc(sensep);
		ascq = scsi_sense_ascq(sensep);

		SD_INFO(SD_LOG_COMMON, un,
		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
		    skey, asc, ascq);
		/* This routine only uses up to 13 bytes of sense data. */
		if (actual_sense_length >= 13) {
			if (skey == KEY_UNIT_ATTENTION) {
				if (asc == 0x28) {
					state = DKIO_INSERTED;
				}
			} else if (skey == KEY_NOT_READY) {
				/*
				 * Sense data of 02/04/02 means the host
				 * should send a START command. Explicitly
				 * leave the media state as is (inserted),
				 * since the media is present and the host
				 * has stopped the device for PM reasons.
				 * The next real read/write to the media
				 * will bring the device back to the proper
				 * state for media access.
				 */
				if (asc == 0x3a) {
					state = DKIO_EJECTED;
				} else {
					/*
					 * If the drive is busy with an
					 * operation or long write, keep the
					 * media in an inserted state.
					 */

					if ((asc == 0x04) &&
					    ((ascq == 0x02) ||
					    (ascq == 0x07) ||
					    (ascq == 0x08))) {
						state = DKIO_INSERTED;
					}
				}
			} else if (skey == KEY_NO_SENSE) {
				if ((asc == 0x00) && (ascq == 0x00)) {
					/*
					 * Sense Data 00/00/00 does not provide
					 * any information about the state of
					 * the media. Ignore it.
					 */
					mutex_exit(SD_MUTEX(un));
					return (0);
				}
			}
		}
	} else if ((*((char *)statusp) == STATUS_GOOD) &&
	    (resultp->pkt->pkt_reason == CMD_CMPLT)) {
		state = DKIO_INSERTED;
	}

	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: state=%x, specified=%x\n",
	    state, un->un_specified_mediastate);

	/*
	 * now signal the waiting thread if this is *not* the specified state;
	 * delay the signal if the state is DKIO_INSERTED to allow the target
	 * to recover
	 */
	if (state != un->un_specified_mediastate) {
		un->un_mediastate = state;
		if (state == DKIO_INSERTED) {
			/*
			 * delay the signal to give the drive a chance
			 * to do what it apparently needs to do
			 */
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: delayed cv_broadcast\n");
			if (un->un_dcvb_timeid == NULL) {
				un->un_dcvb_timeid =
				    timeout(sd_delayed_cv_broadcast, un,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			}
		} else {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: immediate cv_broadcast\n");
			cv_broadcast(&un->un_state_cv);
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_dkio_get_temp
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to get the disk temperature.
 *
 * Arguments: dev - the device number
 *		arg - pointer to user provided dk_temperature structure.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EAGAIN
 */

static int
sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct dk_temperature	*dktemp = NULL;
	uchar_t			*temperature_page;
	int			rval = 0;
	int			path_flag = SD_PATH_STANDARD;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);

	/* copyin the disk temp argument to get the user flags */
	if (ddi_copyin((void *)arg, dktemp,
	    sizeof (struct dk_temperature), flag) != 0) {
		rval = EFAULT;
		goto done;
	}

	/* Initialize the temperature to invalid. */
	dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
	dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;

	/*
	 * Note: Investigate removing the "bypass pm" semantic.
	 * Can we just bypass PM always?
	 */
	if (dktemp->dkt_flags & DKT_BYPASS_PM) {
		path_flag = SD_PATH_DIRECT;
		ASSERT(!mutex_owned(&un->un_pm_mutex));
		mutex_enter(&un->un_pm_mutex);
		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
			/*
			 * If DKT_BYPASS_PM is set and the drive happens to be
			 * in low power mode, we cannot wake it up; return
			 * EAGAIN.
			 */
			mutex_exit(&un->un_pm_mutex);
			rval = EAGAIN;
			goto done;
		} else {
			/*
			 * Indicate to PM the device is busy. This is required
			 * to avoid a race - i.e. the ioctl is issuing a
			 * command and the pm framework brings down the device
			 * to low power mode (possible power cut-off on some
			 * platforms).
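			 * sd_pm_entry() below marks the device busy; the
			 * matching sd_pm_exit() under the done2 label
			 * releases it once the LOG SENSE completes.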
			 */
			mutex_exit(&un->un_pm_mutex);
			if (sd_pm_entry(un) != DDI_SUCCESS) {
				rval = EAGAIN;
				goto done;
			}
		}
	}

	temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);

	if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page,
	    TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) {
		goto done2;
	}

	/*
	 * For the current temperature verify that the parameter length is
	 * 0x02 and the parameter code is 0x00.
	 */
	if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
	    (temperature_page[5] == 0x00)) {
		if (temperature_page[9] == 0xFF) {
			dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_cur_temp = (short)(temperature_page[9]);
		}
	}

	/*
	 * For the reference temperature verify that the parameter
	 * length is 0x02 and the parameter code is 0x01.
	 */
	if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
	    (temperature_page[11] == 0x01)) {
		if (temperature_page[15] == 0xFF) {
			dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_ref_temp = (short)(temperature_page[15]);
		}
	}

	/* Do the copyout regardless of the temperature command's status. */
	if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
	    flag) != 0) {
		rval = EFAULT;
	}

done2:
	if (path_flag == SD_PATH_DIRECT) {
		sd_pm_exit(un);
	}

	kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
done:
	if (dktemp != NULL) {
		kmem_free(dktemp, sizeof (struct dk_temperature));
	}

	return (rval);
}


/*
 * Function: sd_log_page_supported
 *
 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
 *		supported log pages.
 *
 * Arguments: un - driver soft state (unit) structure
 *		log_page - the log page code to look for in the list
 *
 * Return Code: -1 - on error (log sense is optional and may not be supported).
 *		0 - log page not found.
 *		1 - log page found.
 */

static int
sd_log_page_supported(struct sd_lun *un, int log_page)
{
	uchar_t	*log_page_data;
	int	i;
	int	match = 0;
	int	log_size;

	log_page_data = kmem_zalloc(0xFF, KM_SLEEP);

	if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0,
	    SD_PATH_DIRECT) != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_log_page_supported: failed log page retrieval\n");
		kmem_free(log_page_data, 0xFF);
		return (-1);
	}
	log_size = log_page_data[3];

	/*
	 * The list of supported log pages starts from the fourth byte. Check
	 * until we run out of log pages or a match is found.
	 */
	for (i = 4; (i < (log_size + 4)) && !match; i++) {
		if (log_page_data[i] == log_page) {
			match++;
		}
	}
	kmem_free(log_page_data, 0xFF);
	return (match);
}


/*
 * Function: sd_mhdioc_failfast
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to enable/disable the multihost failfast option.
 *		(MHIOCENFAILFAST)
 *
 * Arguments: dev - the device number
 *		arg - user specified probing interval.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
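 *
 *		A zero interval disables failfast; an interval of INT_MAX
 *		sets the failfast flag without starting the periodic watch
 *		(used for SCSI-3 PGR; see the note in the body below).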
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	int		mh_time;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
		return (EFAULT);

	if (mh_time) {
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status |= SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
		/*
		 * If mh_time is INT_MAX, then this ioctl is being used for
		 * SCSI-3 PGR purposes, and we don't need to spawn a watch
		 * thread.
		 */
		if (mh_time != INT_MAX) {
			rval = sd_check_mhd(dev, mh_time);
		}
	} else {
		(void) sd_check_mhd(dev, 0);
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status &= ~SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_takeown
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to forcefully acquire exclusive access rights to the
 *		multihost disk (MHIOCTKOWN).
 *
 * Arguments: dev - the device number
 *		arg - user provided structure specifying the delay
 *			parameters in milliseconds
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct mhioctkown	*tkown = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (arg != NULL) {
		tkown = (struct mhioctkown *)
		    kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
		rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
		if (rval != 0) {
			rval = EFAULT;
			goto error;
		}
	}

	rval = sd_take_ownership(dev, tkown);
	mutex_enter(SD_MUTEX(un));
	if (rval == 0) {
		un->un_resvd_status |= SD_RESERVE;
		if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
			sd_reinstate_resv_delay =
			    tkown->reinstate_resv_delay * 1000;
		} else {
			sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
		}
		/*
		 * Give the scsi_watch routine interval set by
		 * the MHIOCENFAILFAST ioctl precedence here.
		 */
		if ((un->un_resvd_status & SD_FAILFAST) == 0) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev,
			    sd_reinstate_resv_delay / 1000);
			SD_TRACE(SD_LOG_IOCTL_MHD, un,
			    "sd_mhdioc_takeown : %d\n",
			    sd_reinstate_resv_delay);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		un->un_resvd_status &= ~SD_RESERVE;
		mutex_exit(SD_MUTEX(un));
	}

error:
	if (tkown != NULL) {
		kmem_free(tkown, sizeof (struct mhioctkown));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_release
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to release exclusive access rights to the multihost
 *		disk (MHIOCRELEASE).
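 *
 *		Any reservation-reclaim watch is also torn down here unless
 *		failfast (MHIOCENFAILFAST) is still in effect; see the
 *		un_mhd_token handling below.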
 *
 * Arguments: dev - the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_release(dev_t dev)
{
	struct sd_lun		*un = NULL;
	timeout_id_t		resvd_timeid_save;
	int			resvd_status_save;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	mutex_enter(SD_MUTEX(un));
	resvd_status_save = un->un_resvd_status;
	un->un_resvd_status &=
	    ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
	if (un->un_resvd_timeid) {
		resvd_timeid_save = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(resvd_timeid_save);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * destroy any pending timeout thread that may be attempting to
	 * reinstate reservation on this device.
	 */
	sd_rmv_resv_reclaim_req(dev);

	if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
		mutex_enter(SD_MUTEX(un));
		if ((un->un_mhd_token) &&
		    ((un->un_resvd_status & SD_FAILFAST) == 0)) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, 0);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		/*
		 * sd_mhd_watch_cb will restart the resvd recover timeout
		 * thread.
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status = resvd_status_save;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_register_devid
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to register the device id (MHIOCREREGISTERDEVID).
 *
 *		Note: The implementation for this ioctl has been updated to
 *		be consistent with the original PSARC case (1999/357)
 *		(4375899, 4241671, 4220005)
 *
 * Arguments: dev - the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_register_devid(dev_t dev)
{
	struct sd_lun	*un = NULL;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* If a devid already exists, de-register it */
	if (un->un_devid != NULL) {
		ddi_devid_unregister(SD_DEVINFO(un));
		/*
		 * After unregistering the devid, we must also free the
		 * devid memory.
		 */
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/* Check for reservation conflict */
	mutex_exit(SD_MUTEX(un));
	rval = sd_send_scsi_TEST_UNIT_READY(un, 0);
	mutex_enter(SD_MUTEX(un));

	switch (rval) {
	case 0:
		sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
		break;
	case EACCES:
		break;
	default:
		rval = EIO;
	}

	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function: sd_mhdioc_inkeys
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Keys command
 *		to the device (MHIOCGRP_INKEYS).
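 *
 *		Under _MULTI_DATAMODEL an ILP32 caller's mhioc_inkeys32 is
 *		converted to and from the native mhioc_inkeys_t around the
 *		call to sd_persistent_reservation_in_read_keys().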
 *
 * Arguments: dev	- the device number
 *		arg	- user provided in_keys structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un;
	mhioc_inkeys_t		inkeys;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inkeys32	inkeys32;

		if (ddi_copyin(arg, &inkeys32,
		    sizeof (struct mhioc_inkeys32), flag) != 0) {
			return (EFAULT);
		}
		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		inkeys32.generation = inkeys.generation;
		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}

#endif /* _MULTI_DATAMODEL */

	return (rval);
}


/*
 * Function: sd_mhdioc_inresv
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Reservations
 *		command to the device (MHIOCGRP_INRESV).
 *
 * Arguments: dev	- the device number
 *		arg	- user provided in_resv structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un;
	mhioc_inresvs_t		inresvs;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inresvs32	inresvs32;

		if (ddi_copyin(arg, &inresvs32,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		inresvs32.generation = inresvs.generation;
		if (ddi_copyout(&inresvs32, arg,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inresvs,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inresvs, arg,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
		return (EFAULT);
	}

#endif /* ! _MULTI_DATAMODEL */

	return (rval);
}


/*
 * The following routines support the clustering functionality described below
 * and implement lost reservation reclaim functionality.
 *
 * Clustering
 * ----------
 * The clustering code uses two different, independent forms of SCSI
 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3
 * Persistent Group Reservations. For any particular disk, it will use either
 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
 *
 * SCSI-2
 * The cluster software takes ownership of a multi-hosted disk by issuing the
 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a
 * cluster node, just after taking ownership of the disk with the MHIOCTKOWN
 * ioctl, then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast"
 * in the driver. The meaning of failfast is that if the driver (on this host)
 * ever encounters the scsi error return code RESERVATION_CONFLICT from the
 * device, it should immediately panic the host. The motivation for this ioctl
 * is that if this host does encounter reservation conflict, the underlying
 * cause is that some other host of the cluster has decided that this host is
 * no longer in the cluster and has seized control of the disks for itself.
 * Since this host is no longer in the cluster, it ought to panic itself.
The
22062 * MHIOCENFAILFAST ioctl does two things:
22063 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT
22064 * error to panic the host
22065 * (b) it sets up a periodic timer to test whether this host still has
22066 * "access" (in that no other host has reserved the device): if the
22067 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
22068 * purpose of that periodic timer is to handle scenarios where the host is
22069 * otherwise temporarily quiescent, temporarily doing no real i/o.
22070 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
22071 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
22072 * the device itself.
22073 *
22074 * SCSI-3 PGR
22075 * A direct semantic implementation of the SCSI-3 Persistent Reservation
22076 * facility is supported through the shared multihost disk ioctls
22077 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
22078 * MHIOCGRP_PREEMPTANDABORT).
22079 *
22080 * Reservation Reclaim:
22081 * --------------------
22082 * To support the lost reservation reclaim operations, this driver creates a
22083 * single thread to handle reinstating reservations on all devices that have
22084 * lost reservations. sd_resv_reclaim_requests are logged for all devices that
22085 * have lost reservations when the scsi watch facility calls back sd_mhd_watch_cb,
22086 * and the reservation reclaim thread loops through the requests to regain the
22087 * lost reservations.
22088 */
22089
22090 /*
22091 * Function: sd_check_mhd()
22092 *
22093 * Description: This function sets up and submits a scsi watch request or
22094 * terminates an existing watch request. This routine is used in
22095 * support of reservation reclaim.
22096 *
22097 * Arguments: dev - the device 'dev_t' is used for context to discriminate
22098 * among multiple watches that share the callback function
22099 * interval - the number of milliseconds specifying the watch
22100 * interval for issuing TEST UNIT READY commands. If
22101 * set to 0 the watch should be terminated. If the
22102 * interval is set to 0 and if the device is required
22103 * to hold reservation while disabling failfast, the
22104 * watch is restarted with an interval of
22105 * reinstate_resv_delay.
22106 *
22107 * Return Code: 0 - Successful submit/terminate of scsi watch request
22108 * ENXIO - Indicates an invalid device was specified
22109 * EAGAIN - Unable to submit the scsi watch request
22110 */
22111
22112 static int
22113 sd_check_mhd(dev_t dev, int interval)
22114 {
22115 	struct sd_lun	*un;
22116 	opaque_t	token;
22117
22118 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22119 		return (ENXIO);
22120 	}
22121
22122 	/* is this a watch termination request? */
22123 	if (interval == 0) {
22124 		mutex_enter(SD_MUTEX(un));
22125 		/* if there is an existing watch task then terminate it */
22126 		if (un->un_mhd_token) {
22127 			token = un->un_mhd_token;
22128 			un->un_mhd_token = NULL;
22129 			mutex_exit(SD_MUTEX(un));
22130 			(void) scsi_watch_request_terminate(token,
22131 			    SCSI_WATCH_TERMINATE_WAIT);
22132 			mutex_enter(SD_MUTEX(un));
22133 		} else {
22134 			mutex_exit(SD_MUTEX(un));
22135 			/*
22136 			 * Note: If we return here we don't check for the
22137 			 * failfast case. This is the original legacy
22138 			 * implementation but perhaps we should be checking
22139 			 * the failfast case.
22140 */ 22141 return (0); 22142 } 22143 /* 22144 * If the device is required to hold reservation while 22145 * disabling failfast, we need to restart the scsi_watch 22146 * routine with an interval of reinstate_resv_delay. 22147 */ 22148 if (un->un_resvd_status & SD_RESERVE) { 22149 interval = sd_reinstate_resv_delay/1000; 22150 } else { 22151 /* no failfast so bail */ 22152 mutex_exit(SD_MUTEX(un)); 22153 return (0); 22154 } 22155 mutex_exit(SD_MUTEX(un)); 22156 } 22157 22158 /* 22159 * adjust minimum time interval to 1 second, 22160 * and convert from msecs to usecs 22161 */ 22162 if (interval > 0 && interval < 1000) { 22163 interval = 1000; 22164 } 22165 interval *= 1000; 22166 22167 /* 22168 * submit the request to the scsi_watch service 22169 */ 22170 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 22171 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 22172 if (token == NULL) { 22173 return (EAGAIN); 22174 } 22175 22176 /* 22177 * save token for termination later on 22178 */ 22179 mutex_enter(SD_MUTEX(un)); 22180 un->un_mhd_token = token; 22181 mutex_exit(SD_MUTEX(un)); 22182 return (0); 22183 } 22184 22185 22186 /* 22187 * Function: sd_mhd_watch_cb() 22188 * 22189 * Description: This function is the call back function used by the scsi watch 22190 * facility. The scsi watch facility sends the "Test Unit Ready" 22191 * and processes the status. If applicable (i.e. a "Unit Attention" 22192 * status and automatic "Request Sense" not used) the scsi watch 22193 * facility will send a "Request Sense" and retrieve the sense data 22194 * to be passed to this callback function. In either case the 22195 * automatic "Request Sense" or the facility submitting one, this 22196 * callback is passed the status and sense data. 22197 * 22198 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22199 * among multiple watches that share this callback function 22200 * resultp - scsi watch facility result packet containing scsi 22201 * packet, status byte and sense data 22202 * 22203 * Return Code: 0 - continue the watch task 22204 * non-zero - terminate the watch task 22205 */ 22206 22207 static int 22208 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22209 { 22210 struct sd_lun *un; 22211 struct scsi_status *statusp; 22212 uint8_t *sensep; 22213 struct scsi_pkt *pkt; 22214 uchar_t actual_sense_length; 22215 dev_t dev = (dev_t)arg; 22216 22217 ASSERT(resultp != NULL); 22218 statusp = resultp->statusp; 22219 sensep = (uint8_t *)resultp->sensep; 22220 pkt = resultp->pkt; 22221 actual_sense_length = resultp->actual_sense_length; 22222 22223 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22224 return (ENXIO); 22225 } 22226 22227 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22228 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 22229 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 22230 22231 /* Begin processing of the status and/or sense data */ 22232 if (pkt->pkt_reason != CMD_CMPLT) { 22233 /* Handle the incomplete packet */ 22234 sd_mhd_watch_incomplete(un, pkt); 22235 return (0); 22236 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 22237 if (*((unsigned char *)statusp) 22238 == STATUS_RESERVATION_CONFLICT) { 22239 /* 22240 * Handle a reservation conflict by panicking if 22241 * configured for failfast or by logging the conflict 22242 * and updating the reservation status 22243 */ 22244 mutex_enter(SD_MUTEX(un)); 22245 if ((un->un_resvd_status & SD_FAILFAST) && 22246 (sd_failfast_enable)) { 22247 
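				/*
				 * Failfast is armed and a reservation conflict
				 * was just seen: assume some other host has
				 * fenced this host out of the cluster and
				 * panic, as described in the clustering notes
				 * above sd_check_mhd().
				 */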
sd_panic_for_res_conflict(un); 22248 /*NOTREACHED*/ 22249 } 22250 SD_INFO(SD_LOG_IOCTL_MHD, un, 22251 "sd_mhd_watch_cb: Reservation Conflict\n"); 22252 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 22253 mutex_exit(SD_MUTEX(un)); 22254 } 22255 } 22256 22257 if (sensep != NULL) { 22258 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 22259 mutex_enter(SD_MUTEX(un)); 22260 if ((scsi_sense_asc(sensep) == 22261 SD_SCSI_RESET_SENSE_CODE) && 22262 (un->un_resvd_status & SD_RESERVE)) { 22263 /* 22264 * The additional sense code indicates a power 22265 * on or bus device reset has occurred; update 22266 * the reservation status. 22267 */ 22268 un->un_resvd_status |= 22269 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22270 SD_INFO(SD_LOG_IOCTL_MHD, un, 22271 "sd_mhd_watch_cb: Lost Reservation\n"); 22272 } 22273 } else { 22274 return (0); 22275 } 22276 } else { 22277 mutex_enter(SD_MUTEX(un)); 22278 } 22279 22280 if ((un->un_resvd_status & SD_RESERVE) && 22281 (un->un_resvd_status & SD_LOST_RESERVE)) { 22282 if (un->un_resvd_status & SD_WANT_RESERVE) { 22283 /* 22284 * A reset occurred in between the last probe and this 22285 * one so if a timeout is pending cancel it. 22286 */ 22287 if (un->un_resvd_timeid) { 22288 timeout_id_t temp_id = un->un_resvd_timeid; 22289 un->un_resvd_timeid = NULL; 22290 mutex_exit(SD_MUTEX(un)); 22291 (void) untimeout(temp_id); 22292 mutex_enter(SD_MUTEX(un)); 22293 } 22294 un->un_resvd_status &= ~SD_WANT_RESERVE; 22295 } 22296 if (un->un_resvd_timeid == 0) { 22297 /* Schedule a timeout to handle the lost reservation */ 22298 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 22299 (void *)dev, 22300 drv_usectohz(sd_reinstate_resv_delay)); 22301 } 22302 } 22303 mutex_exit(SD_MUTEX(un)); 22304 return (0); 22305 } 22306 22307 22308 /* 22309 * Function: sd_mhd_watch_incomplete() 22310 * 22311 * Description: This function is used to find out why a scsi pkt sent by the 22312 * scsi watch facility was not completed. Under some scenarios this 22313 * routine will return. Otherwise it will send a bus reset to see 22314 * if the drive is still online. 22315 * 22316 * Arguments: un - driver soft state (unit) structure 22317 * pkt - incomplete scsi pkt 22318 */ 22319 22320 static void 22321 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22322 { 22323 int be_chatty; 22324 int perr; 22325 22326 ASSERT(pkt != NULL); 22327 ASSERT(un != NULL); 22328 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22329 perr = (pkt->pkt_statistics & STAT_PERR); 22330 22331 mutex_enter(SD_MUTEX(un)); 22332 if (un->un_state == SD_STATE_DUMPING) { 22333 mutex_exit(SD_MUTEX(un)); 22334 return; 22335 } 22336 22337 switch (pkt->pkt_reason) { 22338 case CMD_UNX_BUS_FREE: 22339 /* 22340 * If we had a parity error that caused the target to drop BSY*, 22341 * don't be chatty about it. 22342 */ 22343 if (perr && be_chatty) { 22344 be_chatty = 0; 22345 } 22346 break; 22347 case CMD_TAG_REJECT: 22348 /* 22349 * The SCSI-2 spec states that a tag reject will be sent by the 22350 * target if tagged queuing is not supported. A tag reject may 22351 * also be sent during certain initialization periods or to 22352 * control internal resources. For the latter case the target 22353 * may also return Queue Full. 22354 * 22355 * If this driver receives a tag reject from a target that is 22356 * going through an init period or controlling internal 22357 * resources tagged queuing will be disabled. 
This is a less
22358 		 * than optimal behavior but the driver is unable to determine
22359 		 * the target state and assumes tagged queueing is not supported.
22360 		 */
22361 		pkt->pkt_flags = 0;
22362 		un->un_tagflags = 0;
22363
22364 		if (un->un_f_opt_queueing == TRUE) {
22365 			un->un_throttle = min(un->un_throttle, 3);
22366 		} else {
22367 			un->un_throttle = 1;
22368 		}
22369 		mutex_exit(SD_MUTEX(un));
22370 		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
22371 		mutex_enter(SD_MUTEX(un));
22372 		break;
22373 	case CMD_INCOMPLETE:
22374 		/*
22375 		 * The transport stopped with an abnormal state, fallthrough and
22376 		 * reset the target and/or bus unless selection did not complete
22377 		 * (indicated by STATE_GOT_BUS) in which case we don't want to
22378 		 * go through a target/bus reset
22379 		 */
22380 		if (pkt->pkt_state == STATE_GOT_BUS) {
22381 			break;
22382 		}
22383 		/*FALLTHROUGH*/
22384
22385 	case CMD_TIMEOUT:
22386 	default:
22387 		/*
22388 		 * The lun may still be running the command, so a lun reset
22389 		 * should be attempted. If the lun reset fails or cannot be
22390 		 * issued, then try a target reset. Lastly try a bus reset.
22391 		 */
22392 		if ((pkt->pkt_statistics &
22393 		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
22394 			int reset_retval = 0;
22395 			mutex_exit(SD_MUTEX(un));
22396 			if (un->un_f_allow_bus_device_reset == TRUE) {
22397 				if (un->un_f_lun_reset_enabled == TRUE) {
22398 					reset_retval =
22399 					    scsi_reset(SD_ADDRESS(un),
22400 					    RESET_LUN);
22401 				}
22402 				if (reset_retval == 0) {
22403 					reset_retval =
22404 					    scsi_reset(SD_ADDRESS(un),
22405 					    RESET_TARGET);
22406 				}
22407 			}
22408 			if (reset_retval == 0) {
22409 				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
22410 			}
22411 			mutex_enter(SD_MUTEX(un));
22412 		}
22413 		break;
22414 	}
22415
22416 	/* A device/bus reset has occurred; update the reservation status. */
22417 	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
22418 	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
22419 		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
22420 			un->un_resvd_status |=
22421 			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
22422 			SD_INFO(SD_LOG_IOCTL_MHD, un,
22423 			    "sd_mhd_watch_incomplete: Lost Reservation\n");
22424 		}
22425 	}
22426
22427 	/*
22428 	 * The disk has been turned off; update the device state.
22429 	 *
22430 	 * Note: Should we be offlining the disk here?
22431 	 */
22432 	if (pkt->pkt_state == STATE_GOT_BUS) {
22433 		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
22434 		    "Disk not responding to selection\n");
22435 		if (un->un_state != SD_STATE_OFFLINE) {
22436 			New_state(un, SD_STATE_OFFLINE);
22437 		}
22438 	} else if (be_chatty) {
22439 		/*
22440 		 * suppress messages if they are all the same pkt reason;
22441 		 * with TQ, many (up to 256) are returned with the same
22442 		 * pkt_reason
22443 		 */
22444 		if (pkt->pkt_reason != un->un_last_pkt_reason) {
22445 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
22446 			    "sd_mhd_watch_incomplete: "
22447 			    "SCSI transport failed: reason '%s'\n",
22448 			    scsi_rname(pkt->pkt_reason));
22449 		}
22450 	}
22451 	un->un_last_pkt_reason = pkt->pkt_reason;
22452 	mutex_exit(SD_MUTEX(un));
22453 }
22454
22455
22456 /*
22457 * Function: sd_sname()
22458 *
22459 * Description: This is a simple little routine to return a string containing
22460 * a printable description of the command status byte for use in
22461 * logging.
22462 *
22463 * Arguments: status - the command status byte
22464 *
22465 * Return Code: char * - string containing status description.
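 *
 * For example, sd_sname(STATUS_GOOD) returns "good status" and
 * sd_sname(STATUS_CHECK) returns "check condition"; any value not
 * matched in the switch below maps to "<unknown status>".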
22466 */ 22467 22468 static char * 22469 sd_sname(uchar_t status) 22470 { 22471 switch (status & STATUS_MASK) { 22472 case STATUS_GOOD: 22473 return ("good status"); 22474 case STATUS_CHECK: 22475 return ("check condition"); 22476 case STATUS_MET: 22477 return ("condition met"); 22478 case STATUS_BUSY: 22479 return ("busy"); 22480 case STATUS_INTERMEDIATE: 22481 return ("intermediate"); 22482 case STATUS_INTERMEDIATE_MET: 22483 return ("intermediate - condition met"); 22484 case STATUS_RESERVATION_CONFLICT: 22485 return ("reservation_conflict"); 22486 case STATUS_TERMINATED: 22487 return ("command terminated"); 22488 case STATUS_QFULL: 22489 return ("queue full"); 22490 default: 22491 return ("<unknown status>"); 22492 } 22493 } 22494 22495 22496 /* 22497 * Function: sd_mhd_resvd_recover() 22498 * 22499 * Description: This function adds a reservation entry to the 22500 * sd_resv_reclaim_request list and signals the reservation 22501 * reclaim thread that there is work pending. If the reservation 22502 * reclaim thread has not been previously created this function 22503 * will kick it off. 22504 * 22505 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22506 * among multiple watches that share this callback function 22507 * 22508 * Context: This routine is called by timeout() and is run in interrupt 22509 * context. It must not sleep or call other functions which may 22510 * sleep. 22511 */ 22512 22513 static void 22514 sd_mhd_resvd_recover(void *arg) 22515 { 22516 dev_t dev = (dev_t)arg; 22517 struct sd_lun *un; 22518 struct sd_thr_request *sd_treq = NULL; 22519 struct sd_thr_request *sd_cur = NULL; 22520 struct sd_thr_request *sd_prev = NULL; 22521 int already_there = 0; 22522 22523 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22524 return; 22525 } 22526 22527 mutex_enter(SD_MUTEX(un)); 22528 un->un_resvd_timeid = NULL; 22529 if (un->un_resvd_status & SD_WANT_RESERVE) { 22530 /* 22531 * There was a reset so don't issue the reserve, allow the 22532 * sd_mhd_watch_cb callback function to notice this and 22533 * reschedule the timeout for reservation. 22534 */ 22535 mutex_exit(SD_MUTEX(un)); 22536 return; 22537 } 22538 mutex_exit(SD_MUTEX(un)); 22539 22540 /* 22541 * Add this device to the sd_resv_reclaim_request list and the 22542 * sd_resv_reclaim_thread should take care of the rest. 22543 * 22544 * Note: We can't sleep in this context so if the memory allocation 22545 * fails allow the sd_mhd_watch_cb callback function to notice this and 22546 * reschedule the timeout for reservation. 
(4378460)
22547 */
22548 	sd_treq = (struct sd_thr_request *)
22549 	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
22550 	if (sd_treq == NULL) {
22551 		return;
22552 	}
22553
22554 	sd_treq->sd_thr_req_next = NULL;
22555 	sd_treq->dev = dev;
22556 	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
22557 	if (sd_tr.srq_thr_req_head == NULL) {
22558 		sd_tr.srq_thr_req_head = sd_treq;
22559 	} else {
22560 		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
22561 		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
22562 			if (sd_cur->dev == dev) {
22563 				/*
22564 				 * already in Queue so don't log
22565 				 * another request for the device
22566 				 */
22567 				already_there = 1;
22568 				break;
22569 			}
22570 			sd_prev = sd_cur;
22571 		}
22572 		if (!already_there) {
22573 			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
22574 			    "logging request for %lx\n", dev);
22575 			sd_prev->sd_thr_req_next = sd_treq;
22576 		} else {
22577 			kmem_free(sd_treq, sizeof (struct sd_thr_request));
22578 		}
22579 	}
22580
22581 	/*
22582 	 * Create a kernel thread to do the reservation reclaim so that this
22583 	 * thread is freed up; we cannot block here while we go away to do
22584 	 * the reservation reclaim.
22585 	 */
22586 	if (sd_tr.srq_resv_reclaim_thread == NULL)
22587 		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
22588 		    sd_resv_reclaim_thread, NULL,
22589 		    0, &p0, TS_RUN, v.v_maxsyspri - 2);
22590
22591 	/* Tell the reservation reclaim thread that it has work to do */
22592 	cv_signal(&sd_tr.srq_resv_reclaim_cv);
22593 	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
22594 }
22595
22596 /*
22597 * Function: sd_resv_reclaim_thread()
22598 *
22599 * Description: This function implements the reservation reclaim operations.
22600 *
22601 * Arguments: none; the devices to service are taken from the
22602 * sd_resv_reclaim_request list.
22603 */
22604
22605 static void
22606 sd_resv_reclaim_thread()
22607 {
22608 	struct sd_lun		*un;
22609 	struct sd_thr_request	*sd_mhreq;
22610
22611 	/* Wait for work */
22612 	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
22613 	if (sd_tr.srq_thr_req_head == NULL) {
22614 		cv_wait(&sd_tr.srq_resv_reclaim_cv,
22615 		    &sd_tr.srq_resv_reclaim_mutex);
22616 	}
22617
22618 	/* Loop while we have work */
22619 	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
22620 		un = ddi_get_soft_state(sd_state,
22621 		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
22622 		if (un == NULL) {
22623 			/*
22624 			 * softstate structure is NULL so just
22625 			 * dequeue the request and continue
22626 			 */
22627 			sd_tr.srq_thr_req_head =
22628 			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
22629 			kmem_free(sd_tr.srq_thr_cur_req,
22630 			    sizeof (struct sd_thr_request));
22631 			continue;
22632 		}
22633
22634 		/* dequeue the request */
22635 		sd_mhreq = sd_tr.srq_thr_cur_req;
22636 		sd_tr.srq_thr_req_head =
22637 		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
22638 		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
22639
22640 		/*
22641 		 * Reclaim reservation only if SD_RESERVE is still set. There
22642 		 * may have been a call to MHIOCRELEASE before we got here.
22643 		 */
22644 		mutex_enter(SD_MUTEX(un));
22645 		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
22646 			/*
22647 			 * Note: The SD_LOST_RESERVE flag is cleared before
22648 			 * reclaiming the reservation.
If this is done after the 22649 * call to sd_reserve_release a reservation loss in the 22650 * window between pkt completion of reserve cmd and 22651 * mutex_enter below may not be recognized 22652 */ 22653 un->un_resvd_status &= ~SD_LOST_RESERVE; 22654 mutex_exit(SD_MUTEX(un)); 22655 22656 if (sd_reserve_release(sd_mhreq->dev, 22657 SD_RESERVE) == 0) { 22658 mutex_enter(SD_MUTEX(un)); 22659 un->un_resvd_status |= SD_RESERVE; 22660 mutex_exit(SD_MUTEX(un)); 22661 SD_INFO(SD_LOG_IOCTL_MHD, un, 22662 "sd_resv_reclaim_thread: " 22663 "Reservation Recovered\n"); 22664 } else { 22665 mutex_enter(SD_MUTEX(un)); 22666 un->un_resvd_status |= SD_LOST_RESERVE; 22667 mutex_exit(SD_MUTEX(un)); 22668 SD_INFO(SD_LOG_IOCTL_MHD, un, 22669 "sd_resv_reclaim_thread: Failed " 22670 "Reservation Recovery\n"); 22671 } 22672 } else { 22673 mutex_exit(SD_MUTEX(un)); 22674 } 22675 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22676 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22677 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22678 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22679 /* 22680 * wakeup the destroy thread if anyone is waiting on 22681 * us to complete. 22682 */ 22683 cv_signal(&sd_tr.srq_inprocess_cv); 22684 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22685 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22686 } 22687 22688 /* 22689 * cleanup the sd_tr structure now that this thread will not exist 22690 */ 22691 ASSERT(sd_tr.srq_thr_req_head == NULL); 22692 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22693 sd_tr.srq_resv_reclaim_thread = NULL; 22694 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22695 thread_exit(); 22696 } 22697 22698 22699 /* 22700 * Function: sd_rmv_resv_reclaim_req() 22701 * 22702 * Description: This function removes any pending reservation reclaim requests 22703 * for the specified device. 22704 * 22705 * Arguments: dev - the device 'dev_t' 22706 */ 22707 22708 static void 22709 sd_rmv_resv_reclaim_req(dev_t dev) 22710 { 22711 struct sd_thr_request *sd_mhreq; 22712 struct sd_thr_request *sd_prev; 22713 22714 /* Remove a reservation reclaim request from the list */ 22715 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22716 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22717 /* 22718 * We are attempting to reinstate reservation for 22719 * this device. We wait for sd_reserve_release() 22720 * to return before we return. 22721 */ 22722 cv_wait(&sd_tr.srq_inprocess_cv, 22723 &sd_tr.srq_resv_reclaim_mutex); 22724 } else { 22725 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22726 if (sd_mhreq && sd_mhreq->dev == dev) { 22727 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22728 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22729 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22730 return; 22731 } 22732 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22733 if (sd_mhreq && sd_mhreq->dev == dev) { 22734 break; 22735 } 22736 sd_prev = sd_mhreq; 22737 } 22738 if (sd_mhreq != NULL) { 22739 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22740 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22741 } 22742 } 22743 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22744 } 22745 22746 22747 /* 22748 * Function: sd_mhd_reset_notify_cb() 22749 * 22750 * Description: This is a call back function for scsi_reset_notify. This 22751 * function updates the softstate reserved status and logs the 22752 * reset. The driver scsi watch facility callback function 22753 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22754 * will reclaim the reservation. 
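 * As a sketch of how such a callback is wired up (the registration
 * itself lives elsewhere in this driver, alongside the reservation
 * ioctl handling), the scsi_reset_notify(9F) call takes the form:
 *	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *	    sd_mhd_reset_notify_cb, (caddr_t)un);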
22755 *
22756 * Arguments: arg - driver soft state (unit) structure
22757 */
22758
22759 static void
22760 sd_mhd_reset_notify_cb(caddr_t arg)
22761 {
22762 	struct sd_lun *un = (struct sd_lun *)arg;
22763
22764 	mutex_enter(SD_MUTEX(un));
22765 	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
22766 		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
22767 		SD_INFO(SD_LOG_IOCTL_MHD, un,
22768 		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
22769 	}
22770 	mutex_exit(SD_MUTEX(un));
22771 }
22772
22773
22774 /*
22775 * Function: sd_take_ownership()
22776 *
22777 * Description: This routine implements an algorithm to achieve a stable
22778 * reservation on disks which don't implement priority reserve,
22779 * and makes sure that re-reservation attempts by other hosts fail.
22780 * This algorithm consists of a loop that keeps issuing the RESERVE
22781 * for some period of time (min_ownership_delay, default 6 seconds).
22782 * During that loop, it looks to see if there has been a bus device
22783 * reset or bus reset (both of which cause an existing reservation
22784 * to be lost). If the reservation is lost, RESERVE is reissued until a
22785 * period of min_ownership_delay with no resets has gone by, or
22786 * until max_ownership_delay has expired. This loop ensures that
22787 * the host really did manage to reserve the device, in spite of
22788 * resets. The looping for min_ownership_delay (default six
22789 * seconds) is important to early generation clustering products,
22790 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an
22791 * MHIOCENFAILFAST periodic timer of two seconds. By having
22792 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having
22793 * MHIOCENFAILFAST poll every two seconds, the idea is that by the
22794 * time the MHIOCTKOWN ioctl returns, the other host (if any) will
22795 * have already noticed, via the MHIOCENFAILFAST polling, that it
22796 * no longer "owns" the disk and will have panicked itself. Thus,
22797 * the host issuing the MHIOCTKOWN is assured (with timing
22798 * dependencies) that by the time it actually starts to use the
22799 * disk for real work, the old owner is no longer accessing it.
22800 *
22801 * min_ownership_delay is the minimum amount of time for which the
22802 * disk must be reserved continuously devoid of resets before the
22803 * MHIOCTKOWN ioctl will return success.
22804 *
22805 * max_ownership_delay indicates the amount of time by which the
22806 * take ownership should succeed or time out with an error.
22807 *
22808 * Arguments: dev - the device 'dev_t'
22809 * *p - struct containing timing info.
22810 *
22811 * Return Code: 0 for success or error code
22812 */
22813
22814 static int
22815 sd_take_ownership(dev_t dev, struct mhioctkown *p)
22816 {
22817 	struct sd_lun	*un;
22818 	int		rval;
22819 	int		err;
22820 	int		reservation_count = 0;
22821 	int		min_ownership_delay = 6000000; /* in usec */
22822 	int		max_ownership_delay = 30000000; /* in usec */
22823 	clock_t		start_time;	/* starting time of this algorithm */
22824 	clock_t		end_time;	/* time limit for giving up */
22825 	clock_t		ownership_time;	/* time limit for stable ownership */
22826 	clock_t		current_time;
22827 	clock_t		previous_current_time;
22828
22829 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22830 		return (ENXIO);
22831 	}
22832
22833 	/*
22834 	 * Attempt a device reservation. A priority reservation is requested.
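	 * If another host currently holds the reservation,
	 * sd_reserve_release() will attempt to break it with a reset
	 * before reissuing the RESERVE (see that routine below).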
22835 */ 22836 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22837 != SD_SUCCESS) { 22838 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22839 "sd_take_ownership: return(1)=%d\n", rval); 22840 return (rval); 22841 } 22842 22843 /* Update the softstate reserved status to indicate the reservation */ 22844 mutex_enter(SD_MUTEX(un)); 22845 un->un_resvd_status |= SD_RESERVE; 22846 un->un_resvd_status &= 22847 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22848 mutex_exit(SD_MUTEX(un)); 22849 22850 if (p != NULL) { 22851 if (p->min_ownership_delay != 0) { 22852 min_ownership_delay = p->min_ownership_delay * 1000; 22853 } 22854 if (p->max_ownership_delay != 0) { 22855 max_ownership_delay = p->max_ownership_delay * 1000; 22856 } 22857 } 22858 SD_INFO(SD_LOG_IOCTL_MHD, un, 22859 "sd_take_ownership: min, max delays: %d, %d\n", 22860 min_ownership_delay, max_ownership_delay); 22861 22862 start_time = ddi_get_lbolt(); 22863 current_time = start_time; 22864 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22865 end_time = start_time + drv_usectohz(max_ownership_delay); 22866 22867 while (current_time - end_time < 0) { 22868 delay(drv_usectohz(500000)); 22869 22870 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22871 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22872 mutex_enter(SD_MUTEX(un)); 22873 rval = (un->un_resvd_status & 22874 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22875 mutex_exit(SD_MUTEX(un)); 22876 break; 22877 } 22878 } 22879 previous_current_time = current_time; 22880 current_time = ddi_get_lbolt(); 22881 mutex_enter(SD_MUTEX(un)); 22882 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22883 ownership_time = ddi_get_lbolt() + 22884 drv_usectohz(min_ownership_delay); 22885 reservation_count = 0; 22886 } else { 22887 reservation_count++; 22888 } 22889 un->un_resvd_status |= SD_RESERVE; 22890 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22891 mutex_exit(SD_MUTEX(un)); 22892 22893 SD_INFO(SD_LOG_IOCTL_MHD, un, 22894 "sd_take_ownership: ticks for loop iteration=%ld, " 22895 "reservation=%s\n", (current_time - previous_current_time), 22896 reservation_count ? 
"ok" : "reclaimed"); 22897 22898 if (current_time - ownership_time >= 0 && 22899 reservation_count >= 4) { 22900 rval = 0; /* Achieved a stable ownership */ 22901 break; 22902 } 22903 if (current_time - end_time >= 0) { 22904 rval = EACCES; /* No ownership in max possible time */ 22905 break; 22906 } 22907 } 22908 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22909 "sd_take_ownership: return(2)=%d\n", rval); 22910 return (rval); 22911 } 22912 22913 22914 /* 22915 * Function: sd_reserve_release() 22916 * 22917 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22918 * PRIORITY RESERVE commands based on a user specified command type 22919 * 22920 * Arguments: dev - the device 'dev_t' 22921 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22922 * SD_RESERVE, SD_RELEASE 22923 * 22924 * Return Code: 0 or Error Code 22925 */ 22926 22927 static int 22928 sd_reserve_release(dev_t dev, int cmd) 22929 { 22930 struct uscsi_cmd *com = NULL; 22931 struct sd_lun *un = NULL; 22932 char cdb[CDB_GROUP0]; 22933 int rval; 22934 22935 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22936 (cmd == SD_PRIORITY_RESERVE)); 22937 22938 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22939 return (ENXIO); 22940 } 22941 22942 /* instantiate and initialize the command and cdb */ 22943 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22944 bzero(cdb, CDB_GROUP0); 22945 com->uscsi_flags = USCSI_SILENT; 22946 com->uscsi_timeout = un->un_reserve_release_time; 22947 com->uscsi_cdblen = CDB_GROUP0; 22948 com->uscsi_cdb = cdb; 22949 if (cmd == SD_RELEASE) { 22950 cdb[0] = SCMD_RELEASE; 22951 } else { 22952 cdb[0] = SCMD_RESERVE; 22953 } 22954 22955 /* Send the command. */ 22956 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22957 SD_PATH_STANDARD); 22958 22959 /* 22960 * "break" a reservation that is held by another host, by issuing a 22961 * reset if priority reserve is desired, and we could not get the 22962 * device. 22963 */ 22964 if ((cmd == SD_PRIORITY_RESERVE) && 22965 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22966 /* 22967 * First try to reset the LUN. If we cannot, then try a target 22968 * reset, followed by a bus reset if the target reset fails. 22969 */ 22970 int reset_retval = 0; 22971 if (un->un_f_lun_reset_enabled == TRUE) { 22972 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22973 } 22974 if (reset_retval == 0) { 22975 /* The LUN reset either failed or was not issued */ 22976 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22977 } 22978 if ((reset_retval == 0) && 22979 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22980 rval = EIO; 22981 kmem_free(com, sizeof (*com)); 22982 return (rval); 22983 } 22984 22985 bzero(com, sizeof (struct uscsi_cmd)); 22986 com->uscsi_flags = USCSI_SILENT; 22987 com->uscsi_cdb = cdb; 22988 com->uscsi_cdblen = CDB_GROUP0; 22989 com->uscsi_timeout = 5; 22990 22991 /* 22992 * Reissue the last reserve command, this time without request 22993 * sense. Assume that it is just a regular reserve command. 22994 */ 22995 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22996 SD_PATH_STANDARD); 22997 } 22998 22999 /* Return an error if still getting a reservation conflict. 
*/
23000 	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
23001 		rval = EACCES;
23002 	}
23003
23004 	kmem_free(com, sizeof (*com));
23005 	return (rval);
23006 }
23007
23008
23009 #define	SD_NDUMP_RETRIES	12
23010 /*
23011 * System Crash Dump routine
23012 */
23013
23014 static int
23015 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
23016 {
23017 	int		instance;
23018 	int		partition;
23019 	int		i;
23020 	int		err;
23021 	struct sd_lun	*un;
23022 	struct scsi_pkt *wr_pktp;
23023 	struct buf	*wr_bp;
23024 	struct buf	wr_buf;
23025 	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
23026 	daddr_t		tgt_blkno;	/* rmw - blkno for target */
23027 	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
23028 	size_t		tgt_nblk;	/* rmw - # of tgt blks to xfer */
23029 	size_t		io_start_offset;
23030 	int		doing_rmw = FALSE;
23031 	int		rval;
23032 	ssize_t		dma_resid;
23033 	daddr_t		oblkno;
23034 	diskaddr_t	nblks = 0;
23035 	diskaddr_t	start_block;
23036
23037 	instance = SDUNIT(dev);
23038 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
23039 	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
23040 		return (ENXIO);
23041 	}
23042
23043 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
23044
23045 	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
23046
23047 	partition = SDPART(dev);
23048 	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
23049
23050 	/* Validate the blocks to dump against the partition size. */
23051
23052 	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
23053 	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);
23054
23055 	if ((blkno + nblk) > nblks) {
23056 		SD_TRACE(SD_LOG_DUMP, un,
23057 		    "sddump: dump range larger than partition: "
23058 		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
23059 		    blkno, nblk, nblks);
23060 		return (EINVAL);
23061 	}
23062
23063 	mutex_enter(&un->un_pm_mutex);
23064 	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
23065 		struct scsi_pkt *start_pktp;
23066
23067 		mutex_exit(&un->un_pm_mutex);
23068
23069 		/*
23070 		 * use pm framework to power on HBA 1st
23071 		 */
23072 		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
23073
23074 		/*
23075 		 * Dump no longer uses sdpower to power on a device; it's
23076 		 * done in-line here so it can be done in polled mode.
23077 		 */
23078
23079 		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
23080
23081 		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
23082 		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
23083
23084 		if (start_pktp == NULL) {
23085 			/* We were not given a SCSI packet, fail. */
23086 			return (EIO);
23087 		}
23088 		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
23089 		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
23090 		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
23091 		start_pktp->pkt_flags = FLAG_NOINTR;
23092
23093 		mutex_enter(SD_MUTEX(un));
23094 		SD_FILL_SCSI1_LUN(un, start_pktp);
23095 		mutex_exit(SD_MUTEX(un));
23096 		/*
23097 		 * Scsi_poll returns 0 (success) if the command completes and
23098 		 * the status block is STATUS_GOOD.
23099 		 */
23100 		if (sd_scsi_poll(un, start_pktp) != 0) {
23101 			scsi_destroy_pkt(start_pktp);
23102 			return (EIO);
23103 		}
23104 		scsi_destroy_pkt(start_pktp);
23105 		(void) sd_ddi_pm_resume(un);
23106 	} else {
23107 		mutex_exit(&un->un_pm_mutex);
23108 	}
23109
23110 	mutex_enter(SD_MUTEX(un));
23111 	un->un_throttle = 0;
23112
23113 	/*
23114 	 * The first time through, reset the specific target device.
23115 	 * However, when cpr calls sddump we know that sd is in
23116 	 * a good state so no bus reset is required.
23117 	 * Clear sense data via Request Sense cmd.
23118 	 * In sddump we don't care about allow_bus_device_reset anymore
23119 	 */
23120
23121 	if ((un->un_state != SD_STATE_SUSPENDED) &&
23122 	    (un->un_state != SD_STATE_DUMPING)) {
23123
23124 		New_state(un, SD_STATE_DUMPING);
23125
23126 		if (un->un_f_is_fibre == FALSE) {
23127 			mutex_exit(SD_MUTEX(un));
23128 			/*
23129 			 * Attempt a bus reset for parallel scsi.
23130 			 *
23131 			 * Note: A bus reset is required because on some host
23132 			 * systems (i.e. E420R) a bus device reset is
23133 			 * insufficient to reset the state of the target.
23134 			 *
23135 			 * Note: Don't issue the reset for fibre-channel,
23136 			 * because this tends to hang the bus (loop) for
23137 			 * too long while everyone is logging out and in
23138 			 * and the deadman timer for dumping will fire
23139 			 * before the dump is complete.
23140 			 */
23141 			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
23142 				mutex_enter(SD_MUTEX(un));
23143 				Restore_state(un);
23144 				mutex_exit(SD_MUTEX(un));
23145 				return (EIO);
23146 			}
23147
23148 			/* Delay to give the device some recovery time. */
23149 			drv_usecwait(10000);
23150
23151 			if (sd_send_polled_RQS(un) == SD_FAILURE) {
23152 				SD_INFO(SD_LOG_DUMP, un,
23153 				    "sddump: sd_send_polled_RQS failed\n");
23154 			}
23155 			mutex_enter(SD_MUTEX(un));
23156 		}
23157 	}
23158
23159 	/*
23160 	 * Convert the partition-relative block number to a
23161 	 * disk physical block number.
23162 	 */
23163 	blkno += start_block;
23164
23165 	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);
23166
23167
23168 	/*
23169 	 * Check if the device has a non-512 block size.
23170 	 */
23171 	wr_bp = NULL;
23172 	if (NOT_DEVBSIZE(un)) {
23173 		tgt_byte_offset = blkno * un->un_sys_blocksize;
23174 		tgt_byte_count = nblk * un->un_sys_blocksize;
23175 		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
23176 		    (tgt_byte_count % un->un_tgt_blocksize)) {
23177 			doing_rmw = TRUE;
23178 			/*
23179 			 * Calculate the block number and number of blocks
23180 			 * in terms of the media block size.
23181 			 */
23182 			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
23183 			tgt_nblk =
23184 			    ((tgt_byte_offset + tgt_byte_count +
23185 			    (un->un_tgt_blocksize - 1)) /
23186 			    un->un_tgt_blocksize) - tgt_blkno;
23187
23188 			/*
23189 			 * Invoke the routine which is going to do the read
23190 			 * part of read-modify-write.
23191 			 * Note that this routine returns a pointer to
23192 			 * a valid bp in wr_bp.
23193 			 */
23194 			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
23195 			    &wr_bp);
23196 			if (err) {
23197 				mutex_exit(SD_MUTEX(un));
23198 				return (err);
23199 			}
23200 			/*
23201 			 * Offset is being calculated as -
23202 			 * (original block # * system block size) -
23203 			 * (new block # * target block size)
23204 			 */
23205 			io_start_offset =
23206 			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
23207 			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));
23208
23209 			ASSERT((io_start_offset >= 0) &&
23210 			    (io_start_offset < un->un_tgt_blocksize));
23211 			/*
23212 			 * Do the modify portion of read-modify-write.
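			 * As a worked example with hypothetical sizes: for
			 * 512-byte system blocks on 2048-byte media blocks,
			 * a dump starting at system block 9 gives
			 * tgt_byte_offset = 4608, tgt_blkno = 2, and
			 * io_start_offset = 4608 - (2 * 2048) = 512, so the
			 * caller's data lands 512 bytes into wr_bp's buffer.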
23213 */ 23214 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 23215 (size_t)nblk * un->un_sys_blocksize); 23216 } else { 23217 doing_rmw = FALSE; 23218 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23219 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 23220 } 23221 23222 /* Convert blkno and nblk to target blocks */ 23223 blkno = tgt_blkno; 23224 nblk = tgt_nblk; 23225 } else { 23226 wr_bp = &wr_buf; 23227 bzero(wr_bp, sizeof (struct buf)); 23228 wr_bp->b_flags = B_BUSY; 23229 wr_bp->b_un.b_addr = addr; 23230 wr_bp->b_bcount = nblk << DEV_BSHIFT; 23231 wr_bp->b_resid = 0; 23232 } 23233 23234 mutex_exit(SD_MUTEX(un)); 23235 23236 /* 23237 * Obtain a SCSI packet for the write command. 23238 * It should be safe to call the allocator here without 23239 * worrying about being locked for DVMA mapping because 23240 * the address we're passed is already a DVMA mapping 23241 * 23242 * We are also not going to worry about semaphore ownership 23243 * in the dump buffer. Dumping is single threaded at present. 23244 */ 23245 23246 wr_pktp = NULL; 23247 23248 dma_resid = wr_bp->b_bcount; 23249 oblkno = blkno; 23250 23251 while (dma_resid != 0) { 23252 23253 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23254 wr_bp->b_flags &= ~B_ERROR; 23255 23256 if (un->un_partial_dma_supported == 1) { 23257 blkno = oblkno + 23258 ((wr_bp->b_bcount - dma_resid) / 23259 un->un_tgt_blocksize); 23260 nblk = dma_resid / un->un_tgt_blocksize; 23261 23262 if (wr_pktp) { 23263 /* 23264 * Partial DMA transfers after initial transfer 23265 */ 23266 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 23267 blkno, nblk); 23268 } else { 23269 /* Initial transfer */ 23270 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23271 un->un_pkt_flags, NULL_FUNC, NULL, 23272 blkno, nblk); 23273 } 23274 } else { 23275 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23276 0, NULL_FUNC, NULL, blkno, nblk); 23277 } 23278 23279 if (rval == 0) { 23280 /* We were given a SCSI packet, continue. 
*/ 23281 break; 23282 } 23283 23284 if (i == 0) { 23285 if (wr_bp->b_flags & B_ERROR) { 23286 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23287 "no resources for dumping; " 23288 "error code: 0x%x, retrying", 23289 geterror(wr_bp)); 23290 } else { 23291 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23292 "no resources for dumping; retrying"); 23293 } 23294 } else if (i != (SD_NDUMP_RETRIES - 1)) { 23295 if (wr_bp->b_flags & B_ERROR) { 23296 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23297 "no resources for dumping; error code: " 23298 "0x%x, retrying\n", geterror(wr_bp)); 23299 } 23300 } else { 23301 if (wr_bp->b_flags & B_ERROR) { 23302 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23303 "no resources for dumping; " 23304 "error code: 0x%x, retries failed, " 23305 "giving up.\n", geterror(wr_bp)); 23306 } else { 23307 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23308 "no resources for dumping; " 23309 "retries failed, giving up.\n"); 23310 } 23311 mutex_enter(SD_MUTEX(un)); 23312 Restore_state(un); 23313 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 23314 mutex_exit(SD_MUTEX(un)); 23315 scsi_free_consistent_buf(wr_bp); 23316 } else { 23317 mutex_exit(SD_MUTEX(un)); 23318 } 23319 return (EIO); 23320 } 23321 drv_usecwait(10000); 23322 } 23323 23324 if (un->un_partial_dma_supported == 1) { 23325 /* 23326 * save the resid from PARTIAL_DMA 23327 */ 23328 dma_resid = wr_pktp->pkt_resid; 23329 if (dma_resid != 0) 23330 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23331 wr_pktp->pkt_resid = 0; 23332 } else { 23333 dma_resid = 0; 23334 } 23335 23336 /* SunBug 1222170 */ 23337 wr_pktp->pkt_flags = FLAG_NOINTR; 23338 23339 err = EIO; 23340 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23341 23342 /* 23343 * Scsi_poll returns 0 (success) if the command completes and 23344 * the status block is STATUS_GOOD. We should only check 23345 * errors if this condition is not true. Even then we should 23346 * send our own request sense packet only if we have a check 23347 * condition and auto request sense has not been performed by 23348 * the hba. 23349 */ 23350 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23351 23352 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23353 (wr_pktp->pkt_resid == 0)) { 23354 err = SD_SUCCESS; 23355 break; 23356 } 23357 23358 /* 23359 * Check CMD_DEV_GONE 1st, give up if device is gone. 
23360 */ 23361 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23362 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23363 "Error while dumping state...Device is gone\n"); 23364 break; 23365 } 23366 23367 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23368 SD_INFO(SD_LOG_DUMP, un, 23369 "sddump: write failed with CHECK, try # %d\n", i); 23370 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23371 (void) sd_send_polled_RQS(un); 23372 } 23373 23374 continue; 23375 } 23376 23377 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23378 int reset_retval = 0; 23379 23380 SD_INFO(SD_LOG_DUMP, un, 23381 "sddump: write failed with BUSY, try # %d\n", i); 23382 23383 if (un->un_f_lun_reset_enabled == TRUE) { 23384 reset_retval = scsi_reset(SD_ADDRESS(un), 23385 RESET_LUN); 23386 } 23387 if (reset_retval == 0) { 23388 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23389 } 23390 (void) sd_send_polled_RQS(un); 23391 23392 } else { 23393 SD_INFO(SD_LOG_DUMP, un, 23394 "sddump: write failed with 0x%x, try # %d\n", 23395 SD_GET_PKT_STATUS(wr_pktp), i); 23396 mutex_enter(SD_MUTEX(un)); 23397 sd_reset_target(un, wr_pktp); 23398 mutex_exit(SD_MUTEX(un)); 23399 } 23400 23401 /* 23402 * If we are not getting anywhere with lun/target resets, 23403 * let's reset the bus. 23404 */ 23405 if (i == SD_NDUMP_RETRIES/2) { 23406 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23407 (void) sd_send_polled_RQS(un); 23408 } 23409 } 23410 } 23411 23412 scsi_destroy_pkt(wr_pktp); 23413 mutex_enter(SD_MUTEX(un)); 23414 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23415 mutex_exit(SD_MUTEX(un)); 23416 scsi_free_consistent_buf(wr_bp); 23417 } else { 23418 mutex_exit(SD_MUTEX(un)); 23419 } 23420 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23421 return (err); 23422 } 23423 23424 /* 23425 * Function: sd_scsi_poll() 23426 * 23427 * Description: This is a wrapper for the scsi_poll call. 23428 * 23429 * Arguments: sd_lun - The unit structure 23430 * scsi_pkt - The scsi packet being sent to the device. 23431 * 23432 * Return Code: 0 - Command completed successfully with good status 23433 * -1 - Command failed. This could indicate a check condition 23434 * or other status value requiring recovery action. 23435 * 23436 * NOTE: This code is only called off sddump(). 23437 */ 23438 23439 static int 23440 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23441 { 23442 int status; 23443 23444 ASSERT(un != NULL); 23445 ASSERT(!mutex_owned(SD_MUTEX(un))); 23446 ASSERT(pktp != NULL); 23447 23448 status = SD_SUCCESS; 23449 23450 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23451 pktp->pkt_flags |= un->un_tagflags; 23452 pktp->pkt_flags &= ~FLAG_NODISCON; 23453 } 23454 23455 status = sd_ddi_scsi_poll(pktp); 23456 /* 23457 * Scsi_poll returns 0 (success) if the command completes and the 23458 * status block is STATUS_GOOD. We should only check errors if this 23459 * condition is not true. Even then we should send our own request 23460 * sense packet only if we have a check condition and auto 23461 * request sense has not been performed by the hba. 23462 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23463 */ 23464 if ((status != SD_SUCCESS) && 23465 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23466 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23467 (pktp->pkt_reason != CMD_DEV_GONE)) 23468 (void) sd_send_polled_RQS(un); 23469 23470 return (status); 23471 } 23472 23473 /* 23474 * Function: sd_send_polled_RQS() 23475 * 23476 * Description: This sends the request sense command to a device. 
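 * It reuses the pre-allocated request sense packet and buffer
 * (un->un_rqs_pktp and un->un_rqs_bp) in polled mode, which is why
 * the code below first checks that the sense resources are not
 * already marked busy via un_sense_isbusy.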
23477 * 23478 * Arguments: sd_lun - The unit structure 23479 * 23480 * Return Code: 0 - Command completed successfully with good status 23481 * -1 - Command failed. 23482 * 23483 */ 23484 23485 static int 23486 sd_send_polled_RQS(struct sd_lun *un) 23487 { 23488 int ret_val; 23489 struct scsi_pkt *rqs_pktp; 23490 struct buf *rqs_bp; 23491 23492 ASSERT(un != NULL); 23493 ASSERT(!mutex_owned(SD_MUTEX(un))); 23494 23495 ret_val = SD_SUCCESS; 23496 23497 rqs_pktp = un->un_rqs_pktp; 23498 rqs_bp = un->un_rqs_bp; 23499 23500 mutex_enter(SD_MUTEX(un)); 23501 23502 if (un->un_sense_isbusy) { 23503 ret_val = SD_FAILURE; 23504 mutex_exit(SD_MUTEX(un)); 23505 return (ret_val); 23506 } 23507 23508 /* 23509 * If the request sense buffer (and packet) is not in use, 23510 * let's set the un_sense_isbusy and send our packet 23511 */ 23512 un->un_sense_isbusy = 1; 23513 rqs_pktp->pkt_resid = 0; 23514 rqs_pktp->pkt_reason = 0; 23515 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23516 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23517 23518 mutex_exit(SD_MUTEX(un)); 23519 23520 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23521 " 0x%p\n", rqs_bp->b_un.b_addr); 23522 23523 /* 23524 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23525 * axle - it has a call into us! 23526 */ 23527 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23528 SD_INFO(SD_LOG_COMMON, un, 23529 "sd_send_polled_RQS: RQS failed\n"); 23530 } 23531 23532 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23533 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23534 23535 mutex_enter(SD_MUTEX(un)); 23536 un->un_sense_isbusy = 0; 23537 mutex_exit(SD_MUTEX(un)); 23538 23539 return (ret_val); 23540 } 23541 23542 /* 23543 * Defines needed for localized version of the scsi_poll routine. 23544 */ 23545 #define CSEC 10000 /* usecs */ 23546 #define SEC_TO_CSEC (1000000/CSEC) 23547 23548 /* 23549 * Function: sd_ddi_scsi_poll() 23550 * 23551 * Description: Localized version of the scsi_poll routine. The purpose is to 23552 * send a scsi_pkt to a device as a polled command. This version 23553 * is to ensure more robust handling of transport errors. 23554 * Specifically this routine cures not ready, coming ready 23555 * transition for power up and reset of sonoma's. This can take 23556 * up to 45 seconds for power-on and 20 seconds for reset of a 23557 * sonoma lun. 23558 * 23559 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23560 * 23561 * Return Code: 0 - Command completed successfully with good status 23562 * -1 - Command failed. 23563 * 23564 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 23565 * be fixed (removing this code), we need to determine how to handle the 23566 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 23567 * 23568 * NOTE: This code is only called off sddump(). 23569 */ 23570 static int 23571 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23572 { 23573 int rval = -1; 23574 int savef; 23575 long savet; 23576 void (*savec)(); 23577 int timeout; 23578 int busy_count; 23579 int poll_delay; 23580 int rc; 23581 uint8_t *sensep; 23582 struct scsi_arq_status *arqstat; 23583 extern int do_polled_io; 23584 23585 ASSERT(pkt->pkt_scbp); 23586 23587 /* 23588 * save old flags.. 
23589 */ 23590 savef = pkt->pkt_flags; 23591 savec = pkt->pkt_comp; 23592 savet = pkt->pkt_time; 23593 23594 pkt->pkt_flags |= FLAG_NOINTR; 23595 23596 /* 23597 * XXX there is nothing in the SCSA spec that states that we should not 23598 * do a callback for polled cmds; however, removing this will break sd 23599 * and probably other target drivers 23600 */ 23601 pkt->pkt_comp = NULL; 23602 23603 /* 23604 * we don't like a polled command without timeout. 23605 * 60 seconds seems long enough. 23606 */ 23607 if (pkt->pkt_time == 0) 23608 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23609 23610 /* 23611 * Send polled cmd. 23612 * 23613 * We do some error recovery for various errors. Tran_busy, 23614 * queue full, and non-dispatched commands are retried every 10 msec. 23615 * as they are typically transient failures. Busy status and Not 23616 * Ready are retried every second as this status takes a while to 23617 * change. 23618 */ 23619 timeout = pkt->pkt_time * SEC_TO_CSEC; 23620 23621 for (busy_count = 0; busy_count < timeout; busy_count++) { 23622 /* 23623 * Initialize pkt status variables. 23624 */ 23625 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23626 23627 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23628 if (rc != TRAN_BUSY) { 23629 /* Transport failed - give up. */ 23630 break; 23631 } else { 23632 /* Transport busy - try again. */ 23633 poll_delay = 1 * CSEC; /* 10 msec. */ 23634 } 23635 } else { 23636 /* 23637 * Transport accepted - check pkt status. 23638 */ 23639 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23640 if ((pkt->pkt_reason == CMD_CMPLT) && 23641 (rc == STATUS_CHECK) && 23642 (pkt->pkt_state & STATE_ARQ_DONE)) { 23643 arqstat = 23644 (struct scsi_arq_status *)(pkt->pkt_scbp); 23645 sensep = (uint8_t *)&arqstat->sts_sensedata; 23646 } else { 23647 sensep = NULL; 23648 } 23649 23650 if ((pkt->pkt_reason == CMD_CMPLT) && 23651 (rc == STATUS_GOOD)) { 23652 /* No error - we're done */ 23653 rval = 0; 23654 break; 23655 23656 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23657 /* Lost connection - give up */ 23658 break; 23659 23660 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23661 (pkt->pkt_state == 0)) { 23662 /* Pkt not dispatched - try again. */ 23663 poll_delay = 1 * CSEC; /* 10 msec. */ 23664 23665 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23666 (rc == STATUS_QFULL)) { 23667 /* Queue full - try again. */ 23668 poll_delay = 1 * CSEC; /* 10 msec. */ 23669 23670 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23671 (rc == STATUS_BUSY)) { 23672 /* Busy - try again. */ 23673 poll_delay = 100 * CSEC; /* 1 sec. */ 23674 busy_count += (SEC_TO_CSEC - 1); 23675 23676 } else if ((sensep != NULL) && 23677 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 23678 /* 23679 * Unit Attention - try again. 23680 * Pretend it took 1 sec. 23681 * NOTE: 'continue' avoids poll_delay 23682 */ 23683 busy_count += (SEC_TO_CSEC - 1); 23684 continue; 23685 23686 } else if ((sensep != NULL) && 23687 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23688 (scsi_sense_asc(sensep) == 0x04) && 23689 (scsi_sense_ascq(sensep) == 0x01)) { 23690 /* 23691 * Not ready -> ready - try again. 23692 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 23693 * ...same as STATUS_BUSY 23694 */ 23695 poll_delay = 100 * CSEC; /* 1 sec. */ 23696 busy_count += (SEC_TO_CSEC - 1); 23697 23698 } else { 23699 /* BAD status - give up. 
*/ 23700 break; 23701 } 23702 } 23703 23704 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 23705 !do_polled_io) { 23706 delay(drv_usectohz(poll_delay)); 23707 } else { 23708 /* we busy wait during cpr_dump or interrupt threads */ 23709 drv_usecwait(poll_delay); 23710 } 23711 } 23712 23713 pkt->pkt_flags = savef; 23714 pkt->pkt_comp = savec; 23715 pkt->pkt_time = savet; 23716 23717 /* return on error */ 23718 if (rval) 23719 return (rval); 23720 23721 /* 23722 * This is not a performance critical code path. 23723 * 23724 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 23725 * issues associated with looking at DMA memory prior to 23726 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 23727 */ 23728 scsi_sync_pkt(pkt); 23729 return (0); 23730 } 23731 23732 23733 23734 /* 23735 * Function: sd_persistent_reservation_in_read_keys 23736 * 23737 * Description: This routine is the driver entry point for handling CD-ROM 23738 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23739 * by sending the SCSI-3 PRIN commands to the device. 23740 * Processes the read keys command response by copying the 23741 * reservation key information into the user provided buffer. 23742 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23743 * 23744 * Arguments: un - Pointer to soft state struct for the target. 23745 * usrp - user provided pointer to multihost Persistent In Read 23746 * Keys structure (mhioc_inkeys_t) 23747 * flag - this argument is a pass through to ddi_copyxxx() 23748 * directly from the mode argument of ioctl(). 23749 * 23750 * Return Code: 0 - Success 23751 * EACCES 23752 * ENOTSUP 23753 * errno return code from sd_send_scsi_cmd() 23754 * 23755 * Context: Can sleep. Does not return until command is completed. 23756 */ 23757 23758 static int 23759 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23760 mhioc_inkeys_t *usrp, int flag) 23761 { 23762 #ifdef _MULTI_DATAMODEL 23763 struct mhioc_key_list32 li32; 23764 #endif 23765 sd_prin_readkeys_t *in; 23766 mhioc_inkeys_t *ptr; 23767 mhioc_key_list_t li; 23768 uchar_t *data_bufp; 23769 int data_len; 23770 int rval; 23771 size_t copysz; 23772 23773 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23774 return (EINVAL); 23775 } 23776 bzero(&li, sizeof (mhioc_key_list_t)); 23777 23778 /* 23779 * Get the listsize from user 23780 */ 23781 #ifdef _MULTI_DATAMODEL 23782 23783 switch (ddi_model_convert_from(flag & FMODELS)) { 23784 case DDI_MODEL_ILP32: 23785 copysz = sizeof (struct mhioc_key_list32); 23786 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23787 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23788 "sd_persistent_reservation_in_read_keys: " 23789 "failed ddi_copyin: mhioc_key_list32_t\n"); 23790 rval = EFAULT; 23791 goto done; 23792 } 23793 li.listsize = li32.listsize; 23794 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23795 break; 23796 23797 case DDI_MODEL_NONE: 23798 copysz = sizeof (mhioc_key_list_t); 23799 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23800 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23801 "sd_persistent_reservation_in_read_keys: " 23802 "failed ddi_copyin: mhioc_key_list_t\n"); 23803 rval = EFAULT; 23804 goto done; 23805 } 23806 break; 23807 } 23808 23809 #else /* ! 
_MULTI_DATAMODEL */
23810 	copysz = sizeof (mhioc_key_list_t);
23811 	if (ddi_copyin(ptr->li, &li, copysz, flag)) {
23812 		SD_ERROR(SD_LOG_IOCTL_MHD, un,
23813 		    "sd_persistent_reservation_in_read_keys: "
23814 		    "failed ddi_copyin: mhioc_key_list_t\n");
23815 		rval = EFAULT;
23816 		goto done;
23817 	}
23818 #endif
23819
23820 	data_len = li.listsize * MHIOC_RESV_KEY_SIZE;
23821 	data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
23822 	data_bufp = kmem_zalloc(data_len, KM_SLEEP);
23823
23824 	if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS,
23825 	    data_len, data_bufp)) != 0) {
23826 		goto done;
23827 	}
23828 	in = (sd_prin_readkeys_t *)data_bufp;
23829 	ptr->generation = BE_32(in->generation);
23830 	li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;
23831
23832 	/*
23833 	 * Return the min(listsize, listlen) keys
23834 	 */
23835 #ifdef _MULTI_DATAMODEL
23836
23837 	switch (ddi_model_convert_from(flag & FMODELS)) {
23838 	case DDI_MODEL_ILP32:
23839 		li32.listlen = li.listlen;
23840 		if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
23841 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
23842 			    "sd_persistent_reservation_in_read_keys: "
23843 			    "failed ddi_copyout: mhioc_key_list32_t\n");
23844 			rval = EFAULT;
23845 			goto done;
23846 		}
23847 		break;
23848
23849 	case DDI_MODEL_NONE:
23850 		if (ddi_copyout(&li, ptr->li, copysz, flag)) {
23851 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
23852 			    "sd_persistent_reservation_in_read_keys: "
23853 			    "failed ddi_copyout: mhioc_key_list_t\n");
23854 			rval = EFAULT;
23855 			goto done;
23856 		}
23857 		break;
23858 	}
23859
23860 #else /* ! _MULTI_DATAMODEL */
23861
23862 	if (ddi_copyout(&li, ptr->li, copysz, flag)) {
23863 		SD_ERROR(SD_LOG_IOCTL_MHD, un,
23864 		    "sd_persistent_reservation_in_read_keys: "
23865 		    "failed ddi_copyout: mhioc_key_list_t\n");
23866 		rval = EFAULT;
23867 		goto done;
23868 	}
23869
23870 #endif /* _MULTI_DATAMODEL */
23871
23872 	copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
23873 	    li.listsize * MHIOC_RESV_KEY_SIZE);
23874 	if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
23875 		SD_ERROR(SD_LOG_IOCTL_MHD, un,
23876 		    "sd_persistent_reservation_in_read_keys: "
23877 		    "failed ddi_copyout: keylist\n");
23878 		rval = EFAULT;
23879 	}
23880 done:
23881 	kmem_free(data_bufp, data_len);
23882 	return (rval);
23883 }
23884
23885
23886 /*
23887 * Function: sd_persistent_reservation_in_read_resv
23888 *
23889 * Description: This routine is the driver entry point for handling CD-ROM
23890 * multi-host persistent reservation requests (MHIOCGRP_INRESV)
23891 * by sending the SCSI-3 PRIN commands to the device.
23892 * Process the read persistent reservations command response by
23893 * copying the reservation information into the user provided
23894 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented.
23895 *
23896 * Arguments: un - Pointer to soft state struct for the target.
23897 * usrp - user provided pointer to multihost Persistent In Read
23898 * Reservations structure (mhioc_inresvs_t)
23899 * flag - this argument is a pass through to ddi_copyxxx()
23900 * directly from the mode argument of ioctl().
23901 *
23902 * Return Code: 0 - Success
23903 * EACCES
23904 * ENOTSUP
23905 * errno return code from sd_send_scsi_cmd()
23906 *
23907 * Context: Can sleep. Does not return until command is completed.
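 *
 * Note that the PRIN payload is returned big-endian on the wire:
 * the generation count and each descriptor's scope-specific address
 * are converted via BE_32() before being copied out to the caller.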
 */

static int
sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_resv_desc_list32 resvlist32;
#endif
	sd_prin_readresv_t	*in;
	mhioc_inresvs_t		*ptr;
	sd_readresv_desc_t	*readresv_ptr;
	mhioc_resv_desc_list_t	resvlist;
	mhioc_resv_desc_t	resvdesc;
	uchar_t			*data_bufp = NULL;
	int			data_len = 0;
	int			rval;
	int			i;
	size_t			copysz;
	mhioc_resv_desc_t	*bufp;

	if ((ptr = usrp) == NULL) {
		return (EINVAL);
	}

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_resv_desc_list32);
		if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		resvlist.listsize = resvlist32.listsize;
		resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_resv_desc_list_t);
		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_resv_desc_list_t);
	if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif /* ! _MULTI_DATAMODEL */

	data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN;
	data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV,
	    data_len, data_bufp)) != 0) {
		goto done;
	}
	in = (sd_prin_readresv_t *)data_bufp;
	ptr->generation = BE_32(in->generation);
	resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		resvlist32.listlen = resvlist.listlen;
		if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}

#endif /* ! _MULTI_DATAMODEL */
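
	/*
	 * Copy out at most listsize descriptors, converting each one from
	 * its big-endian wire format into a native mhioc_resv_desc_t.
	 */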
	readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
	bufp = resvlist.list;
	copysz = sizeof (mhioc_resv_desc_t);
	for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
	    i++, readresv_ptr++, bufp++) {

		bcopy(&readresv_ptr->resvkey, &resvdesc.key,
		    MHIOC_RESV_KEY_SIZE);
		resvdesc.type = readresv_ptr->type;
		resvdesc.scope = readresv_ptr->scope;
		resvdesc.scope_specific_addr =
		    BE_32(readresv_ptr->scope_specific_addr);

		if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: resvlist\n");
			rval = EFAULT;
			goto done;
		}
	}
done:
	if (data_bufp != NULL) {
		kmem_free(data_bufp, data_len);
	}
	return (rval);
}


/*
 * Function: sr_change_blkmode()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              block mode ioctl requests. Support for returning and changing
 *              the current block size in use by the device is implemented.
 *              The LBA size is changed via a MODE SELECT Block Descriptor.
 *
 *              This routine issues a mode sense with an allocation length of
 *              12 bytes for the mode page header and a single block
 *              descriptor.
 *
 * Arguments: dev - the device 'dev_t'
 *            cmd - the request type; one of CDROMGBLKMODE (get) or
 *                  CDROMSBLKMODE (set)
 *            data - current block size or requested block size
 *            flag - this argument is a pass through to ddi_copyxxx() directly
 *                   from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EINVAL if invalid arguments are provided
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EIO if invalid mode sense block descriptor length
 *
 */

static int
sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct mode_header	*sense_mhp, *select_mhp;
	struct block_descriptor	*sense_desc, *select_desc;
	int			current_bsize;
	int			rval = EINVAL;
	uchar_t			*sense = NULL;
	uchar_t			*select = NULL;

	ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * The block length is changed via the Mode Select block descriptor;
	 * the "Read/Write Error Recovery" mode page (0x1) contents are not
	 * actually required as part of this routine. Therefore the mode sense
	 * allocation length is specified to be the length of a mode page
	 * header and a block descriptor.
	 */
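	/*
	 * For reference, the expected layout of the BUFLEN_CHG_BLK_MODE
	 * (12 byte) mode sense buffer used below is:
	 *
	 *     bytes 0-3:  struct mode_header (bdesc_length in byte 3)
	 *     bytes 4-11: struct block_descriptor, with the 24-bit
	 *                 big-endian block length in bytes 9-11
	 *                 (blksize_hi/blksize_mid/blksize_lo)
	 */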
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	/* Process command */
	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the block size obtained during the mode sense */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSBLKMODE:
		/* Validate the requested block size */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/*
		 * The current block size matches the requested block size so
		 * there is no need to send the mode select to change the size
		 */
		if (current_bsize == data) {
			break;
		}

		/* Build the select data for the requested block size */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		/*
		 * The LBA size is changed via the block descriptor, so the
		 * descriptor is built according to the user data
		 */
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo = (char)((data) & 0x000000ff);

		/* Send the mode select for the requested block size */
		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD)) != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			/*
			 * The mode select failed for the requested block size,
			 * so reset the data for the original block size and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
			 */
			select_desc->blksize_hi = sense_desc->blksize_hi;
			select_desc->blksize_mid = sense_desc->blksize_mid;
			select_desc->blksize_lo = sense_desc->blksize_lo;
			(void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0,
			    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
			    SD_PATH_STANDARD);
		} else {
			ASSERT(!mutex_owned(SD_MUTEX(un)));
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, (uint32_t)data, 0);
			mutex_exit(SD_MUTEX(un));
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_CHG_BLK_MODE);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
	}
	return (rval);
}


/*
 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
 * implement driver support for getting and setting the CD speed. The command
 * set used will be based on the device type. If the device has not been
 * identified as MMC, the Toshiba vendor specific mode page will be used. If
 * the device is MMC but does not support the Real Time Streaming feature,
 * the SET CD SPEED command will be used to set speed and mode page 0x2A will
 * be used to read the speed.
 */

/*
 * Function: sr_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              drive speed ioctl requests for devices supporting the Toshiba
 *              vendor specific drive speed mode page. Support for returning
 *              and changing the current drive speed in use by the device is
 *              implemented.
 *
 * Arguments: dev - the device 'dev_t'
 *            cmd - the request type; one of CDROMGDRVSPEED (get) or
 *                  CDROMSDRVSPEED (set)
 *            data - current drive speed or requested drive speed
 *            flag - this argument is a pass through to ddi_copyxxx() directly
 *                   from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EINVAL if invalid arguments are provided
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EIO if invalid mode sense block descriptor length
 */

static int
sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct mode_header	*sense_mhp, *select_mhp;
	struct mode_speed	*sense_page, *select_page;
	int			current_speed;
	int			rval = EINVAL;
	int			bd_len;
	uchar_t			*sense = NULL;
	uchar_t			*select = NULL;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Note: The drive speed is being modified here according to a Toshiba
	 * vendor specific mode page (0x31).
	 */
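	/*
	 * Illustrative (hypothetical) userland use of this ioctl pair, shown
	 * only to clarify the contract; not part of the driver:
	 *
	 *     int speed = CDROM_DOUBLE_SPEED;
	 *     if (ioctl(fd, CDROMSDRVSPEED, speed) == 0 &&
	 *         ioctl(fd, CDROMGDRVSPEED, &speed) == 0)
	 *             (void) printf("current speed code: %d\n", speed);
	 */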
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
	    SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (rval);
	}
	sense_mhp = (struct mode_header *)sense;

	/* Check the block descriptor len to handle only 1 block descriptor */
	bd_len = sense_mhp->bdesc_length;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense returned invalid block "
		    "descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (EIO);
	}

	sense_page = (struct mode_speed *)
	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
	current_speed = sense_page->speed;

	/* Process command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Return the drive speed obtained during the mode sense */
		if (current_speed == 0x2) {
			current_speed = CDROM_TWELVE_SPEED;
		}
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0) {
			rval = EFAULT;
		}
		break;
	case CDROMSDRVSPEED:
		/* Validate the requested drive speed */
		switch ((uchar_t)data) {
		case CDROM_TWELVE_SPEED:
			data = 0x2;
			/*FALLTHROUGH*/
		case CDROM_NORMAL_SPEED:
		case CDROM_DOUBLE_SPEED:
		case CDROM_QUAD_SPEED:
		case CDROM_MAXIMUM_SPEED:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: "
			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
			return (EINVAL);
		}

		/*
		 * The current drive speed matches the requested drive speed so
		 * there is no need to send the mode select to change the speed
		 */
		if (current_speed == data) {
			break;
		}

		/* Build the select data for the requested drive speed */
		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_mhp->bdesc_length = 0;
		select_page =
		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
		select_page->mode_page.code = CDROM_MODE_SPEED;
		select_page->mode_page.length = 2;
		select_page->speed = (uchar_t)data;

		/* Send the mode select for the requested drive speed */
		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
			/*
			 * The mode select failed for the requested drive
			 * speed, so reset the data for the original drive
			 * speed and send it to the target. The error is
			 * indicated by the return value for the failed mode
			 * select.
			 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: Mode Select Failed\n");
			select_page->speed = sense_page->speed;
			(void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
	}

	return (rval);
}


/*
 * Function: sr_atapi_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              drive speed ioctl requests for MMC devices that do not support
 *              the Real Time Streaming feature (0x107).
 *
 *              Note: This routine will use the SET SPEED command which may
 *              not be supported by all devices.
 *
 * Arguments: dev - the device 'dev_t'
 *            cmd - the request type; one of CDROMGDRVSPEED (get) or
 *                  CDROMSDRVSPEED (set)
 *            data - current drive speed or requested drive speed
 *            flag - this argument is a pass through to ddi_copyxxx() directly
 *                   from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EINVAL if invalid arguments are provided
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EIO if invalid mode sense block descriptor length
 */

static int
sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com = NULL;
	struct mode_header_grp2	*sense_mhp;
	uchar_t			*sense_page;
	uchar_t			*sense = NULL;
	char			cdb[CDB_GROUP5];
	int			bd_len;
	int			current_speed = 0;
	int			max_speed = 0;
	int			rval;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
	    SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header_grp2 *)sense;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (EIO);
	}

	/* Calculate the current and maximum drive speeds */
	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
	current_speed = (sense_page[14] << 8) | sense_page[15];
	max_speed = (sense_page[8] << 8) | sense_page[9];

	/* Process the command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		current_speed /= SD_SPEED_1X;
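		/*
		 * The capabilities page reports speeds in KB/sec; the
		 * division by SD_SPEED_1X (the 1x CD data rate in KB/sec)
		 * above yields the "Nx" rating returned to the caller.
		 */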
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSDRVSPEED:
		/* Convert the speed code to KB/sec */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* Check the request against the drive's max speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/*
		 * Build and send the SET SPEED command
		 *
		 * Note: The SET SPEED (0xBB) command used in this routine is
		 * obsolete per the SCSI MMC spec but still supported in the
		 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
		 * therefore the command is still implemented in this routine.
		 */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb = (caddr_t)cdb;
		com->uscsi_cdblen = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen = 0;
		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0,
		    SD_PATH_STANDARD);
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n",
		    cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}


/*
 * Function: sr_pause_resume()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              pause/resume ioctl requests. This only affects the audio play
 *              operation.
 *
 * Arguments: dev - the device 'dev_t'
 *            cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
 *                  for setting the resume bit of the cdb.
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EINVAL if invalid mode specified
 *
 */

static int
sr_pause_resume(dev_t dev, int cmd)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	char			cdb[CDB_GROUP1];
	int			rval;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PAUSE_RESUME;
	switch (cmd) {
	case CDROMRESUME:
		cdb[8] = 1;
		break;
	case CDROMPAUSE:
		cdb[8] = 0;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
		    " Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		goto done;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);

done:
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_play_msf()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              ioctl requests to output the audio signals at the specified
 *              starting address and continue the audio play until the
 *              specified ending address (CDROMPLAYMSF). The address is in
 *              Minute Second Frame (MSF) format.
 *
 * Arguments: dev - the device 'dev_t'
 *            data - pointer to user provided audio msf structure,
 *                   specifying start/end addresses.
 *            flag - this argument is a pass through to ddi_copyxxx()
 *                   directly from the mode argument of ioctl().
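 *
 *            For illustration, a hypothetical userland caller (not part of
 *            this driver) might fill the structure as follows:
 *
 *                struct cdrom_msf msf;
 *                msf.cdmsf_min0 = 2;  msf.cdmsf_sec0 = 0;  msf.cdmsf_frame0 = 0;
 *                msf.cdmsf_min1 = 4;  msf.cdmsf_sec1 = 30; msf.cdmsf_frame1 = 0;
 *                (void) ioctl(fd, CDROMPLAYMSF, &msf);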
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EINVAL if data pointer is NULL
 */

static int
sr_play_msf(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_msf	msf_struct;
	struct cdrom_msf	*msf = &msf_struct;
	char			cdb[CDB_GROUP1];
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) {
		return (EFAULT);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PLAYAUDIO_MSF;
	if (un->un_f_cfg_playmsf_bcd == TRUE) {
		cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0);
		cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0);
		cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0);
		cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1);
		cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1);
		cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1);
	} else {
		cdb[3] = msf->cdmsf_min0;
		cdb[4] = msf->cdmsf_sec0;
		cdb[5] = msf->cdmsf_frame0;
		cdb[6] = msf->cdmsf_min1;
		cdb[7] = msf->cdmsf_sec1;
		cdb[8] = msf->cdmsf_frame1;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_play_trkind()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              ioctl requests to output the audio signals at the specified
 *              starting address and continue the audio play until the
 *              specified ending address (CDROMPLAYTRKIND). The address is in
 *              Track Index format.
 *
 * Arguments: dev - the device 'dev_t'
 *            data - pointer to user provided audio track/index structure,
 *                   specifying start/end addresses.
 *            flag - this argument is a pass through to ddi_copyxxx()
 *                   directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EINVAL if data pointer is NULL
 */

static int
sr_play_trkind(dev_t dev, caddr_t data, int flag)
{
	struct cdrom_ti		ti_struct;
	struct cdrom_ti		*ti = &ti_struct;
	struct uscsi_cmd	*com = NULL;
	char			cdb[CDB_GROUP1];
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) {
		return (EFAULT);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PLAYAUDIO_TI;
	cdb[4] = ti->cdti_trk0;
	cdb[5] = ti->cdti_ind0;
	cdb[7] = ti->cdti_trk1;
	cdb[8] = ti->cdti_ind1;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_all_subcodes()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              ioctl requests to return raw subcode data while the target is
 *              playing audio (CDROMSUBCODE).
 *
 * Arguments: dev - the device 'dev_t'
 *            data - pointer to user provided cdrom subcode structure,
 *                   specifying the transfer length and address.
 *            flag - this argument is a pass through to ddi_copyxxx()
 *                   directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EINVAL if data pointer is NULL
 */

static int
sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com = NULL;
	struct cdrom_subcode	*subcode = NULL;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_subcode32	cdrom_subcode32;
	struct cdrom_subcode32	*cdsc32 = &cdrom_subcode32;
#endif
	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, subcode,
		    sizeof (struct cdrom_subcode), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              ioctl requests to return the Q sub-channel data of the CD
 *              current position block (CDROMSUBCHNL). The data includes the
 *              track number, index number, absolute CD-ROM address (LBA or
 *              MSF format per the user), track relative CD-ROM address (LBA
 *              or MSF format per the user), control data and audio status.
 *
 * Arguments: dev - the device 'dev_t'
 *            data - pointer to user provided cdrom sub-channel structure
 *            flag - this argument is a pass through to ddi_copyxxx()
 *                   directly from the mode argument of ioctl().
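 *
 *            For illustration, a hypothetical userland caller (not part of
 *            this driver):
 *
 *                struct cdrom_subchnl sc;
 *                sc.cdsc_format = CDROM_MSF;
 *                if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *                    (void) printf("track %d index %d\n",
 *                        sc.cdsc_trk, sc.cdsc_ind);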
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EINVAL if data pointer is NULL
 */

static int
sr_read_subchannel(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_subchnl	subchannel;
	struct cdrom_subchnl	*subchnl = &subchannel;
	char			cdb[CDB_GROUP1];
	caddr_t			buffer;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) {
		return (EFAULT);
	}

	buffer = kmem_zalloc((size_t)16, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_SUBCHANNEL;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02;
	/*
	 * Set the Q bit in byte 2 to indicate that Q sub-channel data be
	 * returned
	 */
	cdb[2] = 0x40;
	/*
	 * Set byte 3 to specify the return data format. A value of 0x01
	 * indicates that the CD-ROM current position should be returned.
	 */
	cdb[3] = 0x01;
	cdb[8] = 0x10;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 16;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 16);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the returned Q sub-channel data */
	subchnl->cdsc_audiostatus = buffer[1];
	subchnl->cdsc_adr = (buffer[5] & 0xF0);
	subchnl->cdsc_ctrl = (buffer[5] & 0x0F);
	subchnl->cdsc_trk = buffer[6];
	subchnl->cdsc_ind = buffer[7];
	if (subchnl->cdsc_format & CDROM_LBA) {
		subchnl->cdsc_absaddr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		subchnl->cdsc_reladdr.lba =
		    ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) +
		    ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]);
	} else if (un->un_f_cfg_readsub_bcd == TRUE) {
		subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]);
		subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]);
		subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]);
		subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]);
		subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]);
		subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]);
	} else {
		subchnl->cdsc_absaddr.msf.minute = buffer[9];
		subchnl->cdsc_absaddr.msf.second = buffer[10];
		subchnl->cdsc_absaddr.msf.frame = buffer[11];
		subchnl->cdsc_reladdr.msf.minute = buffer[13];
		subchnl->cdsc_reladdr.msf.second = buffer[14];
		subchnl->cdsc_reladdr.msf.frame = buffer[15];
	}
	kmem_free(buffer, 16);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag)
	    != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Function: sr_read_tocentry()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              ioctl requests to read from the Table of Contents (TOC)
 *              (CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *              fields, the starting address (LBA or MSF format per the user)
 *              and the data mode if the user specified track is a data track.
 *
 *              Note: The READ HEADER (0x44) command used in this routine is
 *              obsolete per the SCSI MMC spec but still supported in the
 *              MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
 *              therefore the command is still implemented in this routine.
 *
 * Arguments: dev - the device 'dev_t'
 *            data - pointer to user provided toc entry structure,
 *                   specifying the track # and the address format
 *                   (LBA or MSF).
 *            flag - this argument is a pass through to ddi_copyxxx()
 *                   directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * The READ HEADER command failed; since it is
			 * obsolete in one spec, it is better to return -1
			 * for an invalid track so that we can still receive
			 * the rest of the TOC data.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}


/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              ioctl requests to read the Table of Contents (TOC) header
 *              (CDROMREADTOCHDR). The TOC header consists of the disk
 *              starting and ending track numbers.
 *
 * Arguments: dev - the device 'dev_t'
 *            data - pointer to user provided toc header structure,
 *                   specifying the starting and ending track numbers.
 *            flag - this argument is a pass through to ddi_copyxxx()
 *                   directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
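 *
 * For reference, the per-block transfer sizes involved are 2048 bytes for
 * mode 1 user data, 2336 bytes for mode 2 user data, and 2352 bytes for
 * CD-DA, with CD-DA optionally extended by 16 (Q subcode) or 96 (all
 * subcodes) bytes per block.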
 *
 * In addition to support for the various data formats, these routines also
 * include support for devices that implement only the direct access READ
 * commands (0x08, 0x28), devices that implement the READ_CD commands
 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and
 * READ CDXA commands (0xD8, 0xDB).
 */

/*
 * Function: sr_read_mode1()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              ioctl read mode1 requests (CDROMREADMODE1).
 *
 * Arguments: dev - the device 'dev_t'
 *            data - pointer to user provided cd read structure specifying
 *                   the lba buffer address and length.
 *            flag - this argument is a pass through to ddi_copyxxx()
 *                   directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EINVAL if data pointer is NULL
 */

static int
sr_read_mode1(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_read	mode1_struct;
	struct cdrom_read	*mode1 = &mode1_struct;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode1: entry: un:0x%p\n", un);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode1);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
			return (EFAULT);
		}
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr,
	    mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode1: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sr_read_cd_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *              support the READ CD (0xBE) command or the 1st generation
 *              READ CD (0xD4) command.
 *
 * Arguments: dev - the device 'dev_t'
 *            data - pointer to user provided cd read structure specifying
 *                   the lba buffer address and length.
 *            flag - this argument is a pass through to ddi_copyxxx()
 *                   directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EINVAL if data pointer is NULL
 */

static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	uchar_t			cdb[CDB_GROUP5];
	int			nblocks;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation atapi drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits */
	cdb[9] = CDROM_READ_CD_USERDATA;

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *              do not support the READ CD (0xBE) command.
 *
 * Arguments: dev - the device 'dev_t'
 *            data - pointer to user provided cd read structure specifying
 *                   the lba buffer address and length.
 *            flag - this argument is a pass through to ddi_copyxxx()
 *                   directly from the mode argument of ioctl().
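 *
 *            For illustration, a hypothetical userland caller (not part of
 *            this driver); note that cdread_lba is expressed in 512-byte
 *            units, per the lba adjustment made in the code below:
 *
 *                struct cdrom_read cr;
 *                cr.cdread_lba = lba;
 *                cr.cdread_bufaddr = buf;
 *                cr.cdread_buflen = 2336;
 *                (void) ioctl(fd, CDROMREADMODE2, &cr);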
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              EFAULT if ddi_copyxxx() fails
 *              ENXIO if ddi_get_soft_state fails
 *              EINVAL if data pointer is NULL
 *              EIO if fail to reset block size
 *              EAGAIN if commands are in progress in the driver
 */

static int
sr_read_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	int			rval;
	uint32_t		restore_blksize;
	struct uscsi_cmd	*com;
	uchar_t			cdb[CDB_GROUP0];
	int			nblocks;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * Because this routine will update the device and driver block size
	 * being used we want to make sure there are no commands in progress.
	 * If commands are in progress the user will have to try again.
	 *
	 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
	 * in sdioctl to protect commands from sdioctl through to the top of
	 * sd_uscsi_strategy. See sdioctl for details.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_ncmds_in_driver != 1) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: entry: un:0x%p\n", un);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}


	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* adjust lba for 2kbyte blocks from 512 byte blocks */
	mode2->cdread_lba >>= 2;

	/* set the start address */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't do switch back to mode 1\n");
		/*
		 * If sd_send_scsi_READ succeeded we still need to report
		 * an error because we failed to reset the block size
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2 to set the
 *              target block size based on the user specified size. This is
 *              a legacy implementation based upon a vendor specific mode
 *              page.
 *
 * Arguments: dev - the device 'dev_t'
 *            blksize - flag indicating if block size is being set to 2336
 *                      or 512.
25589 * 25590 * Return Code: the code returned by sd_send_scsi_cmd() 25591 * EFAULT if ddi_copyxxx() fails 25592 * ENXIO if fail ddi_get_soft_state 25593 * EINVAL if data pointer is NULL 25594 */ 25595 25596 static int 25597 sr_sector_mode(dev_t dev, uint32_t blksize) 25598 { 25599 struct sd_lun *un; 25600 uchar_t *sense; 25601 uchar_t *select; 25602 int rval; 25603 25604 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25605 (un->un_state == SD_STATE_OFFLINE)) { 25606 return (ENXIO); 25607 } 25608 25609 sense = kmem_zalloc(20, KM_SLEEP); 25610 25611 /* Note: This is a vendor specific mode page (0x81) */ 25612 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 25613 SD_PATH_STANDARD)) != 0) { 25614 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25615 "sr_sector_mode: Mode Sense failed\n"); 25616 kmem_free(sense, 20); 25617 return (rval); 25618 } 25619 select = kmem_zalloc(20, KM_SLEEP); 25620 select[3] = 0x08; 25621 select[10] = ((blksize >> 8) & 0xff); 25622 select[11] = (blksize & 0xff); 25623 select[12] = 0x01; 25624 select[13] = 0x06; 25625 select[14] = sense[14]; 25626 select[15] = sense[15]; 25627 if (blksize == SD_MODE2_BLKSIZE) { 25628 select[14] |= 0x01; 25629 } 25630 25631 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 25632 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 25633 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25634 "sr_sector_mode: Mode Select failed\n"); 25635 } else { 25636 /* 25637 * Only update the softstate block size if we successfully 25638 * changed the device block mode. 25639 */ 25640 mutex_enter(SD_MUTEX(un)); 25641 sd_update_block_info(un, blksize, 0); 25642 mutex_exit(SD_MUTEX(un)); 25643 } 25644 kmem_free(sense, 20); 25645 kmem_free(select, 20); 25646 return (rval); 25647 } 25648 25649 25650 /* 25651 * Function: sr_read_cdda() 25652 * 25653 * Description: This routine is the driver entry point for handling CD-ROM 25654 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 25655 * the target supports CDDA these requests are handled via a vendor 25656 * specific command (0xD8) If the target does not support CDDA 25657 * these requests are handled via the READ CD command (0xBE). 25658 * 25659 * Arguments: dev - the device 'dev_t' 25660 * data - pointer to user provided CD-DA structure specifying 25661 * the track starting address, transfer length, and 25662 * subcode options. 25663 * flag - this argument is a pass through to ddi_copyxxx() 25664 * directly from the mode argument of ioctl(). 
25665 * 25666 * Return Code: the code returned by sd_send_scsi_cmd() 25667 * EFAULT if ddi_copyxxx() fails 25668 * ENXIO if fail ddi_get_soft_state 25669 * EINVAL if invalid arguments are provided 25670 * ENOTTY 25671 */ 25672 25673 static int 25674 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25675 { 25676 struct sd_lun *un; 25677 struct uscsi_cmd *com; 25678 struct cdrom_cdda *cdda; 25679 int rval; 25680 size_t buflen; 25681 char cdb[CDB_GROUP5]; 25682 25683 #ifdef _MULTI_DATAMODEL 25684 /* To support ILP32 applications in an LP64 world */ 25685 struct cdrom_cdda32 cdrom_cdda32; 25686 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25687 #endif /* _MULTI_DATAMODEL */ 25688 25689 if (data == NULL) { 25690 return (EINVAL); 25691 } 25692 25693 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25694 return (ENXIO); 25695 } 25696 25697 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25698 25699 #ifdef _MULTI_DATAMODEL 25700 switch (ddi_model_convert_from(flag & FMODELS)) { 25701 case DDI_MODEL_ILP32: 25702 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25703 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25704 "sr_read_cdda: ddi_copyin Failed\n"); 25705 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25706 return (EFAULT); 25707 } 25708 /* Convert the ILP32 uscsi data from the application to LP64 */ 25709 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25710 break; 25711 case DDI_MODEL_NONE: 25712 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25713 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25714 "sr_read_cdda: ddi_copyin Failed\n"); 25715 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25716 return (EFAULT); 25717 } 25718 break; 25719 } 25720 #else /* ! _MULTI_DATAMODEL */ 25721 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25722 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25723 "sr_read_cdda: ddi_copyin Failed\n"); 25724 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25725 return (EFAULT); 25726 } 25727 #endif /* _MULTI_DATAMODEL */ 25728 25729 /* 25730 * Since MMC-2 expects max 3 bytes for length, check if the 25731 * length input is greater than 3 bytes 25732 */ 25733 if ((cdda->cdda_length & 0xFF000000) != 0) { 25734 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25735 "cdrom transfer length too large: %d (limit %d)\n", 25736 cdda->cdda_length, 0xFFFFFF); 25737 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25738 return (EINVAL); 25739 } 25740 25741 switch (cdda->cdda_subcode) { 25742 case CDROM_DA_NO_SUBCODE: 25743 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25744 break; 25745 case CDROM_DA_SUBQ: 25746 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25747 break; 25748 case CDROM_DA_ALL_SUBCODE: 25749 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25750 break; 25751 case CDROM_DA_SUBCODE_ONLY: 25752 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25753 break; 25754 default: 25755 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25756 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25757 cdda->cdda_subcode); 25758 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25759 return (EINVAL); 25760 } 25761 25762 /* Build and send the command */ 25763 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25764 bzero(cdb, CDB_GROUP5); 25765 25766 if (un->un_f_cfg_cdda == TRUE) { 25767 cdb[0] = (char)SCMD_READ_CD; 25768 cdb[1] = 0x04; 25769 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25770 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25771 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25772 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25773 
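/*
 * The four assignments above placed the starting LBA into CDB bytes
 * 2-5 in big-endian order; the three assignments below place the
 * 24-bit transfer length into bytes 6-8. As a minimal sketch, the
 * same packing could be factored into a helper (sd_cdb_put_be32 is
 * hypothetical, not part of this driver):
 *
 *	static void
 *	sd_cdb_put_be32(uchar_t *p, uint32_t v)
 *	{
 *		p[0] = (v >> 24) & 0xff;
 *		p[1] = (v >> 16) & 0xff;
 *		p[2] = (v >> 8) & 0xff;
 *		p[3] = v & 0xff;
 *	}
 *
 *	sd_cdb_put_be32((uchar_t *)&cdb[2], cdda->cdda_addr);
 */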
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25774 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25775 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25776 cdb[9] = 0x10; 25777 switch (cdda->cdda_subcode) { 25778 case CDROM_DA_NO_SUBCODE : 25779 cdb[10] = 0x0; 25780 break; 25781 case CDROM_DA_SUBQ : 25782 cdb[10] = 0x2; 25783 break; 25784 case CDROM_DA_ALL_SUBCODE : 25785 cdb[10] = 0x1; 25786 break; 25787 case CDROM_DA_SUBCODE_ONLY : 25788 /* FALLTHROUGH */ 25789 default : 25790 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25791 kmem_free(com, sizeof (*com)); 25792 return (ENOTTY); 25793 } 25794 } else { 25795 cdb[0] = (char)SCMD_READ_CDDA; 25796 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25797 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25798 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25799 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25800 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25801 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25802 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25803 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25804 cdb[10] = cdda->cdda_subcode; 25805 } 25806 25807 com->uscsi_cdb = cdb; 25808 com->uscsi_cdblen = CDB_GROUP5; 25809 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25810 com->uscsi_buflen = buflen; 25811 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25812 25813 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25814 SD_PATH_STANDARD); 25815 25816 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25817 kmem_free(com, sizeof (*com)); 25818 return (rval); 25819 } 25820 25821 25822 /* 25823 * Function: sr_read_cdxa() 25824 * 25825 * Description: This routine is the driver entry point for handling CD-ROM 25826 * ioctl requests to return CD-XA (Extended Architecture) data. 25827 * (CDROMCDXA). 25828 * 25829 * Arguments: dev - the device 'dev_t' 25830 * data - pointer to user provided CD-XA structure specifying 25831 * the data starting address, transfer length, and format 25832 * flag - this argument is a pass through to ddi_copyxxx() 25833 * directly from the mode argument of ioctl(). 25834 * 25835 * Return Code: the code returned by sd_send_scsi_cmd() 25836 * EFAULT if ddi_copyxxx() fails 25837 * ENXIO if fail ddi_get_soft_state 25838 * EINVAL if data pointer is NULL 25839 */ 25840 25841 static int 25842 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25843 { 25844 struct sd_lun *un; 25845 struct uscsi_cmd *com; 25846 struct cdrom_cdxa *cdxa; 25847 int rval; 25848 size_t buflen; 25849 char cdb[CDB_GROUP5]; 25850 uchar_t read_flags; 25851 25852 #ifdef _MULTI_DATAMODEL 25853 /* To support ILP32 applications in an LP64 world */ 25854 struct cdrom_cdxa32 cdrom_cdxa32; 25855 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25856 #endif /* _MULTI_DATAMODEL */ 25857 25858 if (data == NULL) { 25859 return (EINVAL); 25860 } 25861 25862 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25863 return (ENXIO); 25864 } 25865 25866 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25867 25868 #ifdef _MULTI_DATAMODEL 25869 switch (ddi_model_convert_from(flag & FMODELS)) { 25870 case DDI_MODEL_ILP32: 25871 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25872 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25873 return (EFAULT); 25874 } 25875 /* 25876 * Convert the ILP32 uscsi data from the 25877 * application to LP64 for internal use. 
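 *
 * A minimal sketch of what the conversion helper does, assuming
 * cdrom_cdxa32 mirrors cdrom_cdxa with 32-bit (caddr32_t) pointer
 * fields; see the real cdrom_cdxa32tocdrom_cdxa definition for the
 * authoritative field list:
 *
 *	cdxa->cdxa_addr   = cdxa32->cdxa_addr;
 *	cdxa->cdxa_length = cdxa32->cdxa_length;
 *	cdxa->cdxa_format = cdxa32->cdxa_format;
 *	cdxa->cdxa_data   = (caddr_t)(uintptr_t)cdxa32->cdxa_data;
 *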
25878 */ 25879 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 25880 break; 25881 case DDI_MODEL_NONE: 25882 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 25883 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25884 return (EFAULT); 25885 } 25886 break; 25887 } 25888 #else /* ! _MULTI_DATAMODEL */ 25889 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 25890 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25891 return (EFAULT); 25892 } 25893 #endif /* _MULTI_DATAMODEL */ 25894 25895 /* 25896 * Since MMC-2 allows at most 3 bytes for the transfer length, 25897 * reject any length input that does not fit in 3 bytes 25898 */ 25899 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 25900 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 25901 "cdrom transfer length too large: %d (limit %d)\n", 25902 cdxa->cdxa_length, 0xFFFFFF); 25903 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25904 return (EINVAL); 25905 } 25906 25907 switch (cdxa->cdxa_format) { 25908 case CDROM_XA_DATA: 25909 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 25910 read_flags = 0x10; 25911 break; 25912 case CDROM_XA_SECTOR_DATA: 25913 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 25914 read_flags = 0xf8; 25915 break; 25916 case CDROM_XA_DATA_W_ERROR: 25917 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 25918 read_flags = 0xfc; 25919 break; 25920 default: 25921 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25922 "sr_read_cdxa: Format '0x%x' Not Supported\n", 25923 cdxa->cdxa_format); 25924 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25925 return (EINVAL); 25926 } 25927 25928 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25929 bzero(cdb, CDB_GROUP5); 25930 if (un->un_f_mmc_cap == TRUE) { 25931 cdb[0] = (char)SCMD_READ_CD; 25932 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25933 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25934 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25935 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25936 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25937 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25938 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 25939 cdb[9] = (char)read_flags; 25940 } else { 25941 /* 25942 * Note: A vendor specific command (0xDB) is being used here to 25943 * request a read of all subcodes.
25944 */ 25945 cdb[0] = (char)SCMD_READ_CDXA; 25946 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25947 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25948 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25949 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25950 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25951 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25952 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25953 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25954 cdb[10] = cdxa->cdxa_format; 25955 } 25956 com->uscsi_cdb = cdb; 25957 com->uscsi_cdblen = CDB_GROUP5; 25958 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25959 com->uscsi_buflen = buflen; 25960 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25961 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25962 SD_PATH_STANDARD); 25963 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25964 kmem_free(com, sizeof (*com)); 25965 return (rval); 25966 } 25967 25968 25969 /* 25970 * Function: sr_eject() 25971 * 25972 * Description: This routine is the driver entry point for handling CD-ROM 25973 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25974 * 25975 * Arguments: dev - the device 'dev_t' 25976 * 25977 * Return Code: the code returned by sd_send_scsi_cmd() 25978 */ 25979 25980 static int 25981 sr_eject(dev_t dev) 25982 { 25983 struct sd_lun *un; 25984 int rval; 25985 25986 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25987 (un->un_state == SD_STATE_OFFLINE)) { 25988 return (ENXIO); 25989 } 25990 25991 /* 25992 * To prevent race conditions with the eject 25993 * command, keep track of an eject command as 25994 * it progresses. If we are already handling 25995 * an eject command in the driver for the given 25996 * unit and another request to eject is received 25997 * immediately return EAGAIN so we don't lose 25998 * the command if the current eject command fails. 25999 */ 26000 mutex_enter(SD_MUTEX(un)); 26001 if (un->un_f_ejecting == TRUE) { 26002 mutex_exit(SD_MUTEX(un)); 26003 return (EAGAIN); 26004 } 26005 un->un_f_ejecting = TRUE; 26006 mutex_exit(SD_MUTEX(un)); 26007 26008 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 26009 SD_PATH_STANDARD)) != 0) { 26010 mutex_enter(SD_MUTEX(un)); 26011 un->un_f_ejecting = FALSE; 26012 mutex_exit(SD_MUTEX(un)); 26013 return (rval); 26014 } 26015 26016 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 26017 SD_PATH_STANDARD); 26018 26019 if (rval == 0) { 26020 mutex_enter(SD_MUTEX(un)); 26021 sr_ejected(un); 26022 un->un_mediastate = DKIO_EJECTED; 26023 un->un_f_ejecting = FALSE; 26024 cv_broadcast(&un->un_state_cv); 26025 mutex_exit(SD_MUTEX(un)); 26026 } else { 26027 mutex_enter(SD_MUTEX(un)); 26028 un->un_f_ejecting = FALSE; 26029 mutex_exit(SD_MUTEX(un)); 26030 } 26031 return (rval); 26032 } 26033 26034 26035 /* 26036 * Function: sr_ejected() 26037 * 26038 * Description: This routine updates the soft state structure to invalidate the 26039 * geometry information after the media has been ejected or a 26040 * media eject has been detected. 
26041 * 26042 * Arguments: un - driver soft state (unit) structure 26043 */ 26044 26045 static void 26046 sr_ejected(struct sd_lun *un) 26047 { 26048 struct sd_errstats *stp; 26049 26050 ASSERT(un != NULL); 26051 ASSERT(mutex_owned(SD_MUTEX(un))); 26052 26053 un->un_f_blockcount_is_valid = FALSE; 26054 un->un_f_tgt_blocksize_is_valid = FALSE; 26055 mutex_exit(SD_MUTEX(un)); 26056 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 26057 mutex_enter(SD_MUTEX(un)); 26058 26059 if (un->un_errstats != NULL) { 26060 stp = (struct sd_errstats *)un->un_errstats->ks_data; 26061 stp->sd_capacity.value.ui64 = 0; 26062 } 26063 26064 /* remove "capacity-of-device" properties */ 26065 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 26066 "device-nblocks"); 26067 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 26068 "device-blksize"); 26069 } 26070 26071 26072 /* 26073 * Function: sr_check_wp() 26074 * 26075 * Description: This routine checks the write protection of a removable 26076 * media disk and hotpluggable devices via the write protect bit of 26077 * the Mode Page Header device specific field. Some devices choke 26078 * on unsupported mode page. In order to workaround this issue, 26079 * this routine has been implemented to use 0x3f mode page(request 26080 * for all pages) for all device types. 26081 * 26082 * Arguments: dev - the device 'dev_t' 26083 * 26084 * Return Code: int indicating if the device is write protected (1) or not (0) 26085 * 26086 * Context: Kernel thread. 26087 * 26088 */ 26089 26090 static int 26091 sr_check_wp(dev_t dev) 26092 { 26093 struct sd_lun *un; 26094 uchar_t device_specific; 26095 uchar_t *sense; 26096 int hdrlen; 26097 int rval = FALSE; 26098 26099 /* 26100 * Note: The return codes for this routine should be reworked to 26101 * properly handle the case of a NULL softstate. 26102 */ 26103 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26104 return (FALSE); 26105 } 26106 26107 if (un->un_f_cfg_is_atapi == TRUE) { 26108 /* 26109 * The mode page contents are not required; set the allocation 26110 * length for the mode page header only 26111 */ 26112 hdrlen = MODE_HEADER_LENGTH_GRP2; 26113 sense = kmem_zalloc(hdrlen, KM_SLEEP); 26114 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 26115 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 26116 goto err_exit; 26117 device_specific = 26118 ((struct mode_header_grp2 *)sense)->device_specific; 26119 } else { 26120 hdrlen = MODE_HEADER_LENGTH; 26121 sense = kmem_zalloc(hdrlen, KM_SLEEP); 26122 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 26123 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 26124 goto err_exit; 26125 device_specific = 26126 ((struct mode_header *)sense)->device_specific; 26127 } 26128 26129 /* 26130 * Write protect mode sense failed; not all disks 26131 * understand this query. Return FALSE assuming that 26132 * these devices are not writable. 26133 */ 26134 if (device_specific & WRITE_PROTECT) { 26135 rval = TRUE; 26136 } 26137 26138 err_exit: 26139 kmem_free(sense, hdrlen); 26140 return (rval); 26141 } 26142 26143 /* 26144 * Function: sr_volume_ctrl() 26145 * 26146 * Description: This routine is the driver entry point for handling CD-ROM 26147 * audio output volume ioctl requests. (CDROMVOLCTRL) 26148 * 26149 * Arguments: dev - the device 'dev_t' 26150 * data - pointer to user audio volume control structure 26151 * flag - this argument is a pass through to ddi_copyxxx() 26152 * directly from the mode argument of ioctl(). 
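 *
 * A userland caller might exercise this entry point as follows (an
 * illustrative sketch only; fd is an open descriptor for the CD-ROM
 * device node, and error handling is omitted):
 *
 *	struct cdrom_volctrl vol;
 *
 *	vol.channel0 = vol.channel1 = 255;	(maximum volume)
 *	vol.channel2 = vol.channel3 = 0;
 *	(void) ioctl(fd, CDROMVOLCTRL, &vol);
 *
 * Channels 2 and 3 are currently left at their sensed values by this
 * routine, as noted in the body below.
 *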
26153 * 26154 * Return Code: the code returned by sd_send_scsi_cmd() 26155 * EFAULT if ddi_copyxxx() fails 26156 * ENXIO if fail ddi_get_soft_state 26157 * EINVAL if data pointer is NULL 26158 * 26159 */ 26160 26161 static int 26162 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 26163 { 26164 struct sd_lun *un; 26165 struct cdrom_volctrl volume; 26166 struct cdrom_volctrl *vol = &volume; 26167 uchar_t *sense_page; 26168 uchar_t *select_page; 26169 uchar_t *sense; 26170 uchar_t *select; 26171 int sense_buflen; 26172 int select_buflen; 26173 int rval; 26174 26175 if (data == NULL) { 26176 return (EINVAL); 26177 } 26178 26179 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26180 (un->un_state == SD_STATE_OFFLINE)) { 26181 return (ENXIO); 26182 } 26183 26184 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 26185 return (EFAULT); 26186 } 26187 26188 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26189 struct mode_header_grp2 *sense_mhp; 26190 struct mode_header_grp2 *select_mhp; 26191 int bd_len; 26192 26193 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 26194 select_buflen = MODE_HEADER_LENGTH_GRP2 + 26195 MODEPAGE_AUDIO_CTRL_LEN; 26196 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26197 select = kmem_zalloc(select_buflen, KM_SLEEP); 26198 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26199 sense_buflen, MODEPAGE_AUDIO_CTRL, 26200 SD_PATH_STANDARD)) != 0) { 26201 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 26202 "sr_volume_ctrl: Mode Sense Failed\n"); 26203 kmem_free(sense, sense_buflen); 26204 kmem_free(select, select_buflen); 26205 return (rval); 26206 } 26207 sense_mhp = (struct mode_header_grp2 *)sense; 26208 select_mhp = (struct mode_header_grp2 *)select; 26209 bd_len = (sense_mhp->bdesc_length_hi << 8) | 26210 sense_mhp->bdesc_length_lo; 26211 if (bd_len > MODE_BLK_DESC_LENGTH) { 26212 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26213 "sr_volume_ctrl: Mode Sense returned invalid " 26214 "block descriptor length\n"); 26215 kmem_free(sense, sense_buflen); 26216 kmem_free(select, select_buflen); 26217 return (EIO); 26218 } 26219 sense_page = (uchar_t *) 26220 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26221 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 26222 select_mhp->length_msb = 0; 26223 select_mhp->length_lsb = 0; 26224 select_mhp->bdesc_length_hi = 0; 26225 select_mhp->bdesc_length_lo = 0; 26226 } else { 26227 struct mode_header *sense_mhp, *select_mhp; 26228 26229 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26230 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26231 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26232 select = kmem_zalloc(select_buflen, KM_SLEEP); 26233 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26234 sense_buflen, MODEPAGE_AUDIO_CTRL, 26235 SD_PATH_STANDARD)) != 0) { 26236 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26237 "sr_volume_ctrl: Mode Sense Failed\n"); 26238 kmem_free(sense, sense_buflen); 26239 kmem_free(select, select_buflen); 26240 return (rval); 26241 } 26242 sense_mhp = (struct mode_header *)sense; 26243 select_mhp = (struct mode_header *)select; 26244 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26245 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26246 "sr_volume_ctrl: Mode Sense returned invalid " 26247 "block descriptor length\n"); 26248 kmem_free(sense, sense_buflen); 26249 kmem_free(select, select_buflen); 26250 return (EIO); 26251 } 26252 sense_page = (uchar_t *) 26253 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 26254 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 26255 select_mhp->length = 0; 26256 select_mhp->bdesc_length = 0; 26257 } 26258 /* 26259 * Note: An audio control data structure could be created and overlayed 26260 * on the following in place of the array indexing method implemented. 26261 */ 26262 26263 /* Build the select data for the user volume data */ 26264 select_page[0] = MODEPAGE_AUDIO_CTRL; 26265 select_page[1] = 0xE; 26266 /* Set the immediate bit */ 26267 select_page[2] = 0x04; 26268 /* Zero out reserved fields */ 26269 select_page[3] = 0x00; 26270 select_page[4] = 0x00; 26271 /* Return sense data for fields not to be modified */ 26272 select_page[5] = sense_page[5]; 26273 select_page[6] = sense_page[6]; 26274 select_page[7] = sense_page[7]; 26275 /* Set the user specified volume levels for channel 0 and 1 */ 26276 select_page[8] = 0x01; 26277 select_page[9] = vol->channel0; 26278 select_page[10] = 0x02; 26279 select_page[11] = vol->channel1; 26280 /* Channel 2 and 3 are currently unsupported so return the sense data */ 26281 select_page[12] = sense_page[12]; 26282 select_page[13] = sense_page[13]; 26283 select_page[14] = sense_page[14]; 26284 select_page[15] = sense_page[15]; 26285 26286 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26287 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 26288 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26289 } else { 26290 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26291 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26292 } 26293 26294 kmem_free(sense, sense_buflen); 26295 kmem_free(select, select_buflen); 26296 return (rval); 26297 } 26298 26299 26300 /* 26301 * Function: sr_read_sony_session_offset() 26302 * 26303 * Description: This routine is the driver entry point for handling CD-ROM 26304 * ioctl requests for session offset information. (CDROMREADOFFSET) 26305 * The address of the first track in the last session of a 26306 * multi-session CD-ROM is returned 26307 * 26308 * Note: This routine uses a vendor specific key value in the 26309 * command control field without implementing any vendor check here 26310 * or in the ioctl routine. 26311 * 26312 * Arguments: dev - the device 'dev_t' 26313 * data - pointer to an int to hold the requested address 26314 * flag - this argument is a pass through to ddi_copyxxx() 26315 * directly from the mode argument of ioctl(). 26316 * 26317 * Return Code: the code returned by sd_send_scsi_cmd() 26318 * EFAULT if ddi_copyxxx() fails 26319 * ENXIO if fail ddi_get_soft_state 26320 * EINVAL if data pointer is NULL 26321 */ 26322 26323 static int 26324 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 26325 { 26326 struct sd_lun *un; 26327 struct uscsi_cmd *com; 26328 caddr_t buffer; 26329 char cdb[CDB_GROUP1]; 26330 int session_offset = 0; 26331 int rval; 26332 26333 if (data == NULL) { 26334 return (EINVAL); 26335 } 26336 26337 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26338 (un->un_state == SD_STATE_OFFLINE)) { 26339 return (ENXIO); 26340 } 26341 26342 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26343 bzero(cdb, CDB_GROUP1); 26344 cdb[0] = SCMD_READ_TOC; 26345 /* 26346 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26347 * (4 byte TOC response header + 8 byte response data) 26348 */ 26349 cdb[8] = SONY_SESSION_OFFSET_LEN; 26350 /* Byte 9 is the control byte. 
A vendor specific value is used */ 26351 cdb[9] = SONY_SESSION_OFFSET_KEY; 26352 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26353 com->uscsi_cdb = cdb; 26354 com->uscsi_cdblen = CDB_GROUP1; 26355 com->uscsi_bufaddr = buffer; 26356 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 26357 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26358 26359 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26360 SD_PATH_STANDARD); 26361 if (rval != 0) { 26362 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 26363 kmem_free(com, sizeof (*com)); 26364 return (rval); 26365 } 26366 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 26367 session_offset = 26368 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26369 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26370 /* 26371 * The offset is returned in units of the current lbasize. Convert 26372 * it to 2K blocks before returning it to the user 26373 */ 26374 if (un->un_tgt_blocksize == CDROM_BLK_512) { 26375 session_offset >>= 2; 26376 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 26377 session_offset >>= 1; 26378 } 26379 } 26380 26381 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 26382 rval = EFAULT; 26383 } 26384 26385 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 26386 kmem_free(com, sizeof (*com)); 26387 return (rval); 26388 } 26389 26390 26391 /* 26392 * Function: sd_wm_cache_constructor() 26393 * 26394 * Description: Cache constructor for the wmap cache for the read/modify/write 26395 * devices. 26396 * 26397 * Arguments: wm - A pointer to the sd_w_map to be initialized. 26398 * un - sd_lun structure for the device. 26399 * flags - the km flags passed to the constructor 26400 * 26401 * Return Code: 0 on success. 26402 * -1 on failure. 26403 */ 26404 26405 /*ARGSUSED*/ 26406 static int 26407 sd_wm_cache_constructor(void *wm, void *un, int flags) 26408 { 26409 bzero(wm, sizeof (struct sd_w_map)); 26410 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 26411 return (0); 26412 } 26413 26414 26415 /* 26416 * Function: sd_wm_cache_destructor() 26417 * 26418 * Description: Cache destructor for the wmap cache for the read/modify/write 26419 * devices. 26420 * 26421 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 26422 * un - sd_lun structure for the device. 26423 */ 26424 /*ARGSUSED*/ 26425 static void 26426 sd_wm_cache_destructor(void *wm, void *un) 26427 { 26428 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 26429 } 26430 26431 26432 /* 26433 * Function: sd_range_lock() 26434 * 26435 * Description: Lock the specified range of blocks to ensure 26436 * that read-modify-write operations are atomic and no other i/o 26437 * writes to the same location. The range is specified in terms 26438 * of start and end blocks. Block numbers are actual 26439 * media block numbers, not system block numbers. 26440 * 26441 * Arguments: un - sd_lun structure for the device. 26442 * startb - The starting block number 26443 * endb - The end block number 26444 * typ - type of i/o - simple/read_modify_write 26445 * 26446 * Return Code: wm - pointer to the wmap structure. 26447 * 26448 * Context: This routine can sleep.
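 *
 * Typical usage, as an illustrative sketch (the real caller is the
 * read-modify-write path; see sd_read_modify_write_task() and the
 * surrounding code):
 *
 *	wm = sd_range_lock(un, start_blk, end_blk, SD_WTYPE_RMW);
 *	... read the enclosing target blocks, merge, write back ...
 *	sd_range_unlock(un, wm);
 *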
26449 */ 26450 26451 static struct sd_w_map * 26452 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 26453 { 26454 struct sd_w_map *wmp = NULL; 26455 struct sd_w_map *sl_wmp = NULL; 26456 struct sd_w_map *tmp_wmp; 26457 wm_state state = SD_WM_CHK_LIST; 26458 26459 26460 ASSERT(un != NULL); 26461 ASSERT(!mutex_owned(SD_MUTEX(un))); 26462 26463 mutex_enter(SD_MUTEX(un)); 26464 26465 while (state != SD_WM_DONE) { 26466 26467 switch (state) { 26468 case SD_WM_CHK_LIST: 26469 /* 26470 * This is the starting state. Check the wmap list 26471 * to see if the range is currently available. 26472 */ 26473 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 26474 /* 26475 * If this is a simple write and no rmw 26476 * i/o is pending, then try to lock the 26477 * range as the range should be available. 26478 */ 26479 state = SD_WM_LOCK_RANGE; 26480 } else { 26481 tmp_wmp = sd_get_range(un, startb, endb); 26482 if (tmp_wmp != NULL) { 26483 if ((wmp != NULL) && ONLIST(un, wmp)) { 26484 /* 26485 * Should not keep onlist wmps 26486 * while waiting; this macro 26487 * will also set wmp = NULL. 26488 */ 26489 FREE_ONLIST_WMAP(un, wmp); 26490 } 26491 /* 26492 * sl_wmp is the wmap on which the wait 26493 * is done. Since tmp_wmp points 26494 * to the in-use wmap, set sl_wmp to 26495 * tmp_wmp and change the state to wait. 26496 */ 26497 sl_wmp = tmp_wmp; 26498 state = SD_WM_WAIT_MAP; 26499 } else { 26500 state = SD_WM_LOCK_RANGE; 26501 } 26502 26503 } 26504 break; 26505 26506 case SD_WM_LOCK_RANGE: 26507 ASSERT(un->un_wm_cache); 26508 /* 26509 * The range needs to be locked; try to get a wmap. 26510 * First attempt with KM_NOSLEEP: we want to avoid 26511 * sleeping if possible, as we would have to release 26512 * the sd mutex in order to sleep. 26513 */ 26514 if (wmp == NULL) 26515 wmp = kmem_cache_alloc(un->un_wm_cache, 26516 KM_NOSLEEP); 26517 if (wmp == NULL) { 26518 mutex_exit(SD_MUTEX(un)); 26519 _NOTE(DATA_READABLE_WITHOUT_LOCK 26520 (sd_lun::un_wm_cache)) 26521 wmp = kmem_cache_alloc(un->un_wm_cache, 26522 KM_SLEEP); 26523 mutex_enter(SD_MUTEX(un)); 26524 /* 26525 * We released the mutex, so recheck and go 26526 * back to the check-list state. 26527 */ 26528 state = SD_WM_CHK_LIST; 26529 } else { 26530 /* 26531 * We exit the state machine since we 26532 * have the wmap. Do the housekeeping first: 26533 * place the wmap on the wmap list if it is not 26534 * already on it, and then set the state to done. 26535 */ 26536 wmp->wm_start = startb; 26537 wmp->wm_end = endb; 26538 wmp->wm_flags = typ | SD_WM_BUSY; 26539 if (typ & SD_WTYPE_RMW) { 26540 un->un_rmw_count++; 26541 } 26542 /* 26543 * If not already on the list, then link it. 26544 */ 26545 if (!ONLIST(un, wmp)) { 26546 wmp->wm_next = un->un_wm; 26547 wmp->wm_prev = NULL; 26548 if (wmp->wm_next) 26549 wmp->wm_next->wm_prev = wmp; 26550 un->un_wm = wmp; 26551 } 26552 state = SD_WM_DONE; 26553 } 26554 break; 26555 26556 case SD_WM_WAIT_MAP: 26557 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 26558 /* 26559 * Wait is done on sl_wmp, which is set in the 26560 * check_list state. 26561 */ 26562 sl_wmp->wm_wanted_count++; 26563 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 26564 sl_wmp->wm_wanted_count--; 26565 /* 26566 * We can reuse the memory from the completed sl_wmp 26567 * lock range for our new lock, but only if no one is 26568 * waiting for it.
26569 */ 26570 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 26571 if (sl_wmp->wm_wanted_count == 0) { 26572 if (wmp != NULL) 26573 CHK_N_FREEWMP(un, wmp); 26574 wmp = sl_wmp; 26575 } 26576 sl_wmp = NULL; 26577 /* 26578 * After waking up, need to recheck for availability of 26579 * range. 26580 */ 26581 state = SD_WM_CHK_LIST; 26582 break; 26583 26584 default: 26585 panic("sd_range_lock: " 26586 "Unknown state %d in sd_range_lock", state); 26587 /*NOTREACHED*/ 26588 } /* switch(state) */ 26589 26590 } /* while(state != SD_WM_DONE) */ 26591 26592 mutex_exit(SD_MUTEX(un)); 26593 26594 ASSERT(wmp != NULL); 26595 26596 return (wmp); 26597 } 26598 26599 26600 /* 26601 * Function: sd_get_range() 26602 * 26603 * Description: Find whether there is any I/O overlapping this one. 26604 * Returns the write map of the first such I/O, NULL otherwise. 26605 * 26606 * Arguments: un - sd_lun structure for the device. 26607 * startb - The starting block number 26608 * endb - The end block number 26609 * 26610 * Return Code: wm - pointer to the wmap structure. 26611 */ 26612 26613 static struct sd_w_map * 26614 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 26615 { 26616 struct sd_w_map *wmp; 26617 26618 ASSERT(un != NULL); 26619 26620 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 26621 if (!(wmp->wm_flags & SD_WM_BUSY)) { 26622 continue; 26623 } 26624 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 26625 break; 26626 } 26627 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 26628 break; 26629 } 26630 } 26631 26632 return (wmp); 26633 } 26634 26635 26636 /* 26637 * Function: sd_free_inlist_wmap() 26638 * 26639 * Description: Unlink and free a write map struct. 26640 * 26641 * Arguments: un - sd_lun structure for the device. 26642 * wmp - sd_w_map which needs to be unlinked. 26643 */ 26644 26645 static void 26646 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 26647 { 26648 ASSERT(un != NULL); 26649 26650 if (un->un_wm == wmp) { 26651 un->un_wm = wmp->wm_next; 26652 } else { 26653 wmp->wm_prev->wm_next = wmp->wm_next; 26654 } 26655 26656 if (wmp->wm_next) { 26657 wmp->wm_next->wm_prev = wmp->wm_prev; 26658 } 26659 26660 wmp->wm_next = wmp->wm_prev = NULL; 26661 26662 kmem_cache_free(un->un_wm_cache, wmp); 26663 } 26664 26665 26666 /* 26667 * Function: sd_range_unlock() 26668 * 26669 * Description: Unlock the range locked by wm. 26670 * Free the write map if nobody else is waiting on it. 26671 * 26672 * Arguments: un - sd_lun structure for the device. 26673 * wm - sd_w_map which needs to be unlocked. 26674 */ 26675 26676 static void 26677 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 26678 { 26679 ASSERT(un != NULL); 26680 ASSERT(wm != NULL); 26681 ASSERT(!mutex_owned(SD_MUTEX(un))); 26682 26683 mutex_enter(SD_MUTEX(un)); 26684 26685 if (wm->wm_flags & SD_WTYPE_RMW) { 26686 un->un_rmw_count--; 26687 } 26688 26689 if (wm->wm_wanted_count) { 26690 wm->wm_flags = 0; 26691 /* 26692 * Broadcast that the wmap is available now. 26693 */ 26694 cv_broadcast(&wm->wm_avail); 26695 } else { 26696 /* 26697 * If no one is waiting on the map, it should be freed. 26698 */ 26699 sd_free_inlist_wmap(un, wm); 26700 } 26701 26702 mutex_exit(SD_MUTEX(un)); 26703 } 26704 26705 26706 /* 26707 * Function: sd_read_modify_write_task 26708 * 26709 * Description: Called from a taskq thread to initiate the write phase of 26710 * a read-modify-write request. This is used for targets where 26711 * un->un_sys_blocksize != un->un_tgt_blocksize.
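 *
 * As a worked example: with un_tgt_blocksize == 2048 and
 * un_sys_blocksize == 512, a 512-byte write to system block 10 first
 * reads target block 2 (10 / 4), merges the new 512 bytes at byte
 * offset (10 % 4) * 512 == 1024 within that block, and then this task
 * sends the merged 2K block back down the iostart chain as the write.
 *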
26712 * 26713 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26714 * 26715 * Context: Called under taskq thread context. 26716 */ 26717 26718 static void 26719 sd_read_modify_write_task(void *arg) 26720 { 26721 struct sd_mapblocksize_info *bsp; 26722 struct buf *bp; 26723 struct sd_xbuf *xp; 26724 struct sd_lun *un; 26725 26726 bp = arg; /* The bp is given in arg */ 26727 ASSERT(bp != NULL); 26728 26729 /* Get the pointer to the layer-private data struct */ 26730 xp = SD_GET_XBUF(bp); 26731 ASSERT(xp != NULL); 26732 bsp = xp->xb_private; 26733 ASSERT(bsp != NULL); 26734 26735 un = SD_GET_UN(bp); 26736 ASSERT(un != NULL); 26737 ASSERT(!mutex_owned(SD_MUTEX(un))); 26738 26739 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26740 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26741 26742 /* 26743 * This is the write phase of a read-modify-write request, called 26744 * under the context of a taskq thread in response to the read 26745 * portion of the rmw request completing under interrupt 26746 * context. The write request must be sent from here down the iostart 26747 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26748 * we use the layer index saved in the layer-private data area. 26749 */ 26750 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26751 26752 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26753 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26754 } 26755 26756 26757 /* 26758 * Function: sddump_do_read_of_rmw() 26759 * 26760 * Description: This routine will be called from sddump. If sddump is called 26761 * with an I/O that is not aligned on a device blocksize boundary, 26762 * then the write has to be converted to a read-modify-write. 26763 * Do the read part here in order to keep sddump simple. 26764 * Note that the sd_mutex is held across the call to this 26765 * routine. 26766 * 26767 * Arguments: un - sd_lun 26768 * blkno - block number in terms of media block size. 26769 * nblk - number of blocks. 26770 * bpp - pointer to pointer to the buf structure. On return 26771 * from this function, *bpp points to the valid buffer 26772 * to which the write has to be done. 26773 * 26774 * Return Code: 0 for success or errno-type return code 26775 */ 26776 26777 static int 26778 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 26779 struct buf **bpp) 26780 { 26781 int err; 26782 int i; 26783 int rval; 26784 struct buf *bp; 26785 struct scsi_pkt *pkt = NULL; 26786 uint32_t target_blocksize; 26787 26788 ASSERT(un != NULL); 26789 ASSERT(mutex_owned(SD_MUTEX(un))); 26790 26791 target_blocksize = un->un_tgt_blocksize; 26792 26793 mutex_exit(SD_MUTEX(un)); 26794 26795 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 26796 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 26797 if (bp == NULL) { 26798 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26799 "no resources for dumping; giving up"); 26800 err = ENOMEM; 26801 goto done; 26802 } 26803 26804 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 26805 blkno, nblk); 26806 if (rval != 0) { 26807 scsi_free_consistent_buf(bp); 26808 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26809 "no resources for dumping; giving up"); 26810 err = ENOMEM; 26811 goto done; 26812 } 26813 26814 pkt->pkt_flags |= FLAG_NOINTR; 26815 26816 err = EIO; 26817 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26818 26819 /* 26820 * Scsi_poll returns 0 (success) if the command completes and 26821 * the status block is STATUS_GOOD.
We should only check 26822 * errors if this condition is not true. Even then we should 26823 * send our own request sense packet only if we have a check 26824 * condition and auto request sense has not been performed by 26825 * the hba. 26826 */ 26827 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26828 26829 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26830 err = 0; 26831 break; 26832 } 26833 26834 /* 26835 * Check CMD_DEV_GONE 1st, give up if device is gone, 26836 * no need to read RQS data. 26837 */ 26838 if (pkt->pkt_reason == CMD_DEV_GONE) { 26839 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26840 "Error while dumping state with rmw..." 26841 "Device is gone\n"); 26842 break; 26843 } 26844 26845 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26846 SD_INFO(SD_LOG_DUMP, un, 26847 "sddump: read failed with CHECK, try # %d\n", i); 26848 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26849 (void) sd_send_polled_RQS(un); 26850 } 26851 26852 continue; 26853 } 26854 26855 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26856 int reset_retval = 0; 26857 26858 SD_INFO(SD_LOG_DUMP, un, 26859 "sddump: read failed with BUSY, try # %d\n", i); 26860 26861 if (un->un_f_lun_reset_enabled == TRUE) { 26862 reset_retval = scsi_reset(SD_ADDRESS(un), 26863 RESET_LUN); 26864 } 26865 if (reset_retval == 0) { 26866 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26867 } 26868 (void) sd_send_polled_RQS(un); 26869 26870 } else { 26871 SD_INFO(SD_LOG_DUMP, un, 26872 "sddump: read failed with 0x%x, try # %d\n", 26873 SD_GET_PKT_STATUS(pkt), i); 26874 mutex_enter(SD_MUTEX(un)); 26875 sd_reset_target(un, pkt); 26876 mutex_exit(SD_MUTEX(un)); 26877 } 26878 26879 /* 26880 * If we are not getting anywhere with lun/target resets, 26881 * let's reset the bus. 26882 */ 26883 if (i > SD_NDUMP_RETRIES/2) { 26884 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26885 (void) sd_send_polled_RQS(un); 26886 } 26887 26888 } 26889 scsi_destroy_pkt(pkt); 26890 26891 if (err != 0) { 26892 scsi_free_consistent_buf(bp); 26893 *bpp = NULL; 26894 } else { 26895 *bpp = bp; 26896 } 26897 26898 done: 26899 mutex_enter(SD_MUTEX(un)); 26900 return (err); 26901 } 26902 26903 26904 /* 26905 * Function: sd_failfast_flushq 26906 * 26907 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26908 * in b_flags and move them onto the failfast queue, then kick 26909 * off a thread to return all bp's on the failfast queue to 26910 * their owners with an error set. 26911 * 26912 * Arguments: un - pointer to the soft state struct for the instance. 26913 * 26914 * Context: may execute in interrupt context. 26915 */ 26916 26917 static void 26918 sd_failfast_flushq(struct sd_lun *un) 26919 { 26920 struct buf *bp; 26921 struct buf *next_waitq_bp; 26922 struct buf *prev_waitq_bp = NULL; 26923 26924 ASSERT(un != NULL); 26925 ASSERT(mutex_owned(SD_MUTEX(un))); 26926 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26927 ASSERT(un->un_failfast_bp == NULL); 26928 26929 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26930 "sd_failfast_flushq: entry: un:0x%p\n", un); 26931 26932 /* 26933 * Check if we should flush all bufs when entering failfast state, or 26934 * just those with B_FAILFAST set. 26935 */ 26936 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26937 /* 26938 * Move *all* bp's on the wait queue to the failfast flush 26939 * queue, including those that do NOT have B_FAILFAST set. 
26940 */ 26941 if (un->un_failfast_headp == NULL) { 26942 ASSERT(un->un_failfast_tailp == NULL); 26943 un->un_failfast_headp = un->un_waitq_headp; 26944 } else { 26945 ASSERT(un->un_failfast_tailp != NULL); 26946 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26947 } 26948 26949 un->un_failfast_tailp = un->un_waitq_tailp; 26950 26951 /* update kstat for each bp moved out of the waitq */ 26952 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26953 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26954 } 26955 26956 /* empty the waitq */ 26957 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26958 26959 } else { 26960 /* 26961 * Go thru the wait queue, pick off all entries with 26962 * B_FAILFAST set, and move these onto the failfast queue. 26963 */ 26964 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26965 /* 26966 * Save the pointer to the next bp on the wait queue, 26967 * so we get to it on the next iteration of this loop. 26968 */ 26969 next_waitq_bp = bp->av_forw; 26970 26971 /* 26972 * If this bp from the wait queue does NOT have 26973 * B_FAILFAST set, just move on to the next element 26974 * in the wait queue. Note, this is the only place 26975 * where it is correct to set prev_waitq_bp. 26976 */ 26977 if ((bp->b_flags & B_FAILFAST) == 0) { 26978 prev_waitq_bp = bp; 26979 continue; 26980 } 26981 26982 /* 26983 * Remove the bp from the wait queue. 26984 */ 26985 if (bp == un->un_waitq_headp) { 26986 /* The bp is the first element of the waitq. */ 26987 un->un_waitq_headp = next_waitq_bp; 26988 if (un->un_waitq_headp == NULL) { 26989 /* The wait queue is now empty */ 26990 un->un_waitq_tailp = NULL; 26991 } 26992 } else { 26993 /* 26994 * The bp is either somewhere in the middle 26995 * or at the end of the wait queue. 26996 */ 26997 ASSERT(un->un_waitq_headp != NULL); 26998 ASSERT(prev_waitq_bp != NULL); 26999 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 27000 == 0); 27001 if (bp == un->un_waitq_tailp) { 27002 /* bp is the last entry on the waitq. */ 27003 ASSERT(next_waitq_bp == NULL); 27004 un->un_waitq_tailp = prev_waitq_bp; 27005 } 27006 prev_waitq_bp->av_forw = next_waitq_bp; 27007 } 27008 bp->av_forw = NULL; 27009 27010 /* 27011 * update kstat since the bp is moved out of 27012 * the waitq 27013 */ 27014 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 27015 27016 /* 27017 * Now put the bp onto the failfast queue. 27018 */ 27019 if (un->un_failfast_headp == NULL) { 27020 /* failfast queue is currently empty */ 27021 ASSERT(un->un_failfast_tailp == NULL); 27022 un->un_failfast_headp = 27023 un->un_failfast_tailp = bp; 27024 } else { 27025 /* Add the bp to the end of the failfast q */ 27026 ASSERT(un->un_failfast_tailp != NULL); 27027 ASSERT(un->un_failfast_tailp->b_flags & 27028 B_FAILFAST); 27029 un->un_failfast_tailp->av_forw = bp; 27030 un->un_failfast_tailp = bp; 27031 } 27032 } 27033 } 27034 27035 /* 27036 * Now return all bp's on the failfast queue to their owners. 27037 */ 27038 while ((bp = un->un_failfast_headp) != NULL) { 27039 27040 un->un_failfast_headp = bp->av_forw; 27041 if (un->un_failfast_headp == NULL) { 27042 un->un_failfast_tailp = NULL; 27043 } 27044 27045 /* 27046 * We want to return the bp with a failure error code, but 27047 * we do not want a call to sd_start_cmds() to occur here, 27048 * so use sd_return_failed_command_no_restart() instead of 27049 * sd_return_failed_command(). 27050 */ 27051 sd_return_failed_command_no_restart(un, bp, EIO); 27052 } 27053 27054 /* Flush the xbuf queues if required. 
*/ 27055 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 27056 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 27057 } 27058 27059 SD_TRACE(SD_LOG_IO_FAILFAST, un, 27060 "sd_failfast_flushq: exit: un:0x%p\n", un); 27061 } 27062 27063 27064 /* 27065 * Function: sd_failfast_flushq_callback 27066 * 27067 * Description: Return TRUE if the given bp meets the criteria for failfast 27068 * flushing. Used with ddi_xbuf_flushq(9F). 27069 * 27070 * Arguments: bp - ptr to buf struct to be examined. 27071 * 27072 * Context: Any 27073 */ 27074 27075 static int 27076 sd_failfast_flushq_callback(struct buf *bp) 27077 { 27078 /* 27079 * Return TRUE if (1) we want to flush ALL bufs when the failfast 27080 * state is entered; OR (2) the given bp has B_FAILFAST set. 27081 */ 27082 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 27083 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 27084 } 27085 27086 27087 27088 /* 27089 * Function: sd_setup_next_xfer 27090 * 27091 * Description: Prepare the next I/O operation using DMA_PARTIAL 27092 * 27093 */ 27094 27095 static int 27096 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 27097 struct scsi_pkt *pkt, struct sd_xbuf *xp) 27098 { 27099 ssize_t num_blks_not_xfered; 27100 daddr_t strt_blk_num; 27101 ssize_t bytes_not_xfered; 27102 int rval; 27103 27104 ASSERT(pkt->pkt_resid == 0); 27105 27106 /* 27107 * Calculate the next block number and the amount to be transferred. 27108 * 27109 * How much data has NOT been transferred to the HBA yet. 27110 */ 27111 bytes_not_xfered = xp->xb_dma_resid; 27112 27113 /* 27114 * Figure out how many blocks have NOT been transferred to the HBA yet. 27115 */ 27116 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 27117 27118 /* 27119 * Set the starting block number to the end of what WAS transferred. 27120 */ 27121 strt_blk_num = xp->xb_blkno + 27122 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 27123 27124 /* 27125 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 27126 * will call scsi_init_pkt with NULL_FUNC so we do not have to release 27127 * the disk mutex here. 27128 */ 27129 rval = sd_setup_next_rw_pkt(un, pkt, bp, 27130 strt_blk_num, num_blks_not_xfered); 27131 27132 if (rval == 0) { 27133 27134 /* 27135 * Success. 27136 * 27137 * Adjust things if there are still more blocks to be 27138 * transferred. 27139 */ 27140 xp->xb_dma_resid = pkt->pkt_resid; 27141 pkt->pkt_resid = 0; 27142 27143 return (1); 27144 } 27145 27146 /* 27147 * There's really only one possible return value from 27148 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 27149 * returns NULL. 27150 */ 27151 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 27152 27153 bp->b_resid = bp->b_bcount; 27154 bp->b_flags |= B_ERROR; 27155 27156 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27157 "Error setting up next portion of DMA transfer\n"); 27158 27159 return (0); 27160 } 27161 27162 /* 27163 * Function: sd_panic_for_res_conflict 27164 * 27165 * Description: Call panic with a string formatted with "Reservation Conflict" 27166 * and a human readable identifier indicating the SD instance 27167 * that experienced the reservation conflict. 27168 * 27169 * Arguments: un - pointer to the soft state struct for the instance. 27170 * 27171 * Context: may execute in interrupt context.
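 *
 * Given the format string in the body below, the resulting panic
 * message has this shape (the device path shown is illustrative):
 *
 *	Reservation Conflict
 *	Disk: /pci@0,0/pci1000,3c@10/sd@1,0
 *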
27172 */ 27173 27174 #define SD_RESV_CONFLICT_FMT_LEN 40 27175 void 27176 sd_panic_for_res_conflict(struct sd_lun *un) 27177 { 27178 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 27179 char path_str[MAXPATHLEN]; 27180 27181 (void) snprintf(panic_str, sizeof (panic_str), 27182 "Reservation Conflict\nDisk: %s", 27183 ddi_pathname(SD_DEVINFO(un), path_str)); 27184 27185 panic(panic_str); 27186 } 27187 27188 /* 27189 * Note: The following sd_faultinjection_ioctl() routines implement 27190 * driver support for fault injection, used in error analysis by 27191 * causing faults in multiple layers of the driver. 27192 * 27193 */ 27194 27195 #ifdef SD_FAULT_INJECTION 27196 static uint_t sd_fault_injection_on = 0; 27197 27198 /* 27199 * Function: sd_faultinjection_ioctl() 27200 * 27201 * Description: This routine is the driver entry point for handling 27202 * fault injection ioctls to inject errors into the 27203 * layer model 27204 * 27205 * Arguments: cmd - the ioctl cmd received 27206 * arg - the argument from the user; also used to return data 27207 */ 27208 27209 static void 27210 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 27211 27212 uint_t i; 27213 uint_t rval; 27214 27215 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 27216 27217 mutex_enter(SD_MUTEX(un)); 27218 27219 switch (cmd) { 27220 case SDIOCRUN: 27221 /* Allow pushed faults to be injected */ 27222 SD_INFO(SD_LOG_SDTEST, un, 27223 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 27224 27225 sd_fault_injection_on = 1; 27226 27227 SD_INFO(SD_LOG_IOERR, un, 27228 "sd_faultinjection_ioctl: run finished\n"); 27229 break; 27230 27231 case SDIOCSTART: 27232 /* Start Injection Session */ 27233 SD_INFO(SD_LOG_SDTEST, un, 27234 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 27235 27236 sd_fault_injection_on = 0; 27237 un->sd_injection_mask = 0xFFFFFFFF; 27238 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27239 un->sd_fi_fifo_pkt[i] = NULL; 27240 un->sd_fi_fifo_xb[i] = NULL; 27241 un->sd_fi_fifo_un[i] = NULL; 27242 un->sd_fi_fifo_arq[i] = NULL; 27243 } 27244 un->sd_fi_fifo_start = 0; 27245 un->sd_fi_fifo_end = 0; 27246 27247 mutex_enter(&(un->un_fi_mutex)); 27248 un->sd_fi_log[0] = '\0'; 27249 un->sd_fi_buf_len = 0; 27250 mutex_exit(&(un->un_fi_mutex)); 27251 27252 SD_INFO(SD_LOG_IOERR, un, 27253 "sd_faultinjection_ioctl: start finished\n"); 27254 break; 27255 27256 case SDIOCSTOP: 27257 /* Stop Injection Session */ 27258 SD_INFO(SD_LOG_SDTEST, un, 27259 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 27260 sd_fault_injection_on = 0; 27261 un->sd_injection_mask = 0x0; 27262 27263 /* Empty stray or unused structs from the fifo */ 27264 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27265 if (un->sd_fi_fifo_pkt[i] != NULL) { 27266 kmem_free(un->sd_fi_fifo_pkt[i], 27267 sizeof (struct sd_fi_pkt)); 27268 } 27269 if (un->sd_fi_fifo_xb[i] != NULL) { 27270 kmem_free(un->sd_fi_fifo_xb[i], 27271 sizeof (struct sd_fi_xb)); 27272 } 27273 if (un->sd_fi_fifo_un[i] != NULL) { 27274 kmem_free(un->sd_fi_fifo_un[i], 27275 sizeof (struct sd_fi_un)); 27276 } 27277 if (un->sd_fi_fifo_arq[i] != NULL) { 27278 kmem_free(un->sd_fi_fifo_arq[i], 27279 sizeof (struct sd_fi_arq)); 27280 } 27281 un->sd_fi_fifo_pkt[i] = NULL; 27282 un->sd_fi_fifo_un[i] = NULL; 27283 un->sd_fi_fifo_xb[i] = NULL; 27284 un->sd_fi_fifo_arq[i] = NULL; 27285 } 27286 un->sd_fi_fifo_start = 0; 27287 un->sd_fi_fifo_end = 0; 27288 27289 SD_INFO(SD_LOG_IOERR, un, 27290 "sd_faultinjection_ioctl: stop finished\n"); 27291 break; 27292 27293 case SDIOCINSERTPKT: 27294 /* Store a
packet struct to be pushed onto the fifo */ 27295 SD_INFO(SD_LOG_SDTEST, un, 27296 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 27297 27298 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27299 27300 sd_fault_injection_on = 0; 27301 27302 /* No more than SD_FI_MAX_ERROR entries allowed in the queue */ 27303 if (un->sd_fi_fifo_pkt[i] != NULL) { 27304 kmem_free(un->sd_fi_fifo_pkt[i], 27305 sizeof (struct sd_fi_pkt)); 27306 } 27307 if (arg != NULL) { 27308 un->sd_fi_fifo_pkt[i] = 27309 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 27310 if (un->sd_fi_fifo_pkt[i] == NULL) { 27311 /* Alloc failed; don't store anything */ 27312 break; 27313 } 27314 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 27315 sizeof (struct sd_fi_pkt), 0); 27316 if (rval == -1) { 27317 kmem_free(un->sd_fi_fifo_pkt[i], 27318 sizeof (struct sd_fi_pkt)); 27319 un->sd_fi_fifo_pkt[i] = NULL; 27320 } 27321 } else { 27322 SD_INFO(SD_LOG_IOERR, un, 27323 "sd_faultinjection_ioctl: pkt null\n"); 27324 } 27325 break; 27326 27327 case SDIOCINSERTXB: 27328 /* Store an xb struct to be pushed onto the fifo */ 27329 SD_INFO(SD_LOG_SDTEST, un, 27330 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 27331 27332 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27333 27334 sd_fault_injection_on = 0; 27335 27336 if (un->sd_fi_fifo_xb[i] != NULL) { 27337 kmem_free(un->sd_fi_fifo_xb[i], 27338 sizeof (struct sd_fi_xb)); 27339 un->sd_fi_fifo_xb[i] = NULL; 27340 } 27341 if (arg != NULL) { 27342 un->sd_fi_fifo_xb[i] = 27343 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 27344 if (un->sd_fi_fifo_xb[i] == NULL) { 27345 /* Alloc failed; don't store anything */ 27346 break; 27347 } 27348 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 27349 sizeof (struct sd_fi_xb), 0); 27350 27351 if (rval == -1) { 27352 kmem_free(un->sd_fi_fifo_xb[i], 27353 sizeof (struct sd_fi_xb)); 27354 un->sd_fi_fifo_xb[i] = NULL; 27355 } 27356 } else { 27357 SD_INFO(SD_LOG_IOERR, un, 27358 "sd_faultinjection_ioctl: xb null\n"); 27359 } 27360 break; 27361 27362 case SDIOCINSERTUN: 27363 /* Store a un struct to be pushed onto the fifo */ 27364 SD_INFO(SD_LOG_SDTEST, un, 27365 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 27366 27367 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27368 27369 sd_fault_injection_on = 0; 27370 27371 if (un->sd_fi_fifo_un[i] != NULL) { 27372 kmem_free(un->sd_fi_fifo_un[i], 27373 sizeof (struct sd_fi_un)); 27374 un->sd_fi_fifo_un[i] = NULL; 27375 } 27376 if (arg != NULL) { 27377 un->sd_fi_fifo_un[i] = 27378 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 27379 if (un->sd_fi_fifo_un[i] == NULL) { 27380 /* Alloc failed; don't store anything */ 27381 break; 27382 } 27383 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 27384 sizeof (struct sd_fi_un), 0); 27385 if (rval == -1) { 27386 kmem_free(un->sd_fi_fifo_un[i], 27387 sizeof (struct sd_fi_un)); 27388 un->sd_fi_fifo_un[i] = NULL; 27389 } 27390 27391 } else { 27392 SD_INFO(SD_LOG_IOERR, un, 27393 "sd_faultinjection_ioctl: un null\n"); 27394 } 27395 27396 break; 27397 27398 case SDIOCINSERTARQ: 27399 /* Store an arq struct to be pushed onto the fifo */ 27400 SD_INFO(SD_LOG_SDTEST, un, 27401 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 27402 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27403 27404 sd_fault_injection_on = 0; 27405 27406 if (un->sd_fi_fifo_arq[i] != NULL) { 27407 kmem_free(un->sd_fi_fifo_arq[i], 27408 sizeof (struct sd_fi_arq)); 27409 un->sd_fi_fifo_arq[i] = NULL; 27410 } 27411 if (arg != NULL) { 27412 un->sd_fi_fifo_arq[i] = 27413 kmem_alloc(sizeof (struct sd_fi_arq),
KM_NOSLEEP); 27414 if (un->sd_fi_fifo_arq[i] == NULL) { 27415 /* Alloc failed; don't store anything */ 27416 break; 27417 } 27418 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 27419 sizeof (struct sd_fi_arq), 0); 27420 if (rval == -1) { 27421 kmem_free(un->sd_fi_fifo_arq[i], 27422 sizeof (struct sd_fi_arq)); 27423 un->sd_fi_fifo_arq[i] = NULL; 27424 } 27425 27426 } else { 27427 SD_INFO(SD_LOG_IOERR, un, 27428 "sd_faultinjection_ioctl: arq null\n"); 27429 } 27430 27431 break; 27432 27433 case SDIOCPUSH: 27434 /* Push stored xb, pkt, un, and arq onto the fifo */ 27435 sd_fault_injection_on = 0; 27436 27437 if (arg != NULL) { 27438 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 27439 if (rval != -1 && 27440 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 27441 un->sd_fi_fifo_end += i; 27442 } 27443 } else { 27444 SD_INFO(SD_LOG_IOERR, un, 27445 "sd_faultinjection_ioctl: push arg null\n"); 27446 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 27447 un->sd_fi_fifo_end++; 27448 } 27449 } 27450 SD_INFO(SD_LOG_IOERR, un, 27451 "sd_faultinjection_ioctl: push to end=%d\n", 27452 un->sd_fi_fifo_end); 27453 break; 27454 27455 case SDIOCRETRIEVE: 27456 /* Return the buffer of log data from the injection session */ 27457 SD_INFO(SD_LOG_SDTEST, un, 27458 "sd_faultinjection_ioctl: Injecting Fault Retrieve"); 27459 27460 sd_fault_injection_on = 0; 27461 27462 mutex_enter(&(un->un_fi_mutex)); 27463 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 27464 un->sd_fi_buf_len+1, 0); 27465 mutex_exit(&(un->un_fi_mutex)); 27466 27467 if (rval == -1) { 27468 /* 27469 * arg is possibly invalid; set 27470 * it to NULL for the return 27471 */ 27472 arg = NULL; 27473 } 27474 break; 27475 } 27476 27477 mutex_exit(SD_MUTEX(un)); 27478 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 27479 " exit\n"); 27480 } 27481 27482 27483 /* 27484 * Function: sd_injection_log() 27485 * 27486 * Description: This routine appends buf to the existing injection log, 27487 * for retrieval via sd_faultinjection_ioctl() for use in fault 27488 * detection and recovery 27489 * 27490 * Arguments: buf - the string to add to the log 27491 */ 27492 27493 static void 27494 sd_injection_log(char *buf, struct sd_lun *un) 27495 { 27496 uint_t len; 27497 27498 ASSERT(un != NULL); 27499 ASSERT(buf != NULL); 27500 27501 mutex_enter(&(un->un_fi_mutex)); 27502 27503 len = min(strlen(buf), 255); 27504 /* Add the logged value to the injection log to be returned later */ 27505 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 27506 uint_t offset = strlen((char *)un->sd_fi_log); 27507 char *destp = (char *)un->sd_fi_log + offset; 27508 int i; 27509 for (i = 0; i < len; i++) { 27510 *destp++ = *buf++; 27511 } 27512 un->sd_fi_buf_len += len; 27513 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 27514 } 27515 27516 mutex_exit(&(un->un_fi_mutex)); 27517 } 27518 27519 27520 /* 27521 * Function: sd_faultinjection() 27522 * 27523 * Description: This routine takes the pkt and changes its 27524 * content based on the error injection scenario.
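 *
 * An injection session is driven from userland through the SDIOC*
 * ioctls handled above. A minimal sketch of one session (illustrative
 * only; fi_pkt is a populated struct sd_fi_pkt, cnt is a uint_t count
 * of queued entries, and logbuf receives the session log):
 *
 *	(void) ioctl(fd, SDIOCSTART, NULL);
 *	(void) ioctl(fd, SDIOCINSERTPKT, &fi_pkt);
 *	(void) ioctl(fd, SDIOCPUSH, &cnt);
 *	(void) ioctl(fd, SDIOCRUN, NULL);
 *	... perform I/O so that sdintr() applies the queued fault ...
 *	(void) ioctl(fd, SDIOCRETRIEVE, logbuf);
 *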

/*
 *    Function: sd_injection_log()
 *
 * Description: This routine appends buf to the injection log kept in the
 *		soft state, for later retrieval via the fault injection
 *		ioctl (SDIOCRETRIEVE), for use in fault detection and
 *		recovery.
 *
 *   Arguments: buf - the string to append to the log
 *		un  - the soft state instance whose log is updated
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
    uint_t len;

    ASSERT(un != NULL);
    ASSERT(buf != NULL);

    mutex_enter(&(un->un_fi_mutex));

    len = min(strlen(buf), 255);
    /* Add the logged value to the injection log, to be returned later */
    if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
        uint_t offset = strlen((char *)un->sd_fi_log);
        char *destp = (char *)un->sd_fi_log + offset;
        int i;
        for (i = 0; i < len; i++) {
            *destp++ = *buf++;
        }
        un->sd_fi_buf_len += len;
        un->sd_fi_log[un->sd_fi_buf_len] = '\0';
    }

    mutex_exit(&(un->un_fi_mutex));
}


/*
 *    Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 *   Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
    uint_t i;
    struct sd_fi_pkt *fi_pkt;
    struct sd_fi_xb *fi_xb;
    struct sd_fi_un *fi_un;
    struct sd_fi_arq *fi_arq;
    struct buf *bp;
    struct sd_xbuf *xb;
    struct sd_lun *un;

    ASSERT(pktp != NULL);

    /* pull bp, xb and un from pktp */
    bp = (struct buf *)pktp->pkt_private;
    xb = SD_GET_XBUF(bp);
    un = SD_GET_UN(bp);

    ASSERT(un != NULL);

    mutex_enter(SD_MUTEX(un));

    SD_TRACE(SD_LOG_SDTEST, un,
        "sd_faultinjection: entry Injection from sdintr\n");

    /* if injection is off, return */
    if (sd_fault_injection_on == 0 ||
        un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
        mutex_exit(SD_MUTEX(un));
        return;
    }

    /* take the next set off the fifo */
    i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

    fi_pkt = un->sd_fi_fifo_pkt[i];
    fi_xb = un->sd_fi_fifo_xb[i];
    fi_un = un->sd_fi_fifo_un[i];
    fi_arq = un->sd_fi_fifo_arq[i];

    /* set variables accordingly */
    /* set pkt if it was on the fifo */
    if (fi_pkt != NULL) {
        SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
        SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
        SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
        SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
        SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
        SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");
    }

    /* set xb if it was on the fifo */
    if (fi_xb != NULL) {
        SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
        SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
        SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
        SD_CONDSET(xb, xb, xb_victim_retry_count,
            "xb_victim_retry_count");
        SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
        SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
        SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

        /* copy in the sense data block if it was supplied */
        if (fi_xb->xb_sense_data[0] != -1) {
            bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
                SENSE_LENGTH);
        }

        /* copy in extended sense codes (into the sense buffer) */
        SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
            xb, es_code, "es_code");
        SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
            xb, es_key, "es_key");
        SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
            xb, es_add_code, "es_add_code");
        SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
            xb, es_qual_code, "es_qual_code");
    }

    /* set un if it was on the fifo */
    if (fi_un != NULL) {
        SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
        SD_CONDSET(un, un, un_ctype, "un_ctype");
        SD_CONDSET(un, un, un_reset_retry_count,
            "un_reset_retry_count");
        SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
        SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
        SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
        SD_CONDSET(un, un, un_f_allow_bus_device_reset,
            "un_f_allow_bus_device_reset");
        SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
    }

    /* copy in the auto request sense if it was on the fifo */
    if (fi_arq != NULL) {
        bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
    }

    /* free the structs */
    if (un->sd_fi_fifo_pkt[i] != NULL) {
        kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
    }
    if (un->sd_fi_fifo_xb[i] != NULL) {
        kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
    }
    if (un->sd_fi_fifo_un[i] != NULL) {
        kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
    }
    if (un->sd_fi_fifo_arq[i] != NULL) {
        kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
    }

    /*
     * kmem_free() does not guarantee that the pointer is set to NULL.
     * Since we use these pointers to determine whether values were
     * staged, make sure they are always NULL after the free.
     */
    un->sd_fi_fifo_pkt[i] = NULL;
    un->sd_fi_fifo_un[i] = NULL;
    un->sd_fi_fifo_xb[i] = NULL;
    un->sd_fi_fifo_arq[i] = NULL;

    un->sd_fi_fifo_start++;

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}
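
/*
 * A note on the fifo arithmetic used above: sd_fi_fifo_start and
 * sd_fi_fifo_end are monotonically increasing counters that are mapped
 * onto slots with "% SD_FI_MAX_ERROR" (SD_FI_MAX_ERROR is defined in
 * sddef.h). As a worked example, with sd_fi_fifo_start == 3 the staged
 * pkt/xb/un/arq set is consumed from slot 3, while the SDIOCINSERT*
 * ioctls stage new entries into slot sd_fi_fifo_end % SD_FI_MAX_ERROR;
 * SDIOCPUSH refuses to advance sd_fi_fifo_end to SD_FI_MAX_ERROR or
 * beyond, so the counters cannot outrun the slot arrays.
 */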

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked from sd_unit_attach(). By the time it is called,
 * the properties in the conf file must already have been processed,
 * including the "hotpluggable" property.
 *
 * The sd driver distinguishes three different types of devices: removable
 * media, non-removable media, and hotpluggable. The differences are
 * defined below:
 *
 * 1. Device ID
 *
 * A device ID uniquely identifies a device; refer to
 * ddi_devid_register(9F).
 *
 * For a non-removable media disk device that can provide an 0x80 or 0x83
 * VPD page (refer to the INQUIRY command in the SCSI SPC specification),
 * a unique device ID is created from that page. For other non-removable
 * media devices, a default device ID is created only if the device has at
 * least two alternate cylinders; otherwise the device has no devid.
 *
 * -------------------------------------------------------
 * removable media   hotpluggable  | Can Have Device ID
 * -------------------------------------------------------
 *     false             false     |     Yes
 *     false             true      |     Yes
 *     true                x       |     No
 * -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 * In the SCSI specifications, only the group 4 (16-byte CDB) commands
 * carry 8-byte logical block addresses and can therefore address storage
 * beyond 2TB; a 10-byte CDB carries only a 32-bit LBA, which with
 * 512-byte blocks tops out at 2TB. Without group 4 support it is
 * impossible to make full use of a disk with a capacity larger than 2TB.
 *
 * -----------------------------------------------
 * removable media   hotpluggable   LP64  |  CDB Group
 * -----------------------------------------------
 *     false             false     false  |   1
 *     false             false     true   |   4
 *     false             true      false  |   1
 *     false             true      true   |   4
 *     true                x         x    |   5
 * -----------------------------------------------
 *
 *
 * 3. Checking for a VTOC label
 *
 * If a direct-access disk has no EFI label, sd will check whether it has
 * a valid VTOC label. sd now also performs that check for removable media
 * and hotpluggable devices.
27718 * 27719 * -------------------------------------------------------------- 27720 * Direct-Access removable media hotpluggable | Check Label 27721 * ------------------------------------------------------------- 27722 * false false false | No 27723 * false false true | No 27724 * false true false | Yes 27725 * false true true | Yes 27726 * true x x | Yes 27727 * -------------------------------------------------------------- 27728 * 27729 * 27730 * 4. Building default VTOC label 27731 * 27732 * As section 3 says, sd checks if some kinds of devices have VTOC label. 27733 * If those devices have no valid VTOC label, sd(7d) will attempt to 27734 * create default VTOC for them. Currently sd creates default VTOC label 27735 * for all devices on x86 platform (VTOC_16), but only for removable 27736 * media devices on SPARC (VTOC_8). 27737 * 27738 * ----------------------------------------------------------- 27739 * removable media hotpluggable platform | Default Label 27740 * ----------------------------------------------------------- 27741 * false false sparc | No 27742 * false true x86 | Yes 27743 * false true sparc | Yes 27744 * true x x | Yes 27745 * ---------------------------------------------------------- 27746 * 27747 * 27748 * 5. Supported blocksizes of target devices 27749 * 27750 * Sd supports non-512-byte blocksize for removable media devices only. 27751 * For other devices, only 512-byte blocksize is supported. This may be 27752 * changed in near future because some RAID devices require non-512-byte 27753 * blocksize 27754 * 27755 * ----------------------------------------------------------- 27756 * removable media hotpluggable | non-512-byte blocksize 27757 * ----------------------------------------------------------- 27758 * false false | No 27759 * false true | No 27760 * true x | Yes 27761 * ----------------------------------------------------------- 27762 * 27763 * 27764 * 6. Automatic mount & unmount 27765 * 27766 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query 27767 * if a device is removable media device. It return 1 for removable media 27768 * devices, and 0 for others. 27769 * 27770 * The automatic mounting subsystem should distinguish between the types 27771 * of devices and apply automounting policies to each. 27772 * 27773 * 27774 * 7. fdisk partition management 27775 * 27776 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver 27777 * just supports fdisk partitions on x86 platform. On sparc platform, sd 27778 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 27779 * fdisk partitions on both x86 and SPARC platform. 27780 * 27781 * ----------------------------------------------------------- 27782 * platform removable media USB/1394 | fdisk supported 27783 * ----------------------------------------------------------- 27784 * x86 X X | true 27785 * ------------------------------------------------------------ 27786 * sparc X X | false 27787 * ------------------------------------------------------------ 27788 * 27789 * 27790 * 8. MBOOT/MBR 27791 * 27792 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support 27793 * read/write mboot for removable media devices on sparc platform. 
27794 * 27795 * ----------------------------------------------------------- 27796 * platform removable media USB/1394 | mboot supported 27797 * ----------------------------------------------------------- 27798 * x86 X X | true 27799 * ------------------------------------------------------------ 27800 * sparc false false | false 27801 * sparc false true | true 27802 * sparc true false | true 27803 * sparc true true | true 27804 * ------------------------------------------------------------ 27805 * 27806 * 27807 * 9. error handling during opening device 27808 * 27809 * If failed to open a disk device, an errno is returned. For some kinds 27810 * of errors, different errno is returned depending on if this device is 27811 * a removable media device. This brings USB/1394 hard disks in line with 27812 * expected hard disk behavior. It is not expected that this breaks any 27813 * application. 27814 * 27815 * ------------------------------------------------------ 27816 * removable media hotpluggable | errno 27817 * ------------------------------------------------------ 27818 * false false | EIO 27819 * false true | EIO 27820 * true x | ENXIO 27821 * ------------------------------------------------------ 27822 * 27823 * 27824 * 11. ioctls: DKIOCEJECT, CDROMEJECT 27825 * 27826 * These IOCTLs are applicable only to removable media devices. 27827 * 27828 * ----------------------------------------------------------- 27829 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT 27830 * ----------------------------------------------------------- 27831 * false false | No 27832 * false true | No 27833 * true x | Yes 27834 * ----------------------------------------------------------- 27835 * 27836 * 27837 * 12. Kstats for partitions 27838 * 27839 * sd creates partition kstat for non-removable media devices. USB and 27840 * Firewire hard disks now have partition kstats 27841 * 27842 * ------------------------------------------------------ 27843 * removable media hotpluggable | kstat 27844 * ------------------------------------------------------ 27845 * false false | Yes 27846 * false true | Yes 27847 * true x | No 27848 * ------------------------------------------------------ 27849 * 27850 * 27851 * 13. Removable media & hotpluggable properties 27852 * 27853 * Sd driver creates a "removable-media" property for removable media 27854 * devices. Parent nexus drivers create a "hotpluggable" property if 27855 * it supports hotplugging. 27856 * 27857 * --------------------------------------------------------------------- 27858 * removable media hotpluggable | "removable-media" " hotpluggable" 27859 * --------------------------------------------------------------------- 27860 * false false | No No 27861 * false true | No Yes 27862 * true false | Yes No 27863 * true true | Yes Yes 27864 * --------------------------------------------------------------------- 27865 * 27866 * 27867 * 14. Power Management 27868 * 27869 * sd only power manages removable media devices or devices that support 27870 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250) 27871 * 27872 * A parent nexus that supports hotplugging can also set "pm-capable" 27873 * if the disk can be power managed. 
27874 * 27875 * ------------------------------------------------------------ 27876 * removable media hotpluggable pm-capable | power manage 27877 * ------------------------------------------------------------ 27878 * false false false | No 27879 * false false true | Yes 27880 * false true false | No 27881 * false true true | Yes 27882 * true x x | Yes 27883 * ------------------------------------------------------------ 27884 * 27885 * USB and firewire hard disks can now be power managed independently 27886 * of the framebuffer 27887 * 27888 * 27889 * 15. Support for USB disks with capacity larger than 1TB 27890 * 27891 * Currently, sd doesn't permit a fixed disk device with capacity 27892 * larger than 1TB to be used in a 32-bit operating system environment. 27893 * However, sd doesn't do that for removable media devices. Instead, it 27894 * assumes that removable media devices cannot have a capacity larger 27895 * than 1TB. Therefore, using those devices on 32-bit system is partially 27896 * supported, which can cause some unexpected results. 27897 * 27898 * --------------------------------------------------------------------- 27899 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 27900 * --------------------------------------------------------------------- 27901 * false false | true | no 27902 * false true | true | no 27903 * true false | true | Yes 27904 * true true | true | Yes 27905 * --------------------------------------------------------------------- 27906 * 27907 * 27908 * 16. Check write-protection at open time 27909 * 27910 * When a removable media device is being opened for writing without NDELAY 27911 * flag, sd will check if this device is writable. If attempting to open 27912 * without NDELAY flag a write-protected device, this operation will abort. 27913 * 27914 * ------------------------------------------------------------ 27915 * removable media USB/1394 | WP Check 27916 * ------------------------------------------------------------ 27917 * false false | No 27918 * false true | No 27919 * true false | Yes 27920 * true true | Yes 27921 * ------------------------------------------------------------ 27922 * 27923 * 27924 * 17. syslog when corrupted VTOC is encountered 27925 * 27926 * Currently, if an invalid VTOC is encountered, sd only print syslog 27927 * for fixed SCSI disks. 27928 * ------------------------------------------------------------ 27929 * removable media USB/1394 | print syslog 27930 * ------------------------------------------------------------ 27931 * false false | Yes 27932 * false true | No 27933 * true false | No 27934 * true true | No 27935 * ------------------------------------------------------------ 27936 */ 27937 static void 27938 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi) 27939 { 27940 int pm_capable_prop; 27941 27942 ASSERT(un->un_sd); 27943 ASSERT(un->un_sd->sd_inq); 27944 27945 /* 27946 * Enable SYNC CACHE support for all devices. 27947 */ 27948 un->un_f_sync_cache_supported = TRUE; 27949 27950 if (un->un_sd->sd_inq->inq_rmb) { 27951 /* 27952 * The media of this device is removable. And for this kind 27953 * of devices, it is possible to change medium after opening 27954 * devices. Thus we should support this operation. 
27955 */ 27956 un->un_f_has_removable_media = TRUE; 27957 27958 /* 27959 * support non-512-byte blocksize of removable media devices 27960 */ 27961 un->un_f_non_devbsize_supported = TRUE; 27962 27963 /* 27964 * Assume that all removable media devices support DOOR_LOCK 27965 */ 27966 un->un_f_doorlock_supported = TRUE; 27967 27968 /* 27969 * For a removable media device, it is possible to be opened 27970 * with NDELAY flag when there is no media in drive, in this 27971 * case we don't care if device is writable. But if without 27972 * NDELAY flag, we need to check if media is write-protected. 27973 */ 27974 un->un_f_chk_wp_open = TRUE; 27975 27976 /* 27977 * need to start a SCSI watch thread to monitor media state, 27978 * when media is being inserted or ejected, notify syseventd. 27979 */ 27980 un->un_f_monitor_media_state = TRUE; 27981 27982 /* 27983 * Some devices don't support START_STOP_UNIT command. 27984 * Therefore, we'd better check if a device supports it 27985 * before sending it. 27986 */ 27987 un->un_f_check_start_stop = TRUE; 27988 27989 /* 27990 * support eject media ioctl: 27991 * FDEJECT, DKIOCEJECT, CDROMEJECT 27992 */ 27993 un->un_f_eject_media_supported = TRUE; 27994 27995 /* 27996 * Because many removable-media devices don't support 27997 * LOG_SENSE, we couldn't use this command to check if 27998 * a removable media device support power-management. 27999 * We assume that they support power-management via 28000 * START_STOP_UNIT command and can be spun up and down 28001 * without limitations. 28002 */ 28003 un->un_f_pm_supported = TRUE; 28004 28005 /* 28006 * Need to create a zero length (Boolean) property 28007 * removable-media for the removable media devices. 28008 * Note that the return value of the property is not being 28009 * checked, since if unable to create the property 28010 * then do not want the attach to fail altogether. Consistent 28011 * with other property creation in attach. 28012 */ 28013 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 28014 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 28015 28016 } else { 28017 /* 28018 * create device ID for device 28019 */ 28020 un->un_f_devid_supported = TRUE; 28021 28022 /* 28023 * Spin up non-removable-media devices once it is attached 28024 */ 28025 un->un_f_attach_spinup = TRUE; 28026 28027 /* 28028 * According to SCSI specification, Sense data has two kinds of 28029 * format: fixed format, and descriptor format. At present, we 28030 * don't support descriptor format sense data for removable 28031 * media. 28032 */ 28033 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 28034 un->un_f_descr_format_supported = TRUE; 28035 } 28036 28037 /* 28038 * kstats are created only for non-removable media devices. 28039 * 28040 * Set this in sd.conf to 0 in order to disable kstats. The 28041 * default is 1, so they are enabled by default. 28042 */ 28043 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 28044 SD_DEVINFO(un), DDI_PROP_DONTPASS, 28045 "enable-partition-kstats", 1)); 28046 28047 /* 28048 * Check if HBA has set the "pm-capable" property. 28049 * If "pm-capable" exists and is non-zero then we can 28050 * power manage the device without checking the start/stop 28051 * cycle count log sense page. 28052 * 28053 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 28054 * then we should not power manage the device. 28055 * 28056 * If "pm-capable" doesn't exist then pm_capable_prop will 28057 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 

        /*
         * Check whether the HBA has set the "pm-capable" property. If
         * "pm-capable" exists and is non-zero, we can power manage the
         * device without checking the start/stop cycle count log sense
         * page.
         *
         * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0), we
         * should not power manage the device.
         *
         * If "pm-capable" doesn't exist, pm_capable_prop will be set
         * to SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd will check
         * the start/stop cycle count log sense page and power manage
         * the device if the cycle count limit has not been exceeded.
         */
        pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
            DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
        if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
            un->un_f_log_sense_supported = TRUE;
        } else {
            /*
             * The "pm-capable" property exists.
             *
             * Convert "TRUE" values of pm_capable_prop to
             * SD_PM_CAPABLE_TRUE (1) to make later checks easier.
             * "TRUE" values are any values except
             * SD_PM_CAPABLE_FALSE (0) and
             * SD_PM_CAPABLE_UNDEFINED (-1).
             */
            if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
                un->un_f_log_sense_supported = FALSE;
            } else {
                un->un_f_pm_supported = TRUE;
            }

            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p pm-capable "
                "property set to %d.\n", un, un->un_f_pm_supported);
        }
    }

    if (un->un_f_is_hotpluggable) {
        /*
         * Have to watch hotpluggable devices as well, since that's the
         * only way for userland applications to detect hot removal
         * while the device is busy/mounted.
         */
        un->un_f_monitor_media_state = TRUE;

        un->un_f_check_start_stop = TRUE;
    }
}

/*
 * sd_tg_rdwr:
 *	Provides read/write access for cmlb via sd_tgops. start_block is
 *	in system (not target) block size units; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
    struct sd_lun *un;
    int path_flag = (int)(uintptr_t)tg_cookie;
    char *dkl = NULL;
    diskaddr_t real_addr = start_block;
    diskaddr_t first_byte, end_block;

    size_t buffer_size = reqlength;
    int rval;
    diskaddr_t cap;
    uint32_t lbasize;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL)
        return (ENXIO);

    if (cmd != TG_READ && cmd != TG_WRITE)
        return (EINVAL);

    mutex_enter(SD_MUTEX(un));
    if (un->un_f_tgt_blocksize_is_valid == FALSE) {
        mutex_exit(SD_MUTEX(un));
        rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
            &lbasize, path_flag);
        if (rval != 0)
            return (rval);
        mutex_enter(SD_MUTEX(un));
        sd_update_block_info(un, lbasize, cap);
        if (un->un_f_tgt_blocksize_is_valid == FALSE) {
            mutex_exit(SD_MUTEX(un));
            return (EIO);
        }
    }

    if (NOT_DEVBSIZE(un)) {
        /*
         * sys_blocksize != tgt_blocksize: re-adjust the block number
         * to target block boundaries and remember where the requested
         * data (e.g. the start of a dk_label) begins.
         */
        first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
        real_addr = first_byte / un->un_tgt_blocksize;

        end_block = (first_byte + reqlength +
            un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

        /* round up the buffer size to a multiple of target block size */
        buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

        SD_TRACE(SD_LOG_IO_PARTITION, un,
            "sd_tg_rdwr: label_addr: 0x%x allocation size: 0x%x\n",
            real_addr, buffer_size);

        if (((first_byte % un->un_tgt_blocksize) != 0) ||
            (reqlength % un->un_tgt_blocksize) != 0)
            /* the request is not aligned */
            dkl = kmem_zalloc(buffer_size, KM_SLEEP);
    }
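
    /*
     * A worked example of the conversion above, assuming a 512-byte
     * system block size and a 2048-byte target block size: a request
     * with start_block == 3 and reqlength == 1024 yields first_byte ==
     * 1536, real_addr == 0 and end_block == 2, so buffer_size becomes
     * 4096 (two whole target blocks). Since 1536 is not a multiple of
     * 2048, the request is unaligned and dkl is allocated as a bounce
     * buffer; the requested bytes are then copied to or from the proper
     * offset within it below.
     */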

    /*
     * The MMC standard allows READ CAPACITY to be inaccurate by a
     * bounded amount (in the interest of response latency). As a
     * result, failed READs are commonplace (due to the reading of
     * metadata and not data). Depending on the per-vendor/drive sense
     * data, the failed READ can cause many (unnecessary) retries.
     */

    if (ISCD(un) && (cmd == TG_READ) &&
        (un->un_f_blockcount_is_valid == TRUE) &&
        ((start_block == (un->un_blockcount - 1)) ||
        (start_block == (un->un_blockcount - 2)))) {
        path_flag = SD_PATH_DIRECT_PRIORITY;
    }

    mutex_exit(SD_MUTEX(un));
    if (cmd == TG_READ) {
        rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
            buffer_size, real_addr, path_flag);
        if (dkl != NULL)
            bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
                real_addr), bufaddr, reqlength);
    } else {
        if (dkl) {
            rval = sd_send_scsi_READ(un, dkl, buffer_size,
                real_addr, path_flag);
            if (rval) {
                kmem_free(dkl, buffer_size);
                return (rval);
            }
            bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
                real_addr), reqlength);
        }
        rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
            buffer_size, real_addr, path_flag);
    }

    if (dkl != NULL)
        kmem_free(dkl, buffer_size);

    return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
    struct sd_lun *un;
    diskaddr_t cap;
    uint32_t lbasize;
    int path_flag = (int)(uintptr_t)tg_cookie;
    int ret = 0;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL)
        return (ENXIO);

    switch (cmd) {
    case TG_GETPHYGEOM:
    case TG_GETVIRTGEOM:
    case TG_GETCAPACITY:
    case TG_GETBLOCKSIZE:
        mutex_enter(SD_MUTEX(un));

        if ((un->un_f_blockcount_is_valid == TRUE) &&
            (un->un_f_tgt_blocksize_is_valid == TRUE)) {
            cap = un->un_blockcount;
            lbasize = un->un_tgt_blocksize;
            mutex_exit(SD_MUTEX(un));
        } else {
            mutex_exit(SD_MUTEX(un));
            ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
                &lbasize, path_flag);
            if (ret != 0)
                return (ret);
            mutex_enter(SD_MUTEX(un));
            sd_update_block_info(un, lbasize, cap);
            if ((un->un_f_blockcount_is_valid == FALSE) ||
                (un->un_f_tgt_blocksize_is_valid == FALSE)) {
                mutex_exit(SD_MUTEX(un));
                return (EIO);
            }
            mutex_exit(SD_MUTEX(un));
        }

        if (cmd == TG_GETCAPACITY) {
            *(diskaddr_t *)arg = cap;
            return (0);
        }

        if (cmd == TG_GETBLOCKSIZE) {
            *(uint32_t *)arg = lbasize;
            return (0);
        }

        if (cmd == TG_GETPHYGEOM)
            ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
                cap, lbasize, path_flag);
        else
            /* TG_GETVIRTGEOM */
            ret = sd_get_virtual_geometry(un,
                (cmlb_geom_t *)arg, cap, lbasize);

        return (ret);

    case TG_GETATTR:
        mutex_enter(SD_MUTEX(un));
        ((tg_attribute_t *)arg)->media_is_writable =
            un->un_f_mmc_writable_media;
        mutex_exit(SD_MUTEX(un));
        return (0);

    default:
        return (ENOTTY);
    }
}
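
/*
 * For reference, cmlb reaches the two entry points above through the
 * tg_ops vector (sd_tgops) that sd hands to cmlb at attach time. An
 * equivalent direct call fetching the capacity might look as follows
 * (illustrative only; it assumes SD_PATH_DIRECT, from sddef.h, as the
 * path flag encoded in tg_cookie):
 *
 *	diskaddr_t cap;
 *
 *	if (sd_tg_getinfo(devi, TG_GETCAPACITY, &cap,
 *	    (void *)(uintptr_t)SD_PATH_DIRECT) == 0) {
 *		(the capacity, in target blocks, is now in cap)
 *	}
 */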