/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 *
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
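
/*
 * Illustrative sketch only (not compiled): the attach-time check described
 * above could look roughly like the fragment below.  ddi_prop_get_int() is
 * real DDI, but the surrounding names ("devi", "un") and the exact mapping
 * are placeholders for exposition, not the driver's actual code.
 */
#if 0
	int itype = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
	    "interconnect-type", -1);

	if (itype == INTERCONNECT_FIBRE || itype == INTERCONNECT_SSA ||
	    itype == INTERCONNECT_FABRIC) {
		/* Property defined AND fibre-like: old ssd (FC) behaviors */
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
	} else {
		/* Undefined, 1394, USB, etc.: parallel SCSI behaviors */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
	}
#endif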

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size	ssd_max_xfer_size
#define	sd_config_list		ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int sd_force_pm_supported = 0;
#endif	/* SDDEBUG */

void *sd_state = NULL;
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
int sd_rot_delay = 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable = TRUE;

int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind = -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask = 0x0;
uint_t	sd_level_mask = 0x0;
struct	sd_lun *sd_debug_un = NULL;
uint_t	sd_error_level = SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit = 512;
static uint32_t sd_xbuf_reserve_limit = 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time = 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
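
/*
 * Illustrative sketch only (not compiled): the intended use of the probe
 * cache is to consult the cached scsi_probe() result for a target before
 * issuing a real probe, so that repeated probes of absent targets stay
 * cheap.  "cp" and "tgt" are placeholders; the driver's real logic lives
 * in sd_scsi_probe_with_cache().
 */
#if 0
	mutex_enter(&sd_scsi_probe_cache_mutex);
	if (cp->cache[tgt] == SCSIPROBE_NORESP) {
		/* Cached "no response": skip the (slow) physical probe. */
		mutex_exit(&sd_scsi_probe_cache_mutex);
		return (SCSIPROBE_NORESP);
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);

	rval = scsi_probe(devp, fn);	/* real probe; result gets cached */
#endif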


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
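
/*
 * Illustrative sketch only (not compiled): one way to implement the
 * blank-squeezing comparison described above, so that " NEC CDROM "
 * matches "NEC CDROM".  The driver's real logic lives in sd_blank_cmp();
 * this helper is exposition only.
 */
#if 0
static int
blank_squeezed_match(const char *a, const char *b)
{
	while (*a == ' ')
		a++;			/* leading blanks match nothing */
	while (*b == ' ')
		b++;
	for (;;) {
		if (*a == ' ' && *b == ' ') {
			while (*a == ' ')
				a++;	/* a run of blanks counts as one */
			while (*b == ' ')
				b++;
			continue;
		}
		if (*a == ' ' && *b == '\0') {
			while (*a == ' ')
				a++;	/* trailing blanks match nothing */
			continue;
		}
		if (*b == ' ' && *a == '\0') {
			while (*b == ' ')
				b++;
			continue;
		}
		if (*a != *b)
			return (0);
		if (*a == '\0')
			return (1);	/* both strings exhausted */
		a++, b++;
	}
}
#endif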
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
"*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL }, 717 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL }, 718 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL }, 719 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 720 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 721 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 722 &symbios_properties }, 723 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 724 &lsi_properties_scsi }, 725 #if defined(__i386) || defined(__amd64) 726 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 727 | SD_CONF_BSET_READSUB_BCD 728 | SD_CONF_BSET_READ_TOC_ADDR_BCD 729 | SD_CONF_BSET_NO_READ_HEADER 730 | SD_CONF_BSET_READ_CD_XD4), NULL }, 731 732 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 733 | SD_CONF_BSET_READSUB_BCD 734 | SD_CONF_BSET_READ_TOC_ADDR_BCD 735 | SD_CONF_BSET_NO_READ_HEADER 736 | SD_CONF_BSET_READ_CD_XD4), NULL }, 737 #endif /* __i386 || __amd64 */ 738 #endif /* sparc NON-fibre or NON-sparc platforms */ 739 740 #if (defined(SD_PROP_TST)) 741 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 742 | SD_CONF_BSET_CTYPE 743 | SD_CONF_BSET_NRR_COUNT 744 | SD_CONF_BSET_FAB_DEVID 745 | SD_CONF_BSET_NOCACHE 746 | SD_CONF_BSET_BSY_RETRY_COUNT 747 | SD_CONF_BSET_PLAYMSF_BCD 748 | SD_CONF_BSET_READSUB_BCD 749 | SD_CONF_BSET_READ_TOC_TRK_BCD 750 | SD_CONF_BSET_READ_TOC_ADDR_BCD 751 | SD_CONF_BSET_NO_READ_HEADER 752 | SD_CONF_BSET_READ_CD_XD4 753 | SD_CONF_BSET_RST_RETRIES 754 | SD_CONF_BSET_RSV_REL_TIME 755 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 756 #endif 757 }; 758 759 static const int sd_disk_table_size = 760 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 761 762 763 764 #define SD_INTERCONNECT_PARALLEL 0 765 #define SD_INTERCONNECT_FABRIC 1 766 #define SD_INTERCONNECT_FIBRE 2 767 #define SD_INTERCONNECT_SSA 3 768 #define SD_INTERCONNECT_SATA 4 769 #define SD_IS_PARALLEL_SCSI(un) \ 770 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 771 #define SD_IS_SERIAL(un) \ 772 ((un)->un_interconnect_type == SD_INTERCONNECT_SATA) 773 774 /* 775 * Definitions used by device id registration routines 776 */ 777 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 778 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 779 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 780 781 static kmutex_t sd_sense_mutex = {0}; 782 783 /* 784 * Macros for updates of the driver state 785 */ 786 #define New_state(un, s) \ 787 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 788 #define Restore_state(un) \ 789 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 790 791 static struct sd_cdbinfo sd_cdbtab[] = { 792 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 793 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 794 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 795 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 796 }; 797 798 /* 799 * Specifies the number of seconds that must have elapsed since the last 800 * cmd. has completed for a device to be declared idle to the PM framework. 801 */ 802 static int sd_pm_idletime = 1; 803 804 /* 805 * Internal function prototypes 806 */ 807 808 #if (defined(__fibre)) 809 /* 810 * These #defines are to avoid namespace collisions that occur because this 811 * code is currently used to compile two separate driver modules: sd and ssd. 812 * All function names need to be treated this way (even if declared static) 813 * in order to allow the debugger to resolve the names properly. 

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF,  },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int  sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int	sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void	sd_enable_descr_sense(struct sd_lun *un);
static void	sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void	sd_set_mmc_caps(struct sd_lun *un);

static void	sd_read_unit_properties(struct sd_lun *un);
static int	sd_process_sdconf_file(struct sd_lun *un);
static void	sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void	sd_process_sdconf_table(struct sd_lun *un);
static int	sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int	sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int	sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void	sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void	sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int	sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int	sd_write_deviceid(struct sd_lun *un);
static int	sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int	sd_check_vpd_page_support(struct sd_lun *un);

static void	sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int	sd_ddi_suspend(dev_info_t *devi);
static int	sd_ddi_pm_suspend(struct sd_lun *un);
static int	sd_ddi_resume(dev_info_t *devi);
static int	sd_ddi_pm_resume(struct sd_lun *un);
static int	sdpower(dev_info_t *devi, int component, int level);

static int	sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int	sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int	sd_unit_attach(dev_info_t *devi);
static int	sd_unit_detach(dev_info_t *devi);

static void	sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_errstats(struct sd_lun *un, int instance);
static void	sd_set_errstats(struct sd_lun *un);
static void	sd_set_pstats(struct sd_lun *un);

static int	sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int	sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int	sd_send_polled_RQS(struct sd_lun *un);
static int	sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void	sd_init_event_callbacks(struct sd_lun *un);
static void	sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *,
    void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int	sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
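
/*
 * Usage sketch (exposition only): each flag argument takes one of the
 * SD_CACHE_* values above, so the read (rcd_flag) and write (wce_flag)
 * cache settings can be changed independently.  For example, enabling the
 * write cache while leaving the read cache setting untouched would look
 * like:
 *
 *	(void) sd_cache_control(un, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */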
static int	sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void	sd_get_nv_sup(struct sd_lun *un);
static dev_t	sd_make_device(dev_info_t *devi);

static void	sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int	sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int	sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int	sd_ready_and_valid(struct sd_lun *un);

static void	sdmin(struct buf *bp);
static int	sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int	sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int	sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int	sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int	sdstrategy(struct buf *bp);
static int	sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void	sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_checksum_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void	sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void	sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void	sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void	sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_checksum_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void	sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int	sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_buf(struct buf *);
static int	sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int	sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int	sd_uscsi_strategy(struct buf *bp);
static int	sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_uscsi(struct buf *);

static void	sd_xbuf_init(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, uchar_t chain_type, void *pktinfop);

static int	sd_pm_entry(struct sd_lun *un);
static void	sd_pm_exit(struct sd_lun *un);

static void	sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
flag); 1477 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1478 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1479 static int sd_mhdioc_release(dev_t dev); 1480 static int sd_mhdioc_register_devid(dev_t dev); 1481 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1482 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1483 1484 /* 1485 * SCSI removable prototypes 1486 */ 1487 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1488 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1489 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1490 static int sr_pause_resume(dev_t dev, int mode); 1491 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1492 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1493 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1494 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1495 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1496 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1497 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1498 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1499 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1500 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1501 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1502 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1503 static int sr_eject(dev_t dev); 1504 static void sr_ejected(register struct sd_lun *un); 1505 static int sr_check_wp(dev_t dev); 1506 static int sd_check_media(dev_t dev, enum dkio_state state); 1507 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1508 static void sd_delayed_cv_broadcast(void *arg); 1509 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1510 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1511 1512 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1513 1514 /* 1515 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1516 */ 1517 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1518 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1519 static void sd_wm_cache_destructor(void *wm, void *un); 1520 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1521 daddr_t endb, ushort_t typ); 1522 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1523 daddr_t endb); 1524 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1525 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1526 static void sd_read_modify_write_task(void * arg); 1527 static int 1528 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1529 struct buf **bpp); 1530 1531 1532 /* 1533 * Function prototypes for failfast support. 
1534 */ 1535 static void sd_failfast_flushq(struct sd_lun *un); 1536 static int sd_failfast_flushq_callback(struct buf *bp); 1537 1538 /* 1539 * Function prototypes to check for lsi devices 1540 */ 1541 static void sd_is_lsi(struct sd_lun *un); 1542 1543 /* 1544 * Function prototypes for partial DMA support 1545 */ 1546 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1547 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1548 1549 1550 /* Function prototypes for cmlb */ 1551 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1552 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1553 1554 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1555 1556 /* 1557 * Constants for failfast support: 1558 * 1559 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1560 * failfast processing being performed. 1561 * 1562 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1563 * failfast processing on all bufs with B_FAILFAST set. 1564 */ 1565 1566 #define SD_FAILFAST_INACTIVE 0 1567 #define SD_FAILFAST_ACTIVE 1 1568 1569 /* 1570 * Bitmask to control behavior of buf(9S) flushes when a transition to 1571 * the failfast state occurs. Optional bits include: 1572 * 1573 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1574 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1575 * be flushed. 1576 * 1577 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1578 * driver, in addition to the regular wait queue. This includes the xbuf 1579 * queues. When clear, only the driver's wait queue will be flushed. 1580 */ 1581 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1582 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1583 1584 /* 1585 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1586 * to flush all queues within the driver. 1587 */ 1588 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1589 1590 1591 /* 1592 * SD Testing Fault Injection 1593 */ 1594 #ifdef SD_FAULT_INJECTION 1595 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1596 static void sd_faultinjection(struct scsi_pkt *pktp); 1597 static void sd_injection_log(char *buf, struct sd_lun *un); 1598 #endif 1599 1600 /* 1601 * Device driver ops vector 1602 */ 1603 static struct cb_ops sd_cb_ops = { 1604 sdopen, /* open */ 1605 sdclose, /* close */ 1606 sdstrategy, /* strategy */ 1607 nodev, /* print */ 1608 sddump, /* dump */ 1609 sdread, /* read */ 1610 sdwrite, /* write */ 1611 sdioctl, /* ioctl */ 1612 nodev, /* devmap */ 1613 nodev, /* mmap */ 1614 nodev, /* segmap */ 1615 nochpoll, /* poll */ 1616 sd_prop_op, /* cb_prop_op */ 1617 0, /* streamtab */ 1618 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1619 CB_REV, /* cb_rev */ 1620 sdaread, /* async I/O read entry point */ 1621 sdawrite /* async I/O write entry point */ 1622 }; 1623 1624 static struct dev_ops sd_ops = { 1625 DEVO_REV, /* devo_rev, */ 1626 0, /* refcnt */ 1627 sdinfo, /* info */ 1628 nulldev, /* identify */ 1629 sdprobe, /* probe */ 1630 sdattach, /* attach */ 1631 sddetach, /* detach */ 1632 nodev, /* reset */ 1633 &sd_cb_ops, /* driver operations */ 1634 NULL, /* bus operations */ 1635 sdpower /* power */ 1636 }; 1637 1638 1639 /* 1640 * This is the loadable module wrapper. 1641 */ 1642 #include <sys/modctl.h> 1643 1644 static struct modldrv modldrv = { 1645 &mod_driverops, /* Type of module. 
This one is a driver */ 1646 SD_MODULE_NAME, /* Module name. */ 1647 &sd_ops /* driver ops */ 1648 }; 1649 1650 1651 static struct modlinkage modlinkage = { 1652 MODREV_1, 1653 &modldrv, 1654 NULL 1655 }; 1656 1657 static cmlb_tg_ops_t sd_tgops = { 1658 TG_DK_OPS_VERSION_1, 1659 sd_tg_rdwr, 1660 sd_tg_getinfo 1661 }; 1662 1663 static struct scsi_asq_key_strings sd_additional_codes[] = { 1664 0x81, 0, "Logical Unit is Reserved", 1665 0x85, 0, "Audio Address Not Valid", 1666 0xb6, 0, "Media Load Mechanism Failed", 1667 0xB9, 0, "Audio Play Operation Aborted", 1668 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1669 0x53, 2, "Medium removal prevented", 1670 0x6f, 0, "Authentication failed during key exchange", 1671 0x6f, 1, "Key not present", 1672 0x6f, 2, "Key not established", 1673 0x6f, 3, "Read without proper authentication", 1674 0x6f, 4, "Mismatched region to this logical unit", 1675 0x6f, 5, "Region reset count error", 1676 0xffff, 0x0, NULL 1677 }; 1678 1679 1680 /* 1681 * Struct for passing printing information for sense data messages 1682 */ 1683 struct sd_sense_info { 1684 int ssi_severity; 1685 int ssi_pfa_flag; 1686 }; 1687 1688 /* 1689 * Table of function pointers for iostart-side routines. Separate "chains" 1690 * of layered function calls are formed by placing the function pointers 1691 * sequentially in the desired order. Functions are called according to an 1692 * incrementing table index ordering. The last function in each chain must 1693 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1694 * in the sd_iodone_chain[] array. 1695 * 1696 * Note: It may seem more natural to organize both the iostart and iodone 1697 * functions together, into an array of structures (or some similar 1698 * organization) with a common index, rather than two separate arrays which 1699 * must be maintained in synchronization. The purpose of this division is 1700 * to achieve improved performance: individual arrays allow for more 1701 * effective cache line utilization on certain platforms.
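 *
 * As an example of the required synchronization between the two arrays:
 * in the removable-media (PM enabled) chain below, the iostart side runs
 * up through indexes 5..8 (mapblockaddr, mapblocksize, pm, core), while
 * the matching iodone side in sd_iodone_chain[] unwinds back down
 * through indexes 8..5 (pm, mapblocksize, mapblockaddr, buf).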
1702 */ 1703 1704 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1705 1706 1707 static sd_chain_t sd_iostart_chain[] = { 1708 1709 /* Chain for buf IO for disk drive targets (PM enabled) */ 1710 sd_mapblockaddr_iostart, /* Index: 0 */ 1711 sd_pm_iostart, /* Index: 1 */ 1712 sd_core_iostart, /* Index: 2 */ 1713 1714 /* Chain for buf IO for disk drive targets (PM disabled) */ 1715 sd_mapblockaddr_iostart, /* Index: 3 */ 1716 sd_core_iostart, /* Index: 4 */ 1717 1718 /* Chain for buf IO for removable-media targets (PM enabled) */ 1719 sd_mapblockaddr_iostart, /* Index: 5 */ 1720 sd_mapblocksize_iostart, /* Index: 6 */ 1721 sd_pm_iostart, /* Index: 7 */ 1722 sd_core_iostart, /* Index: 8 */ 1723 1724 /* Chain for buf IO for removable-media targets (PM disabled) */ 1725 sd_mapblockaddr_iostart, /* Index: 9 */ 1726 sd_mapblocksize_iostart, /* Index: 10 */ 1727 sd_core_iostart, /* Index: 11 */ 1728 1729 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1730 sd_mapblockaddr_iostart, /* Index: 12 */ 1731 sd_checksum_iostart, /* Index: 13 */ 1732 sd_pm_iostart, /* Index: 14 */ 1733 sd_core_iostart, /* Index: 15 */ 1734 1735 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1736 sd_mapblockaddr_iostart, /* Index: 16 */ 1737 sd_checksum_iostart, /* Index: 17 */ 1738 sd_core_iostart, /* Index: 18 */ 1739 1740 /* Chain for USCSI commands (all targets) */ 1741 sd_pm_iostart, /* Index: 19 */ 1742 sd_core_iostart, /* Index: 20 */ 1743 1744 /* Chain for checksumming USCSI commands (all targets) */ 1745 sd_checksum_uscsi_iostart, /* Index: 21 */ 1746 sd_pm_iostart, /* Index: 22 */ 1747 sd_core_iostart, /* Index: 23 */ 1748 1749 /* Chain for "direct" USCSI commands (all targets) */ 1750 sd_core_iostart, /* Index: 24 */ 1751 1752 /* Chain for "direct priority" USCSI commands (all targets) */ 1753 sd_core_iostart, /* Index: 25 */ 1754 }; 1755 1756 /* 1757 * Macros to locate the first function of each iostart chain in the 1758 * sd_iostart_chain[] array. These are located by the index in the array. 1759 */ 1760 #define SD_CHAIN_DISK_IOSTART 0 1761 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1762 #define SD_CHAIN_RMMEDIA_IOSTART 5 1763 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1764 #define SD_CHAIN_CHKSUM_IOSTART 12 1765 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1766 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1767 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1768 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1769 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1770 1771 1772 /* 1773 * Table of function pointers for the iodone-side routines for the driver- 1774 * internal layering mechanism. The calling sequence for iodone routines 1775 * uses a decrementing table index, so the last routine called in a chain 1776 * must be at the lowest array index location for that chain. The last 1777 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1778 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1779 * of the functions in an iodone side chain must correspond to the ordering 1780 * of the iostart routines for that chain. Note that there is no iodone 1781 * side routine that corresponds to sd_core_iostart(), so there is no 1782 * entry in the table for this. 
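 *
 * For example, a disk buf IO in the PM-enabled chain completes starting
 * at SD_CHAIN_DISK_IODONE (index 2, sd_pm_iodone), then walks down to
 * index 1 (sd_mapblockaddr_iodone), and finishes at index 0
 * (sd_buf_iodone).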
1783 */ 1784 1785 static sd_chain_t sd_iodone_chain[] = { 1786 1787 /* Chain for buf IO for disk drive targets (PM enabled) */ 1788 sd_buf_iodone, /* Index: 0 */ 1789 sd_mapblockaddr_iodone, /* Index: 1 */ 1790 sd_pm_iodone, /* Index: 2 */ 1791 1792 /* Chain for buf IO for disk drive targets (PM disabled) */ 1793 sd_buf_iodone, /* Index: 3 */ 1794 sd_mapblockaddr_iodone, /* Index: 4 */ 1795 1796 /* Chain for buf IO for removable-media targets (PM enabled) */ 1797 sd_buf_iodone, /* Index: 5 */ 1798 sd_mapblockaddr_iodone, /* Index: 6 */ 1799 sd_mapblocksize_iodone, /* Index: 7 */ 1800 sd_pm_iodone, /* Index: 8 */ 1801 1802 /* Chain for buf IO for removable-media targets (PM disabled) */ 1803 sd_buf_iodone, /* Index: 9 */ 1804 sd_mapblockaddr_iodone, /* Index: 10 */ 1805 sd_mapblocksize_iodone, /* Index: 11 */ 1806 1807 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1808 sd_buf_iodone, /* Index: 12 */ 1809 sd_mapblockaddr_iodone, /* Index: 13 */ 1810 sd_checksum_iodone, /* Index: 14 */ 1811 sd_pm_iodone, /* Index: 15 */ 1812 1813 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1814 sd_buf_iodone, /* Index: 16 */ 1815 sd_mapblockaddr_iodone, /* Index: 17 */ 1816 sd_checksum_iodone, /* Index: 18 */ 1817 1818 /* Chain for USCSI commands (non-checksum targets) */ 1819 sd_uscsi_iodone, /* Index: 19 */ 1820 sd_pm_iodone, /* Index: 20 */ 1821 1822 /* Chain for USCSI commands (checksum targets) */ 1823 sd_uscsi_iodone, /* Index: 21 */ 1824 sd_checksum_uscsi_iodone, /* Index: 22 */ 1825 sd_pm_iodone, /* Index: 23 */ 1826 1827 /* Chain for "direct" USCSI commands (all targets) */ 1828 sd_uscsi_iodone, /* Index: 24 */ 1829 1830 /* Chain for "direct priority" USCSI commands (all targets) */ 1831 sd_uscsi_iodone, /* Index: 25 */ 1832 }; 1833 1834 1835 /* 1836 * Macros to locate the "first" function in the sd_iodone_chain[] array for 1837 * each iodone-side chain. These are located by the array index, but as the 1838 * iodone side functions are called in a decrementing-index order, the 1839 * highest index number in each chain must be specified (as these correspond 1840 * to the first function in the iodone chain that will be called by the core 1841 * at IO completion time). 1842 */ 1843 1844 #define SD_CHAIN_DISK_IODONE 2 1845 #define SD_CHAIN_DISK_IODONE_NO_PM 4 1846 #define SD_CHAIN_RMMEDIA_IODONE 8 1847 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 1848 #define SD_CHAIN_CHKSUM_IODONE 15 1849 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 1850 #define SD_CHAIN_USCSI_CMD_IODONE 20 1851 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22 1852 #define SD_CHAIN_DIRECT_CMD_IODONE 24 1853 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 1854 1855 1856 1857 1858 /* 1859 * Array to map a layering chain index to the appropriate initpkt routine. 1860 * The redundant entries are present so that the index used for accessing 1861 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1862 * with this table as well.
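 *
 * For example, an xbuf whose xb_chain_iostart index falls anywhere in
 * the disk buf IO chain (indexes 0..2) resolves to sd_initpkt_for_buf
 * here, while index 24 (the "direct" USCSI chain) resolves to
 * sd_initpkt_for_uscsi.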
1863 */ 1864 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 1865 1866 static sd_initpkt_t sd_initpkt_map[] = { 1867 1868 /* Chain for buf IO for disk drive targets (PM enabled) */ 1869 sd_initpkt_for_buf, /* Index: 0 */ 1870 sd_initpkt_for_buf, /* Index: 1 */ 1871 sd_initpkt_for_buf, /* Index: 2 */ 1872 1873 /* Chain for buf IO for disk drive targets (PM disabled) */ 1874 sd_initpkt_for_buf, /* Index: 3 */ 1875 sd_initpkt_for_buf, /* Index: 4 */ 1876 1877 /* Chain for buf IO for removable-media targets (PM enabled) */ 1878 sd_initpkt_for_buf, /* Index: 5 */ 1879 sd_initpkt_for_buf, /* Index: 6 */ 1880 sd_initpkt_for_buf, /* Index: 7 */ 1881 sd_initpkt_for_buf, /* Index: 8 */ 1882 1883 /* Chain for buf IO for removable-media targets (PM disabled) */ 1884 sd_initpkt_for_buf, /* Index: 9 */ 1885 sd_initpkt_for_buf, /* Index: 10 */ 1886 sd_initpkt_for_buf, /* Index: 11 */ 1887 1888 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1889 sd_initpkt_for_buf, /* Index: 12 */ 1890 sd_initpkt_for_buf, /* Index: 13 */ 1891 sd_initpkt_for_buf, /* Index: 14 */ 1892 sd_initpkt_for_buf, /* Index: 15 */ 1893 1894 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1895 sd_initpkt_for_buf, /* Index: 16 */ 1896 sd_initpkt_for_buf, /* Index: 17 */ 1897 sd_initpkt_for_buf, /* Index: 18 */ 1898 1899 /* Chain for USCSI commands (non-checksum targets) */ 1900 sd_initpkt_for_uscsi, /* Index: 19 */ 1901 sd_initpkt_for_uscsi, /* Index: 20 */ 1902 1903 /* Chain for USCSI commands (checksum targets) */ 1904 sd_initpkt_for_uscsi, /* Index: 21 */ 1905 sd_initpkt_for_uscsi, /* Index: 22 */ 1906 sd_initpkt_for_uscsi, /* Index: 23 */ 1907 1908 /* Chain for "direct" USCSI commands (all targets) */ 1909 sd_initpkt_for_uscsi, /* Index: 24 */ 1910 1911 /* Chain for "direct priority" USCSI commands (all targets) */ 1912 sd_initpkt_for_uscsi, /* Index: 25 */ 1913 1914 }; 1915 1916 1917 /* 1918 * Array to map a layering chain index to the appropriate destroypkt routine. 1919 * The redundant entries are present so that the index used for accessing 1920 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1921 * with this table as well.
1922 */ 1923 typedef void (*sd_destroypkt_t)(struct buf *); 1924 1925 static sd_destroypkt_t sd_destroypkt_map[] = { 1926 1927 /* Chain for buf IO for disk drive targets (PM enabled) */ 1928 sd_destroypkt_for_buf, /* Index: 0 */ 1929 sd_destroypkt_for_buf, /* Index: 1 */ 1930 sd_destroypkt_for_buf, /* Index: 2 */ 1931 1932 /* Chain for buf IO for disk drive targets (PM disabled) */ 1933 sd_destroypkt_for_buf, /* Index: 3 */ 1934 sd_destroypkt_for_buf, /* Index: 4 */ 1935 1936 /* Chain for buf IO for removable-media targets (PM enabled) */ 1937 sd_destroypkt_for_buf, /* Index: 5 */ 1938 sd_destroypkt_for_buf, /* Index: 6 */ 1939 sd_destroypkt_for_buf, /* Index: 7 */ 1940 sd_destroypkt_for_buf, /* Index: 8 */ 1941 1942 /* Chain for buf IO for removable-media targets (PM disabled) */ 1943 sd_destroypkt_for_buf, /* Index: 9 */ 1944 sd_destroypkt_for_buf, /* Index: 10 */ 1945 sd_destroypkt_for_buf, /* Index: 11 */ 1946 1947 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1948 sd_destroypkt_for_buf, /* Index: 12 */ 1949 sd_destroypkt_for_buf, /* Index: 13 */ 1950 sd_destroypkt_for_buf, /* Index: 14 */ 1951 sd_destroypkt_for_buf, /* Index: 15 */ 1952 1953 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1954 sd_destroypkt_for_buf, /* Index: 16 */ 1955 sd_destroypkt_for_buf, /* Index: 17 */ 1956 sd_destroypkt_for_buf, /* Index: 18 */ 1957 1958 /* Chain for USCSI commands (non-checksum targets) */ 1959 sd_destroypkt_for_uscsi, /* Index: 19 */ 1960 sd_destroypkt_for_uscsi, /* Index: 20 */ 1961 1962 /* Chain for USCSI commands (checksum targets) */ 1963 sd_destroypkt_for_uscsi, /* Index: 21 */ 1964 sd_destroypkt_for_uscsi, /* Index: 22 */ 1965 sd_destroypkt_for_uscsi, /* Index: 23 */ 1966 1967 /* Chain for "direct" USCSI commands (all targets) */ 1968 sd_destroypkt_for_uscsi, /* Index: 24 */ 1969 1970 /* Chain for "direct priority" USCSI commands (all targets) */ 1971 sd_destroypkt_for_uscsi, /* Index: 25 */ 1972 1973 }; 1974 1975 1976 1977 /* 1978 * Array to map a layering chain index to the appropriate chain "type". 1979 * The chain type indicates a specific property/usage of the chain. 1980 * The redundant entries are present so that the index used for accessing 1981 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1982 * with this table as well.
1983 */ 1984 1985 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 1986 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 1987 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 1988 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 1989 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 1990 /* (for error recovery) */ 1991 1992 static int sd_chain_type_map[] = { 1993 1994 /* Chain for buf IO for disk drive targets (PM enabled) */ 1995 SD_CHAIN_BUFIO, /* Index: 0 */ 1996 SD_CHAIN_BUFIO, /* Index: 1 */ 1997 SD_CHAIN_BUFIO, /* Index: 2 */ 1998 1999 /* Chain for buf IO for disk drive targets (PM disabled) */ 2000 SD_CHAIN_BUFIO, /* Index: 3 */ 2001 SD_CHAIN_BUFIO, /* Index: 4 */ 2002 2003 /* Chain for buf IO for removable-media targets (PM enabled) */ 2004 SD_CHAIN_BUFIO, /* Index: 5 */ 2005 SD_CHAIN_BUFIO, /* Index: 6 */ 2006 SD_CHAIN_BUFIO, /* Index: 7 */ 2007 SD_CHAIN_BUFIO, /* Index: 8 */ 2008 2009 /* Chain for buf IO for removable-media targets (PM disabled) */ 2010 SD_CHAIN_BUFIO, /* Index: 9 */ 2011 SD_CHAIN_BUFIO, /* Index: 10 */ 2012 SD_CHAIN_BUFIO, /* Index: 11 */ 2013 2014 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2015 SD_CHAIN_BUFIO, /* Index: 12 */ 2016 SD_CHAIN_BUFIO, /* Index: 13 */ 2017 SD_CHAIN_BUFIO, /* Index: 14 */ 2018 SD_CHAIN_BUFIO, /* Index: 15 */ 2019 2020 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2021 SD_CHAIN_BUFIO, /* Index: 16 */ 2022 SD_CHAIN_BUFIO, /* Index: 17 */ 2023 SD_CHAIN_BUFIO, /* Index: 18 */ 2024 2025 /* Chain for USCSI commands (non-checksum targets) */ 2026 SD_CHAIN_USCSI, /* Index: 19 */ 2027 SD_CHAIN_USCSI, /* Index: 20 */ 2028 2029 /* Chain for USCSI commands (checksum targets) */ 2030 SD_CHAIN_USCSI, /* Index: 21 */ 2031 SD_CHAIN_USCSI, /* Index: 22 */ 2032 SD_CHAIN_USCSI, /* Index: 23 */ 2033 2034 /* Chain for "direct" USCSI commands (all targets) */ 2035 SD_CHAIN_DIRECT, /* Index: 24 */ 2036 2037 /* Chain for "direct priority" USCSI commands (all targets) */ 2038 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 2039 }; 2040 2041 2042 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */ 2043 #define SD_IS_BUFIO(xp) \ 2044 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2045 2046 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2047 #define SD_IS_DIRECT_PRIORITY(xp) \ 2048 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2049 2050 2051 2052 /* 2053 * Struct, array, and macros to map a specific chain to the appropriate 2054 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2055 * 2056 * The sd_chain_index_map[] array is used at attach time to set the various 2057 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2058 * chain to be used with the instance. This allows different instances to use 2059 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart 2060 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2061 * values at sd_xbuf init time, this allows (1) layering chains to be changed 2062 * dynamically and without the use of locking; and (2) a layer to update the 2063 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2064 * to allow for deferred processing of an IO within the same chain from a 2065 * different execution context.
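 *
 * As an illustrative example: an instance attached with
 * un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA selects the
 * { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE } entry below, so
 * each of its buf xbufs is initialized with xb_chain_iostart = 5 and
 * xb_chain_iodone = 8.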
2066 */ 2067 2068 struct sd_chain_index { 2069 int sci_iostart_index; 2070 int sci_iodone_index; 2071 }; 2072 2073 static struct sd_chain_index sd_chain_index_map[] = { 2074 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2075 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2076 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2077 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2078 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2079 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2080 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2081 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2082 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2083 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2084 }; 2085 2086 2087 /* 2088 * The following are indexes into the sd_chain_index_map[] array. 2089 */ 2090 2091 /* un->un_buf_chain_type must be set to one of these */ 2092 #define SD_CHAIN_INFO_DISK 0 2093 #define SD_CHAIN_INFO_DISK_NO_PM 1 2094 #define SD_CHAIN_INFO_RMMEDIA 2 2095 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2096 #define SD_CHAIN_INFO_CHKSUM 4 2097 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2098 2099 /* un->un_uscsi_chain_type must be set to one of these */ 2100 #define SD_CHAIN_INFO_USCSI_CMD 6 2101 /* USCSI with PM disabled is the same as DIRECT */ 2102 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2103 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2104 2105 /* un->un_direct_chain_type must be set to one of these */ 2106 #define SD_CHAIN_INFO_DIRECT_CMD 8 2107 2108 /* un->un_priority_chain_type must be set to one of these */ 2109 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2110 2111 /* size for devid inquiries */ 2112 #define MAX_INQUIRY_SIZE 0xF0 2113 2114 /* 2115 * Macros used by functions to pass a given buf(9S) struct along to the 2116 * next function in the layering chain for further processing. 2117 * 2118 * In the following macros, passing more than three arguments to the called 2119 * routines causes the optimizer for the SPARC compiler to stop doing tail 2120 * call elimination which results in significant performance degradation. 2121 */ 2122 #define SD_BEGIN_IOSTART(index, un, bp) \ 2123 ((*(sd_iostart_chain[index]))(index, un, bp)) 2124 2125 #define SD_BEGIN_IODONE(index, un, bp) \ 2126 ((*(sd_iodone_chain[index]))(index, un, bp)) 2127 2128 #define SD_NEXT_IOSTART(index, un, bp) \ 2129 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2130 2131 #define SD_NEXT_IODONE(index, un, bp) \ 2132 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2133 2134 /* 2135 * Function: _init 2136 * 2137 * Description: This is the driver _init(9E) entry point. 2138 * 2139 * Return Code: Returns the value from mod_install(9F) or 2140 * ddi_soft_state_init(9F) as appropriate. 2141 * 2142 * Context: Called when driver module loaded. 
2143 */ 2144 2145 int 2146 _init(void) 2147 { 2148 int err; 2149 2150 /* establish driver name from module name */ 2151 sd_label = (char *)mod_modname(&modlinkage); 2152 2153 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2154 SD_MAXUNIT); 2155 2156 if (err != 0) { 2157 return (err); 2158 } 2159 2160 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2161 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2162 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2163 2164 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2165 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2166 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2167 2168 /* 2169 * it's ok to init here even for fibre devices 2170 */ 2171 sd_scsi_probe_cache_init(); 2172 2173 sd_scsi_target_lun_init(); 2174 2175 /* 2176 * Creating taskq before mod_install ensures that all callers (threads) 2177 * that enter the module after a successful mod_install encounter 2178 * a valid taskq. 2179 */ 2180 sd_taskq_create(); 2181 2182 err = mod_install(&modlinkage); 2183 if (err != 0) { 2184 /* delete taskq if install fails */ 2185 sd_taskq_delete(); 2186 2187 mutex_destroy(&sd_detach_mutex); 2188 mutex_destroy(&sd_log_mutex); 2189 mutex_destroy(&sd_label_mutex); 2190 2191 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2192 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2193 cv_destroy(&sd_tr.srq_inprocess_cv); 2194 2195 sd_scsi_probe_cache_fini(); 2196 2197 sd_scsi_target_lun_fini(); 2198 2199 ddi_soft_state_fini(&sd_state); 2200 return (err); 2201 } 2202 2203 return (err); 2204 } 2205 2206 2207 /* 2208 * Function: _fini 2209 * 2210 * Description: This is the driver _fini(9E) entry point. 2211 * 2212 * Return Code: Returns the value from mod_remove(9F). 2213 * 2214 * Context: Called when driver module is unloaded. 2215 */ 2216 2217 int 2218 _fini(void) 2219 { 2220 int err; 2221 2222 if ((err = mod_remove(&modlinkage)) != 0) { 2223 return (err); 2224 } 2225 2226 sd_taskq_delete(); 2227 2228 mutex_destroy(&sd_detach_mutex); 2229 mutex_destroy(&sd_log_mutex); 2230 mutex_destroy(&sd_label_mutex); 2231 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2232 2233 sd_scsi_probe_cache_fini(); 2234 2235 sd_scsi_target_lun_fini(); 2236 2237 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2238 cv_destroy(&sd_tr.srq_inprocess_cv); 2239 2240 ddi_soft_state_fini(&sd_state); 2241 2242 return (err); 2243 } 2244 2245 2246 /* 2247 * Function: _info 2248 * 2249 * Description: This is the driver _info(9E) entry point. 2250 * 2251 * Arguments: modinfop - pointer to the driver modinfo structure 2252 * 2253 * Return Code: Returns the value from mod_info(9F). 2254 * 2255 * Context: Kernel thread context 2256 */ 2257 2258 int 2259 _info(struct modinfo *modinfop) 2260 { 2261 return (mod_info(&modlinkage, modinfop)); 2262 } 2263 2264 2265 /* 2266 * The following routines implement the driver message logging facility. 2267 * They provide component- and level-based debug output filtering. 2268 * Output may also be restricted to messages for a single instance by 2269 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2270 * to NULL, then messages for all instances are printed. 2271 * 2272 * These routines have been cloned from each other due to the language 2273 * constraints of macros and variable argument list processing. 2274 */ 2275 2276 2277 /* 2278 * Function: sd_log_err 2279 * 2280 * Description: This routine is called by the SD_ERROR macro for debug 2281 * logging of error conditions.
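 * For example, sd_enable_descr_sense() below reports a failed mode
 * sense via:
 *	SD_ERROR(SD_LOG_COMMON, un,
 *	    "sd_enable_descr_sense: mode sense ctrl page failed\n");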
2282 * 2283 * Arguments: comp - driver component being logged 2284 * un - driver soft state (unit) structure 2285 * fmt - error string and format to be logged 2286 */ 2287 2288 static void 2289 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2290 { 2291 va_list ap; 2292 dev_info_t *dev; 2293 2294 ASSERT(un != NULL); 2295 dev = SD_DEVINFO(un); 2296 ASSERT(dev != NULL); 2297 2298 /* 2299 * Filter messages based on the global component and level masks. 2300 * Also print if un matches the value of sd_debug_un, or if 2301 * sd_debug_un is set to NULL. 2302 */ 2303 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2304 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2305 mutex_enter(&sd_log_mutex); 2306 va_start(ap, fmt); 2307 (void) vsprintf(sd_log_buf, fmt, ap); 2308 va_end(ap); 2309 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2310 mutex_exit(&sd_log_mutex); 2311 } 2312 #ifdef SD_FAULT_INJECTION 2313 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2314 if (un->sd_injection_mask & comp) { 2315 mutex_enter(&sd_log_mutex); 2316 va_start(ap, fmt); 2317 (void) vsprintf(sd_log_buf, fmt, ap); 2318 va_end(ap); 2319 sd_injection_log(sd_log_buf, un); 2320 mutex_exit(&sd_log_mutex); 2321 } 2322 #endif 2323 } 2324 2325 2326 /* 2327 * Function: sd_log_info 2328 * 2329 * Description: This routine is called by the SD_INFO macro for debug 2330 * logging of general purpose informational conditions. 2331 * 2332 * Arguments: component - driver component being logged 2333 * un - driver soft state (unit) structure 2334 * fmt - info string and format to be logged 2335 */ 2336 2337 static void 2338 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2339 { 2340 va_list ap; 2341 dev_info_t *dev; 2342 2343 ASSERT(un != NULL); 2344 dev = SD_DEVINFO(un); 2345 ASSERT(dev != NULL); 2346 2347 /* 2348 * Filter messages based on the global component and level masks. 2349 * Also print if un matches the value of sd_debug_un, or if 2350 * sd_debug_un is set to NULL. 2351 */ 2352 if ((sd_component_mask & component) && 2353 (sd_level_mask & SD_LOGMASK_INFO) && 2354 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2355 mutex_enter(&sd_log_mutex); 2356 va_start(ap, fmt); 2357 (void) vsprintf(sd_log_buf, fmt, ap); 2358 va_end(ap); 2359 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2360 mutex_exit(&sd_log_mutex); 2361 } 2362 #ifdef SD_FAULT_INJECTION 2363 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2364 if (un->sd_injection_mask & component) { 2365 mutex_enter(&sd_log_mutex); 2366 va_start(ap, fmt); 2367 (void) vsprintf(sd_log_buf, fmt, ap); 2368 va_end(ap); 2369 sd_injection_log(sd_log_buf, un); 2370 mutex_exit(&sd_log_mutex); 2371 } 2372 #endif 2373 } 2374 2375 2376 /* 2377 * Function: sd_log_trace 2378 * 2379 * Description: This routine is called by the SD_TRACE macro for debug 2380 * logging of trace conditions (i.e. function entry/exit). 2381 * 2382 * Arguments: component - driver component being logged 2383 * un - driver soft state (unit) structure 2384 * fmt - trace string and format to be logged 2385 */ 2386 2387 static void 2388 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2389 { 2390 va_list ap; 2391 dev_info_t *dev; 2392 2393 ASSERT(un != NULL); 2394 dev = SD_DEVINFO(un); 2395 ASSERT(dev != NULL); 2396 2397 /* 2398 * Filter messages based on the global component and level masks. 2399 * Also print if un matches the value of sd_debug_un, or if 2400 * sd_debug_un is set to NULL.
2401 */ 2402 if ((sd_component_mask & component) && 2403 (sd_level_mask & SD_LOGMASK_TRACE) && 2404 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2405 mutex_enter(&sd_log_mutex); 2406 va_start(ap, fmt); 2407 (void) vsprintf(sd_log_buf, fmt, ap); 2408 va_end(ap); 2409 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2410 mutex_exit(&sd_log_mutex); 2411 } 2412 #ifdef SD_FAULT_INJECTION 2413 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2414 if (un->sd_injection_mask & component) { 2415 mutex_enter(&sd_log_mutex); 2416 va_start(ap, fmt); 2417 (void) vsprintf(sd_log_buf, fmt, ap); 2418 va_end(ap); 2419 sd_injection_log(sd_log_buf, un); 2420 mutex_exit(&sd_log_mutex); 2421 } 2422 #endif 2423 } 2424 2425 2426 /* 2427 * Function: sdprobe 2428 * 2429 * Description: This is the driver probe(9e) entry point function. 2430 * 2431 * Arguments: devi - opaque device info handle 2432 * 2433 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2434 * DDI_PROBE_FAILURE: If the probe failed. 2435 * DDI_PROBE_PARTIAL: If the instance is not present now, 2436 * but may be present in the future. 2437 */ 2438 2439 static int 2440 sdprobe(dev_info_t *devi) 2441 { 2442 struct scsi_device *devp; 2443 int rval; 2444 int instance; 2445 2446 /* 2447 * if it wasn't for pln, sdprobe could actually be nulldev 2448 * in the "__fibre" case. 2449 */ 2450 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2451 return (DDI_PROBE_DONTCARE); 2452 } 2453 2454 devp = ddi_get_driver_private(devi); 2455 2456 if (devp == NULL) { 2457 /* Ooops... nexus driver is mis-configured... */ 2458 return (DDI_PROBE_FAILURE); 2459 } 2460 2461 instance = ddi_get_instance(devi); 2462 2463 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2464 return (DDI_PROBE_PARTIAL); 2465 } 2466 2467 /* 2468 * Call the SCSA utility probe routine to see if we actually 2469 * have a target at this SCSI nexus. 2470 */ 2471 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2472 case SCSIPROBE_EXISTS: 2473 switch (devp->sd_inq->inq_dtype) { 2474 case DTYPE_DIRECT: 2475 rval = DDI_PROBE_SUCCESS; 2476 break; 2477 case DTYPE_RODIRECT: 2478 /* CDs etc. Can be removable media */ 2479 rval = DDI_PROBE_SUCCESS; 2480 break; 2481 case DTYPE_OPTICAL: 2482 /* 2483 * Rewritable optical driver HP115AA 2484 * Can also be removable media 2485 */ 2486 2487 /* 2488 * Do not attempt to bind to DTYPE_OPTICAL if 2489 * pre solaris 9 sparc sd behavior is required 2490 * 2491 * If first time through and sd_dtype_optical_bind 2492 * has not been set in /etc/system check properties 2493 */ 2494 2495 if (sd_dtype_optical_bind < 0) { 2496 sd_dtype_optical_bind = ddi_prop_get_int 2497 (DDI_DEV_T_ANY, devi, 0, 2498 "optical-device-bind", 1); 2499 } 2500 2501 if (sd_dtype_optical_bind == 0) { 2502 rval = DDI_PROBE_FAILURE; 2503 } else { 2504 rval = DDI_PROBE_SUCCESS; 2505 } 2506 break; 2507 2508 case DTYPE_NOTPRESENT: 2509 default: 2510 rval = DDI_PROBE_FAILURE; 2511 break; 2512 } 2513 break; 2514 default: 2515 rval = DDI_PROBE_PARTIAL; 2516 break; 2517 } 2518 2519 /* 2520 * This routine checks for resource allocation prior to freeing, 2521 * so it will take care of the "smart probing" case where a 2522 * scsi_probe() may or may not have been issued and will *not* 2523 * free previously-freed resources. 2524 */ 2525 scsi_unprobe(devp); 2526 return (rval); 2527 } 2528 2529 2530 /* 2531 * Function: sdinfo 2532 * 2533 * Description: This is the driver getinfo(9e) entry point function. 
2534 * Given the device number, return the devinfo pointer from 2535 * the scsi_device structure or the instance number 2536 * associated with the dev_t. 2537 * 2538 * Arguments: dip - pointer to device info structure 2539 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2540 * DDI_INFO_DEVT2INSTANCE) 2541 * arg - driver dev_t 2542 * resultp - user buffer for request response 2543 * 2544 * Return Code: DDI_SUCCESS 2545 * DDI_FAILURE 2546 */ 2547 /* ARGSUSED */ 2548 static int 2549 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2550 { 2551 struct sd_lun *un; 2552 dev_t dev; 2553 int instance; 2554 int error; 2555 2556 switch (infocmd) { 2557 case DDI_INFO_DEVT2DEVINFO: 2558 dev = (dev_t)arg; 2559 instance = SDUNIT(dev); 2560 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2561 return (DDI_FAILURE); 2562 } 2563 *result = (void *) SD_DEVINFO(un); 2564 error = DDI_SUCCESS; 2565 break; 2566 case DDI_INFO_DEVT2INSTANCE: 2567 dev = (dev_t)arg; 2568 instance = SDUNIT(dev); 2569 *result = (void *)(uintptr_t)instance; 2570 error = DDI_SUCCESS; 2571 break; 2572 default: 2573 error = DDI_FAILURE; 2574 } 2575 return (error); 2576 } 2577 2578 /* 2579 * Function: sd_prop_op 2580 * 2581 * Description: This is the driver prop_op(9e) entry point function. 2582 * Return the number of blocks for the partition in question 2583 * or forward the request to the property facilities. 2584 * 2585 * Arguments: dev - device number 2586 * dip - pointer to device info structure 2587 * prop_op - property operator 2588 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2589 * name - pointer to property name 2590 * valuep - pointer or address of the user buffer 2591 * lengthp - property length 2592 * 2593 * Return Code: DDI_PROP_SUCCESS 2594 * DDI_PROP_NOT_FOUND 2595 * DDI_PROP_UNDEFINED 2596 * DDI_PROP_NO_MEMORY 2597 * DDI_PROP_BUF_TOO_SMALL 2598 */ 2599 2600 static int 2601 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2602 char *name, caddr_t valuep, int *lengthp) 2603 { 2604 int instance = ddi_get_instance(dip); 2605 struct sd_lun *un; 2606 uint64_t nblocks64; 2607 uint_t dblk; 2608 2609 /* 2610 * Our dynamic properties are all device specific and size oriented. 2611 * Requests issued under conditions where size is valid are passed 2612 * to ddi_prop_op_nblocks with the size information, otherwise the 2613 * request is passed to ddi_prop_op. Size depends on valid geometry. 
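 *
 * For example (illustrative values only): with un_tgt_blocksize of 2048
 * and un_sys_blocksize of 512, dblk below evaluates to 4, so a
 * partition of nblocks64 system blocks is reported as nblocks64 / 4
 * blocks of 2048 bytes each.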
2614 */ 2615 un = ddi_get_soft_state(sd_state, instance); 2616 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2617 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2618 name, valuep, lengthp)); 2619 } else if (!SD_IS_VALID_LABEL(un)) { 2620 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2621 valuep, lengthp)); 2622 } 2623 2624 /* get nblocks value */ 2625 ASSERT(!mutex_owned(SD_MUTEX(un))); 2626 2627 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2628 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2629 2630 /* report size in target size blocks */ 2631 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 2632 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags, 2633 name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize)); 2634 } 2635 2636 /* 2637 * The following functions are for smart probing: 2638 * sd_scsi_probe_cache_init() 2639 * sd_scsi_probe_cache_fini() 2640 * sd_scsi_clear_probe_cache() 2641 * sd_scsi_probe_with_cache() 2642 */ 2643 2644 /* 2645 * Function: sd_scsi_probe_cache_init 2646 * 2647 * Description: Initializes the probe response cache mutex and head pointer. 2648 * 2649 * Context: Kernel thread context 2650 */ 2651 2652 static void 2653 sd_scsi_probe_cache_init(void) 2654 { 2655 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2656 sd_scsi_probe_cache_head = NULL; 2657 } 2658 2659 2660 /* 2661 * Function: sd_scsi_probe_cache_fini 2662 * 2663 * Description: Frees all resources associated with the probe response cache. 2664 * 2665 * Context: Kernel thread context 2666 */ 2667 2668 static void 2669 sd_scsi_probe_cache_fini(void) 2670 { 2671 struct sd_scsi_probe_cache *cp; 2672 struct sd_scsi_probe_cache *ncp; 2673 2674 /* Clean up our smart probing linked list */ 2675 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2676 ncp = cp->next; 2677 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2678 } 2679 sd_scsi_probe_cache_head = NULL; 2680 mutex_destroy(&sd_scsi_probe_cache_mutex); 2681 } 2682 2683 2684 /* 2685 * Function: sd_scsi_clear_probe_cache 2686 * 2687 * Description: This routine clears the probe response cache. This is 2688 * done when open() returns ENXIO so that when deferred 2689 * attach is attempted (possibly after a device has been 2690 * turned on) we will retry the probe. Since we don't know 2691 * which target we failed to open, we just clear the 2692 * entire cache. 2693 * 2694 * Context: Kernel thread context 2695 */ 2696 2697 static void 2698 sd_scsi_clear_probe_cache(void) 2699 { 2700 struct sd_scsi_probe_cache *cp; 2701 int i; 2702 2703 mutex_enter(&sd_scsi_probe_cache_mutex); 2704 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2705 /* 2706 * Reset all entries to SCSIPROBE_EXISTS. This will 2707 * force probing to be performed the next time 2708 * sd_scsi_probe_with_cache is called. 2709 */ 2710 for (i = 0; i < NTARGETS_WIDE; i++) { 2711 cp->cache[i] = SCSIPROBE_EXISTS; 2712 } 2713 } 2714 mutex_exit(&sd_scsi_probe_cache_mutex); 2715 } 2716 2717 2718 /* 2719 * Function: sd_scsi_probe_with_cache 2720 * 2721 * Description: This routine implements support for a scsi device probe 2722 * with cache. The driver maintains a cache of the target 2723 * responses to scsi probes. If we get no response from a 2724 * target during a probe inquiry, we remember that, and we 2725 * avoid additional calls to scsi_probe on non-zero LUNs 2726 * on the same target until the cache is cleared. 
By doing 2727 * so we avoid the 1/4 sec selection timeout for nonzero 2728 * LUNs. lun0 of a target is always probed. 2729 * 2730 * Arguments: devp - Pointer to a scsi_device(9S) structure 2731 * waitfunc - indicates what the allocator routines should 2732 * do when resources are not available. This value 2733 * is passed on to scsi_probe() when that routine 2734 * is called. 2735 * 2736 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2737 * otherwise the value returned by scsi_probe(9F). 2738 * 2739 * Context: Kernel thread context 2740 */ 2741 2742 static int 2743 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2744 { 2745 struct sd_scsi_probe_cache *cp; 2746 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2747 int lun, tgt; 2748 2749 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2750 SCSI_ADDR_PROP_LUN, 0); 2751 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2752 SCSI_ADDR_PROP_TARGET, -1); 2753 2754 /* Make sure caching enabled and target in range */ 2755 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2756 /* do it the old way (no cache) */ 2757 return (scsi_probe(devp, waitfn)); 2758 } 2759 2760 mutex_enter(&sd_scsi_probe_cache_mutex); 2761 2762 /* Find the cache for this scsi bus instance */ 2763 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2764 if (cp->pdip == pdip) { 2765 break; 2766 } 2767 } 2768 2769 /* If we can't find a cache for this pdip, create one */ 2770 if (cp == NULL) { 2771 int i; 2772 2773 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2774 KM_SLEEP); 2775 cp->pdip = pdip; 2776 cp->next = sd_scsi_probe_cache_head; 2777 sd_scsi_probe_cache_head = cp; 2778 for (i = 0; i < NTARGETS_WIDE; i++) { 2779 cp->cache[i] = SCSIPROBE_EXISTS; 2780 } 2781 } 2782 2783 mutex_exit(&sd_scsi_probe_cache_mutex); 2784 2785 /* Recompute the cache for this target if LUN zero */ 2786 if (lun == 0) { 2787 cp->cache[tgt] = SCSIPROBE_EXISTS; 2788 } 2789 2790 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2791 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2792 return (SCSIPROBE_NORESP); 2793 } 2794 2795 /* Do the actual probe; save & return the result */ 2796 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2797 } 2798 2799 2800 /* 2801 * Function: sd_scsi_target_lun_init 2802 * 2803 * Description: Initializes the attached lun chain mutex and head pointer. 2804 * 2805 * Context: Kernel thread context 2806 */ 2807 2808 static void 2809 sd_scsi_target_lun_init(void) 2810 { 2811 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2812 sd_scsi_target_lun_head = NULL; 2813 } 2814 2815 2816 /* 2817 * Function: sd_scsi_target_lun_fini 2818 * 2819 * Description: Frees all resources associated with the attached lun 2820 * chain 2821 * 2822 * Context: Kernel thread context 2823 */ 2824 2825 static void 2826 sd_scsi_target_lun_fini(void) 2827 { 2828 struct sd_scsi_hba_tgt_lun *cp; 2829 struct sd_scsi_hba_tgt_lun *ncp; 2830 2831 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2832 ncp = cp->next; 2833 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2834 } 2835 sd_scsi_target_lun_head = NULL; 2836 mutex_destroy(&sd_scsi_target_lun_mutex); 2837 } 2838 2839 2840 /* 2841 * Function: sd_scsi_get_target_lun_count 2842 * 2843 * Description: This routine will check in the attached lun chain to see 2844 * how many luns are attached on the required SCSI controller 2845 * and target. 
Currently, some capabilities such as tagged 2846 * queueing are supported per target by the HBA, so all luns in a 2847 * target have the same capabilities. Based on this assumption, 2848 * sd should only set these capabilities once per target. This 2849 * function is called when sd needs to decide how many luns are 2850 * already attached on a target. 2851 * 2852 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2853 * controller device. 2854 * target - The target ID on the controller's SCSI bus. 2855 * 2856 * Return Code: The number of luns attached on the required target and 2857 * controller. 2858 * -1 if target ID is not in parallel SCSI scope or the given 2859 * dip is not in the chain. 2860 * 2861 * Context: Kernel thread context 2862 */ 2863 2864 static int 2865 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2866 { 2867 struct sd_scsi_hba_tgt_lun *cp; 2868 2869 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2870 return (-1); 2871 } 2872 2873 mutex_enter(&sd_scsi_target_lun_mutex); 2874 2875 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2876 if (cp->pdip == dip) { 2877 break; 2878 } 2879 } 2880 2881 mutex_exit(&sd_scsi_target_lun_mutex); 2882 2883 if (cp == NULL) { 2884 return (-1); 2885 } 2886 2887 return (cp->nlun[target]); 2888 } 2889 2890 2891 /* 2892 * Function: sd_scsi_update_lun_on_target 2893 * 2894 * Description: This routine is used to update the attached lun chain when a 2895 * lun is attached or detached on a target. 2896 * 2897 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2898 * controller device. 2899 * target - The target ID on the controller's SCSI bus. 2900 * flag - Indicates whether the lun is attached or detached. 2901 * 2902 * Context: Kernel thread context 2903 */ 2904 2905 static void 2906 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2907 { 2908 struct sd_scsi_hba_tgt_lun *cp; 2909 2910 mutex_enter(&sd_scsi_target_lun_mutex); 2911 2912 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2913 if (cp->pdip == dip) { 2914 break; 2915 } 2916 } 2917 2918 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2919 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2920 KM_SLEEP); 2921 cp->pdip = dip; 2922 cp->next = sd_scsi_target_lun_head; 2923 sd_scsi_target_lun_head = cp; 2924 } 2925 2926 mutex_exit(&sd_scsi_target_lun_mutex); 2927 2928 if (cp != NULL) { 2929 if (flag == SD_SCSI_LUN_ATTACH) { 2930 cp->nlun[target]++; 2931 } else { 2932 cp->nlun[target]--; 2933 } 2934 } 2935 } 2936 2937 2938 /* 2939 * Function: sd_spin_up_unit 2940 * 2941 * Description: Issues the following commands to spin up the device: 2942 * START STOP UNIT and INQUIRY. 2943 * 2944 * Arguments: un - driver soft state (unit) structure 2945 * 2946 * Return Code: 0 - success 2947 * EIO - failure 2948 * EACCES - reservation conflict 2949 * 2950 * Context: Kernel thread context 2951 */ 2952 2953 static int 2954 sd_spin_up_unit(struct sd_lun *un) 2955 { 2956 size_t resid = 0; 2957 int has_conflict = FALSE; 2958 uchar_t *bufaddr; 2959 2960 ASSERT(un != NULL); 2961 2962 /* 2963 * Send a throwaway START UNIT command. 2964 * 2965 * If we fail on this, we don't care presently what precisely 2966 * is wrong. EMC's arrays will also fail this with a check 2967 * condition (0x2/0x4/0x3) if the device is "inactive," but 2968 * we don't want to fail the attach because it may become 2969 * "active" later.
2970 */ 2971 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2972 == EACCES) 2973 has_conflict = TRUE; 2974 2975 /* 2976 * Send another INQUIRY command to the target. This is necessary for 2977 * non-removable media direct access devices because their INQUIRY data 2978 * may not be fully qualified until they are spun up (perhaps via the 2979 * START command above). (Note: This seems to be needed for some 2980 * legacy devices only.) The INQUIRY command should succeed even if a 2981 * Reservation Conflict is present. 2982 */ 2983 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2984 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2985 kmem_free(bufaddr, SUN_INQSIZE); 2986 return (EIO); 2987 } 2988 2989 /* 2990 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2991 * Note that this routine does not return a failure here even if the 2992 * INQUIRY command did not return any data. This is a legacy behavior. 2993 */ 2994 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2995 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2996 } 2997 2998 kmem_free(bufaddr, SUN_INQSIZE); 2999 3000 /* If we hit a reservation conflict above, tell the caller. */ 3001 if (has_conflict == TRUE) { 3002 return (EACCES); 3003 } 3004 3005 return (0); 3006 } 3007 3008 #ifdef _LP64 3009 /* 3010 * Function: sd_enable_descr_sense 3011 * 3012 * Description: This routine attempts to select descriptor sense format 3013 * using the Control mode page. Devices that support 64 bit 3014 * LBAs (for >2TB luns) should also implement descriptor 3015 * sense data so we will call this function whenever we see 3016 * a lun larger than 2TB. If for some reason the device 3017 * supports 64 bit LBAs but doesn't support descriptor sense, 3018 * presumably the mode select will fail. Everything will 3019 * continue to work normally except that we will not get 3020 * complete sense data for commands that fail with an LBA 3021 * larger than 32 bits. 3022 * 3023 * Arguments: un - driver soft state (unit) structure 3024 * 3025 * Context: Kernel thread context only 3026 */ 3027 3028 static void 3029 sd_enable_descr_sense(struct sd_lun *un) 3030 { 3031 uchar_t *header; 3032 struct mode_control_scsi3 *ctrl_bufp; 3033 size_t buflen; 3034 size_t bd_len; 3035 3036 /* 3037 * Read MODE SENSE page 0xA, Control Mode Page 3038 */ 3039 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3040 sizeof (struct mode_control_scsi3); 3041 header = kmem_zalloc(buflen, KM_SLEEP); 3042 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3043 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3044 SD_ERROR(SD_LOG_COMMON, un, 3045 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3046 goto eds_exit; 3047 } 3048 3049 /* 3050 * Determine size of Block Descriptors in order to locate 3051 * the mode page data. ATAPI devices return 0, SCSI devices 3052 * should return MODE_BLK_DESC_LENGTH. 3053 */ 3054 bd_len = ((struct mode_header *)header)->bdesc_length; 3055 3056 /* Clear the mode data length field for MODE SELECT */ 3057 ((struct mode_header *)header)->length = 0; 3058 3059 ctrl_bufp = (struct mode_control_scsi3 *) 3060 (header + MODE_HEADER_LENGTH + bd_len); 3061 3062 /* 3063 * If the page length is smaller than the expected value, 3064 * the target device doesn't support D_SENSE. Bail out here.
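 * (The mode page length field does not count the page code and page
 * length bytes themselves, which is why the expected value below is
 * sizeof (struct mode_control_scsi3) - 2.)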
3065 */ 3066 if (ctrl_bufp->mode_page.length < 3067 sizeof (struct mode_control_scsi3) - 2) { 3068 SD_ERROR(SD_LOG_COMMON, un, 3069 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3070 goto eds_exit; 3071 } 3072 3073 /* 3074 * Clear PS bit for MODE SELECT 3075 */ 3076 ctrl_bufp->mode_page.ps = 0; 3077 3078 /* 3079 * Set D_SENSE to enable descriptor sense format. 3080 */ 3081 ctrl_bufp->d_sense = 1; 3082 3083 /* 3084 * Use MODE SELECT to commit the change to the D_SENSE bit 3085 */ 3086 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3087 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3088 SD_INFO(SD_LOG_COMMON, un, 3089 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3090 goto eds_exit; 3091 } 3092 3093 eds_exit: 3094 kmem_free(header, buflen); 3095 } 3096 3097 /* 3098 * Function: sd_reenable_dsense_task 3099 * 3100 * Description: Re-enable descriptor sense after device or bus reset 3101 * 3102 * Context: Executes in a taskq() thread context 3103 */ 3104 static void 3105 sd_reenable_dsense_task(void *arg) 3106 { 3107 struct sd_lun *un = arg; 3108 3109 ASSERT(un != NULL); 3110 sd_enable_descr_sense(un); 3111 } 3112 #endif /* _LP64 */ 3113 3114 /* 3115 * Function: sd_set_mmc_caps 3116 * 3117 * Description: This routine determines if the device is MMC compliant and if 3118 * the device supports CDDA via a mode sense of the CDVD 3119 * capabilities mode page. Also checks if the device is a 3120 * dvdram writable device. 3121 * 3122 * Arguments: un - driver soft state (unit) structure 3123 * 3124 * Context: Kernel thread context only 3125 */ 3126 3127 static void 3128 sd_set_mmc_caps(struct sd_lun *un) 3129 { 3130 struct mode_header_grp2 *sense_mhp; 3131 uchar_t *sense_page; 3132 caddr_t buf; 3133 int bd_len; 3134 int status; 3135 struct uscsi_cmd com; 3136 int rtn; 3137 uchar_t *out_data_rw, *out_data_hd; 3138 uchar_t *rqbuf_rw, *rqbuf_hd; 3139 3140 ASSERT(un != NULL); 3141 3142 /* 3143 * The flags which will be set in this function are - mmc compliant, 3144 * dvdram writable device, cdda support. Initialize them to FALSE 3145 * and if a capability is detected - it will be set to TRUE. 3146 */ 3147 un->un_f_mmc_cap = FALSE; 3148 un->un_f_dvdram_writable_device = FALSE; 3149 un->un_f_cfg_cdda = FALSE; 3150 3151 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3152 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3153 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3154 3155 if (status != 0) { 3156 /* command failed; just return */ 3157 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3158 return; 3159 } 3160 /* 3161 * If the mode sense request for the CDROM CAPABILITIES 3162 * page (0x2A) succeeds the device is assumed to be MMC. 3163 */ 3164 un->un_f_mmc_cap = TRUE; 3165 3166 /* Get to the page data */ 3167 sense_mhp = (struct mode_header_grp2 *)buf; 3168 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3169 sense_mhp->bdesc_length_lo; 3170 if (bd_len > MODE_BLK_DESC_LENGTH) { 3171 /* 3172 * We did not get back the expected block descriptor 3173 * length so we cannot determine if the device supports 3174 * CDDA. However, we still indicate the device is MMC 3175 * according to the successful response to the page 3176 * 0x2A mode sense request. 
3177 */ 3178 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3179 "sd_set_mmc_caps: Mode Sense returned " 3180 "invalid block descriptor length\n"); 3181 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3182 return; 3183 } 3184 3185 /* See if read CDDA is supported */ 3186 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3187 bd_len); 3188 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3189 3190 /* See if writing DVD RAM is supported. */ 3191 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3192 if (un->un_f_dvdram_writable_device == TRUE) { 3193 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3194 return; 3195 } 3196 3197 /* 3198 * If the device presents DVD or CD capabilities in the mode 3199 * page, we can return here since a RRD will not have 3200 * these capabilities. 3201 */ 3202 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3203 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3204 return; 3205 } 3206 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3207 3208 /* 3209 * If un->un_f_dvdram_writable_device is still FALSE, 3210 * check for a Removable Rigid Disk (RRD). A RRD 3211 * device is identified by the features RANDOM_WRITABLE and 3212 * HARDWARE_DEFECT_MANAGEMENT. 3213 */ 3214 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3215 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3216 3217 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3218 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3219 RANDOM_WRITABLE, SD_PATH_STANDARD); 3220 if (rtn != 0) { 3221 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3222 kmem_free(rqbuf_rw, SENSE_LENGTH); 3223 return; 3224 } 3225 3226 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3227 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3228 3229 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3230 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3231 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3232 if (rtn == 0) { 3233 /* 3234 * We have good information, check for random writable 3235 * and hardware defect features. 3236 */ 3237 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3238 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3239 un->un_f_dvdram_writable_device = TRUE; 3240 } 3241 } 3242 3243 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3244 kmem_free(rqbuf_rw, SENSE_LENGTH); 3245 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3246 kmem_free(rqbuf_hd, SENSE_LENGTH); 3247 } 3248 3249 /* 3250 * Function: sd_check_for_writable_cd 3251 * 3252 * Description: This routine determines if the media in the device is 3253 * writable or not. It uses the get configuration command (0x46) 3254 * to determine if the media is writable 3255 * 3256 * Arguments: un - driver soft state (unit) structure 3257 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3258 * chain and the normal command waitq, or 3259 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3260 * "direct" chain and bypass the normal command 3261 * waitq. 3262 * 3263 * Context: Never called at interrupt context. 
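 *
 * Note that the caller must hold SD_MUTEX(un) on entry; this
 * routine drops the mutex around the commands it issues and
 * returns with the mutex re-acquired.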
3264 */ 3265 3266 static void 3267 sd_check_for_writable_cd(struct sd_lun *un, int path_flag) 3268 { 3269 struct uscsi_cmd com; 3270 uchar_t *out_data; 3271 uchar_t *rqbuf; 3272 int rtn; 3273 uchar_t *out_data_rw, *out_data_hd; 3274 uchar_t *rqbuf_rw, *rqbuf_hd; 3275 struct mode_header_grp2 *sense_mhp; 3276 uchar_t *sense_page; 3277 caddr_t buf; 3278 int bd_len; 3279 int status; 3280 3281 ASSERT(un != NULL); 3282 ASSERT(mutex_owned(SD_MUTEX(un))); 3283 3284 /* 3285 * Initialize writable media to FALSE; it is set to TRUE below 3286 * only if the configuration info tells us the media is writable. 3287 */ 3288 un->un_f_mmc_writable_media = FALSE; 3289 mutex_exit(SD_MUTEX(un)); 3290 3291 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3292 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3293 3294 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3295 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3296 3297 mutex_enter(SD_MUTEX(un)); 3298 if (rtn == 0) { 3299 /* 3300 * We have good information; check for writable DVD. 3301 */ 3302 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3303 un->un_f_mmc_writable_media = TRUE; 3304 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3305 kmem_free(rqbuf, SENSE_LENGTH); 3306 return; 3307 } 3308 } 3309 3310 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3311 kmem_free(rqbuf, SENSE_LENGTH); 3312 3313 /* 3314 * Determine if this is an RRD type device. 3315 */ 3316 mutex_exit(SD_MUTEX(un)); 3317 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3318 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3319 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3320 mutex_enter(SD_MUTEX(un)); 3321 if (status != 0) { 3322 /* command failed; just return */ 3323 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3324 return; 3325 } 3326 3327 /* Get to the page data */ 3328 sense_mhp = (struct mode_header_grp2 *)buf; 3329 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3330 if (bd_len > MODE_BLK_DESC_LENGTH) { 3331 /* 3332 * We did not get back the expected block descriptor length, so 3333 * we cannot check the mode page. 3334 */ 3335 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3336 "sd_check_for_writable_cd: Mode Sense returned " 3337 "invalid block descriptor length\n"); 3338 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3339 return; 3340 } 3341 3342 /* 3343 * If the device presents DVD or CD capabilities in the mode 3344 * page, we can return here since an RRD device will not have 3345 * these capabilities. 3346 */ 3347 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3348 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3349 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3350 return; 3351 } 3352 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3353 3354 /* 3355 * If un->un_f_mmc_writable_media is still FALSE, 3356 * check for RRD type media. An RRD device is identified 3357 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
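 *
 * Note that, unlike the similar test in sd_set_mmc_caps(), the check
 * below also requires that each feature be reported as current (bit 0
 * of byte 10 of the returned feature data).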
3358 */ 3359 mutex_exit(SD_MUTEX(un)); 3360 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3361 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3362 3363 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3364 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3365 RANDOM_WRITABLE, path_flag); 3366 if (rtn != 0) { 3367 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3368 kmem_free(rqbuf_rw, SENSE_LENGTH); 3369 mutex_enter(SD_MUTEX(un)); 3370 return; 3371 } 3372 3373 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3374 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3375 3376 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3377 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3378 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3379 mutex_enter(SD_MUTEX(un)); 3380 if (rtn == 0) { 3381 /* 3382 * We have good information; check for random writable 3383 * and hardware defect features as current. 3384 */ 3385 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3386 (out_data_rw[10] & 0x1) && 3387 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3388 (out_data_hd[10] & 0x1)) { 3389 un->un_f_mmc_writable_media = TRUE; 3390 } 3391 } 3392 3393 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3394 kmem_free(rqbuf_rw, SENSE_LENGTH); 3395 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3396 kmem_free(rqbuf_hd, SENSE_LENGTH); 3397 } 3398 3399 /* 3400 * Function: sd_read_unit_properties 3401 * 3402 * Description: The following implements a property lookup mechanism. 3403 * Properties for particular disks (keyed on vendor, model 3404 * and rev numbers) are sought in the sd.conf file via 3405 * sd_process_sdconf_file(), and if not found there, are 3406 * looked for in a list hardcoded in this driver via 3407 * sd_process_sdconf_table(). Once located, the properties 3408 * are used to update the driver unit structure. 3409 * 3410 * Arguments: un - driver soft state (unit) structure 3411 */ 3412 3413 static void 3414 sd_read_unit_properties(struct sd_lun *un) 3415 { 3416 /* 3417 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3418 * the "sd-config-list" property (from the sd.conf file) or if 3419 * there was not a match for the inquiry vid/pid. If this 3420 * occurs, the static driver configuration table is searched for 3421 * a match. 3422 */ 3423 ASSERT(un != NULL); 3424 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3425 sd_process_sdconf_table(un); 3426 } 3427 3428 /* check for LSI device */ 3429 sd_is_lsi(un); 3430 3431 3432 } 3433 3434 3435 /* 3436 * Function: sd_process_sdconf_file 3437 * 3438 * Description: Use ddi_getlongprop to obtain the properties from the 3439 * driver's config file (i.e., sd.conf) and update the driver 3440 * soft state structure accordingly. 3441 * 3442 * Arguments: un - driver soft state (unit) structure 3443 * 3444 * Return Code: SD_SUCCESS - The properties were successfully set according 3445 * to the driver configuration file. 3446 * SD_FAILURE - The driver config list was not obtained or 3447 * there was no vid/pid match. This indicates that 3448 * the static config table should be used.
3449 * 3450 * The config file has a property, "sd-config-list", which consists of 3451 * one or more duplets as follows: 3452 * 3453 * sd-config-list= 3454 * <duplet>, 3455 * [<duplet>,] 3456 * [<duplet>]; 3457 * 3458 * The structure of each duplet is as follows: 3459 * 3460 * <duplet>:= <vid+pid>,<data-property-name_list> 3461 * 3462 * The first entry of the duplet is the device ID string (the concatenated 3463 * vid & pid; not to be confused with a device_id). This is defined in 3464 * the same way as in the sd_disk_table. 3465 * 3466 * The second part of the duplet is a string that identifies a 3467 * data-property-name-list. The data-property-name-list is defined as 3468 * follows: 3469 * 3470 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3471 * 3472 * The syntax of <data-property-name> depends on the <version> field. 3473 * 3474 * If version = SD_CONF_VERSION_1 we have the following syntax: 3475 * 3476 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3477 * 3478 * where the prop0 value will be used to set prop0 if bit0 set in the 3479 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3480 * 3481 */ 3482 3483 static int 3484 sd_process_sdconf_file(struct sd_lun *un) 3485 { 3486 char *config_list = NULL; 3487 int config_list_len; 3488 int len; 3489 int dupletlen = 0; 3490 char *vidptr; 3491 int vidlen; 3492 char *dnlist_ptr; 3493 char *dataname_ptr; 3494 int dnlist_len; 3495 int dataname_len; 3496 int *data_list; 3497 int data_list_len; 3498 int rval = SD_FAILURE; 3499 int i; 3500 3501 ASSERT(un != NULL); 3502 3503 /* Obtain the configuration list associated with the .conf file */ 3504 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3505 sd_config_list, (caddr_t)&config_list, &config_list_len) 3506 != DDI_PROP_SUCCESS) { 3507 return (SD_FAILURE); 3508 } 3509 3510 /* 3511 * Compare vids in each duplet to the inquiry vid - if a match is 3512 * made, get the data value and update the soft state structure 3513 * accordingly. 3514 * 3515 * Note: This algorithm is complex and difficult to maintain. It should 3516 * be replaced with a more robust implementation. 3517 */ 3518 for (len = config_list_len, vidptr = config_list; len > 0; 3519 vidptr += dupletlen, len -= dupletlen) { 3520 /* 3521 * Note: The assumption here is that each vid entry is on 3522 * a unique line from its associated duplet. 3523 */ 3524 vidlen = dupletlen = (int)strlen(vidptr); 3525 if ((vidlen == 0) || 3526 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3527 dupletlen++; 3528 continue; 3529 } 3530 3531 /* 3532 * dnlist contains 1 or more blank separated 3533 * data-property-name entries 3534 */ 3535 dnlist_ptr = vidptr + vidlen + 1; 3536 dnlist_len = (int)strlen(dnlist_ptr); 3537 dupletlen += dnlist_len + 2; 3538 3539 /* 3540 * Set a pointer for the first data-property-name 3541 * entry in the list 3542 */ 3543 dataname_ptr = dnlist_ptr; 3544 dataname_len = 0; 3545 3546 /* 3547 * Loop through all data-property-name entries in the 3548 * data-property-name-list setting the properties for each. 3549 */ 3550 while (dataname_len < dnlist_len) { 3551 int version; 3552 3553 /* 3554 * Determine the length of the current 3555 * data-property-name entry by indexing until a 3556 * blank or NULL is encountered. When the space is 3557 * encountered reset it to a NULL for compliance 3558 * with ddi_getlongprop(). 
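 *
 * As a purely illustrative example (vid/pid, property name, and
 * values are hypothetical), an sd.conf fragment such as
 *
 *	sd-config-list = "SEAGATE ST32550W", "seagate-data";
 *	seagate-data = 1,0x1,32;
 *
 * would match on the vid/pid above, and this loop would isolate the
 * name "seagate-data" so that ddi_getlongprop() can fetch its
 * version 1 value (flags of 0x1 with prop0 = 32).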
3559 */ 3560 for (i = 0; ((dataname_ptr[i] != ' ') && 3561 (dataname_ptr[i] != '\0')); i++) { 3562 ; 3563 } 3564 3565 dataname_len += i; 3566 /* If not null terminated, make it so */ 3567 if (dataname_ptr[i] == ' ') { 3568 dataname_ptr[i] = '\0'; 3569 } 3570 dataname_len++; 3571 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3572 "sd_process_sdconf_file: disk:%s, data:%s\n", 3573 vidptr, dataname_ptr); 3574 3575 /* Get the data list */ 3576 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3577 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3578 != DDI_PROP_SUCCESS) { 3579 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3580 "sd_process_sdconf_file: data property (%s)" 3581 " has no value\n", dataname_ptr); 3582 dataname_ptr = dnlist_ptr + dataname_len; 3583 continue; 3584 } 3585 3586 version = data_list[0]; 3587 3588 if (version == SD_CONF_VERSION_1) { 3589 sd_tunables values; 3590 3591 /* Set the properties */ 3592 if (sd_chk_vers1_data(un, data_list[1], 3593 &data_list[2], data_list_len, dataname_ptr) 3594 == SD_SUCCESS) { 3595 sd_get_tunables_from_conf(un, 3596 data_list[1], &data_list[2], 3597 &values); 3598 sd_set_vers1_properties(un, 3599 data_list[1], &values); 3600 rval = SD_SUCCESS; 3601 } else { 3602 rval = SD_FAILURE; 3603 } 3604 } else { 3605 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3606 "data property %s version 0x%x is invalid.", 3607 dataname_ptr, version); 3608 rval = SD_FAILURE; 3609 } 3610 kmem_free(data_list, data_list_len); 3611 dataname_ptr = dnlist_ptr + dataname_len; 3612 } 3613 } 3614 3615 /* free up the memory allocated by ddi_getlongprop */ 3616 if (config_list) { 3617 kmem_free(config_list, config_list_len); 3618 } 3619 3620 return (rval); 3621 } 3622 3623 /* 3624 * Function: sd_get_tunables_from_conf() 3625 * 3626 * 3627 * This function reads the data list from the sd.conf file and pulls 3628 * out the values that take numeric arguments, placing them in the 3629 * appropriate sd_tunables members. 3630 * Since the order of the data list members varies across platforms, 3631 * this function reads them from the data list in a platform-specific 3632 * order and places them into the correct sd_tunables member, which is 3633 * consistent across all platforms.
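 *
 * Continuing the hypothetical example above: with flags of 0x1 and a
 * prop list of { 32 }, the bit-0 case of the switch below would copy
 * 32 into the corresponding sd_tunables member.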
3634 */ 3635 static void 3636 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3637 sd_tunables *values) 3638 { 3639 int i; 3640 int mask; 3641 3642 bzero(values, sizeof (sd_tunables)); 3643 3644 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3645 3646 mask = 1 << i; 3647 if (mask > flags) { 3648 break; 3649 } 3650 3651 switch (mask & flags) { 3652 case 0: /* This mask bit not set in flags */ 3653 continue; 3654 case SD_CONF_BSET_THROTTLE: 3655 values->sdt_throttle = data_list[i]; 3656 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3657 "sd_get_tunables_from_conf: throttle = %d\n", 3658 values->sdt_throttle); 3659 break; 3660 case SD_CONF_BSET_CTYPE: 3661 values->sdt_ctype = data_list[i]; 3662 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3663 "sd_get_tunables_from_conf: ctype = %d\n", 3664 values->sdt_ctype); 3665 break; 3666 case SD_CONF_BSET_NRR_COUNT: 3667 values->sdt_not_rdy_retries = data_list[i]; 3668 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3669 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3670 values->sdt_not_rdy_retries); 3671 break; 3672 case SD_CONF_BSET_BSY_RETRY_COUNT: 3673 values->sdt_busy_retries = data_list[i]; 3674 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3675 "sd_get_tunables_from_conf: busy_retries = %d\n", 3676 values->sdt_busy_retries); 3677 break; 3678 case SD_CONF_BSET_RST_RETRIES: 3679 values->sdt_reset_retries = data_list[i]; 3680 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3681 "sd_get_tunables_from_conf: reset_retries = %d\n", 3682 values->sdt_reset_retries); 3683 break; 3684 case SD_CONF_BSET_RSV_REL_TIME: 3685 values->sdt_reserv_rel_time = data_list[i]; 3686 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3687 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3688 values->sdt_reserv_rel_time); 3689 break; 3690 case SD_CONF_BSET_MIN_THROTTLE: 3691 values->sdt_min_throttle = data_list[i]; 3692 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3693 "sd_get_tunables_from_conf: min_throttle = %d\n", 3694 values->sdt_min_throttle); 3695 break; 3696 case SD_CONF_BSET_DISKSORT_DISABLED: 3697 values->sdt_disk_sort_dis = data_list[i]; 3698 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3699 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3700 values->sdt_disk_sort_dis); 3701 break; 3702 case SD_CONF_BSET_LUN_RESET_ENABLED: 3703 values->sdt_lun_reset_enable = data_list[i]; 3704 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3705 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3706 "\n", values->sdt_lun_reset_enable); 3707 break; 3708 case SD_CONF_BSET_CACHE_IS_NV: 3709 values->sdt_suppress_cache_flush = data_list[i]; 3710 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3711 "sd_get_tunables_from_conf: " 3712 "suppress_cache_flush = %d" 3713 "\n", values->sdt_suppress_cache_flush); 3714 break; 3715 } 3716 } 3717 } 3718 3719 /* 3720 * Function: sd_process_sdconf_table 3721 * 3722 * Description: Search the static configuration table for a match on the 3723 * inquiry vid/pid and update the driver soft state structure 3724 * according to the table property values for the device.
3725 * 3726 * The form of a configuration table entry is: 3727 * <vid+pid>,<flags>,<property-data> 3728 * "SEAGATE ST42400N",1,0x40000, 3729 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 3730 * 3731 * Arguments: un - driver soft state (unit) structure 3732 */ 3733 3734 static void 3735 sd_process_sdconf_table(struct sd_lun *un) 3736 { 3737 char *id = NULL; 3738 int table_index; 3739 int idlen; 3740 3741 ASSERT(un != NULL); 3742 for (table_index = 0; table_index < sd_disk_table_size; 3743 table_index++) { 3744 id = sd_disk_table[table_index].device_id; 3745 idlen = strlen(id); 3746 if (idlen == 0) { 3747 continue; 3748 } 3749 3750 /* 3751 * The static configuration table currently does not 3752 * implement version 10 properties. Additionally, 3753 * multiple data-property-name entries are not 3754 * implemented in the static configuration table. 3755 */ 3756 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3757 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3758 "sd_process_sdconf_table: disk %s\n", id); 3759 sd_set_vers1_properties(un, 3760 sd_disk_table[table_index].flags, 3761 sd_disk_table[table_index].properties); 3762 break; 3763 } 3764 } 3765 } 3766 3767 3768 /* 3769 * Function: sd_sdconf_id_match 3770 * 3771 * Description: This local function implements a case insensitive vid/pid 3772 * comparison as well as the boundary cases of wild card and 3773 * multiple blanks. 3774 * 3775 * Note: An implicit assumption made here is that the scsi 3776 * inquiry structure will always keep the vid, pid and 3777 * revision strings in consecutive sequence, so they can be 3778 * read as a single string. If this assumption is not the 3779 * case, a separate string, to be used for the check, needs 3780 * to be built with these strings concatenated. 3781 * 3782 * Arguments: un - driver soft state (unit) structure 3783 * id - table or config file vid/pid 3784 * idlen - length of the vid/pid (bytes) 3785 * 3786 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3787 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3788 */ 3789 3790 static int 3791 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3792 { 3793 struct scsi_inquiry *sd_inq; 3794 int rval = SD_SUCCESS; 3795 3796 ASSERT(un != NULL); 3797 sd_inq = un->un_sd->sd_inq; 3798 ASSERT(id != NULL); 3799 3800 /* 3801 * We use the inq_vid as a pointer to a buffer containing the 3802 * vid and pid and use the entire vid/pid length of the table 3803 * entry for the comparison. This works because the inq_pid 3804 * data member follows inq_vid in the scsi_inquiry structure. 3805 */ 3806 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3807 /* 3808 * The user id string is compared to the inquiry vid/pid 3809 * using a case insensitive comparison and ignoring 3810 * multiple spaces. 3811 */ 3812 rval = sd_blank_cmp(un, id, idlen); 3813 if (rval != SD_SUCCESS) { 3814 /* 3815 * User id strings that start and end with a "*" 3816 * are a special case. These do not have a 3817 * specific vendor, and the product string can 3818 * appear anywhere in the 16 byte PID portion of 3819 * the inquiry data. This is a simple strstr() 3820 * type search for the user id in the inquiry data.
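 *
 * For example, a hypothetical entry of "*ST32550*" would match any
 * device whose 16 byte inquiry pid field contains the substring
 * "ST32550", regardless of the vendor field.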
3821 */ 3822 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3823 char *pidptr = &id[1]; 3824 int i; 3825 int j; 3826 int pidstrlen = idlen - 2; 3827 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3828 pidstrlen; 3829 3830 if (j < 0) { 3831 return (SD_FAILURE); 3832 } 3833 for (i = 0; i < j; i++) { 3834 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3835 pidptr, pidstrlen) == 0) { 3836 rval = SD_SUCCESS; 3837 break; 3838 } 3839 } 3840 } 3841 } 3842 } 3843 return (rval); 3844 } 3845 3846 3847 /* 3848 * Function: sd_blank_cmp 3849 * 3850 * Description: If the id string starts and ends with a space, treat 3851 * multiple consecutive spaces as equivalent to a single 3852 * space. For example, this causes a sd_disk_table entry 3853 * of " NEC CDROM " to match a device's id string of 3854 * "NEC CDROM". 3855 * 3856 * Note: The success exit condition for this routine is if 3857 * the pointer to the table entry is '\0' and the cnt of 3858 * the inquiry length is zero. This will happen if the inquiry 3859 * string returned by the device is padded with spaces to be 3860 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3861 * SCSI spec states that the inquiry string is to be padded with 3862 * spaces. 3863 * 3864 * Arguments: un - driver soft state (unit) structure 3865 * id - table or config file vid/pid 3866 * idlen - length of the vid/pid (bytes) 3867 * 3868 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3869 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3870 */ 3871 3872 static int 3873 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3874 { 3875 char *p1; 3876 char *p2; 3877 int cnt; 3878 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3879 sizeof (SD_INQUIRY(un)->inq_pid); 3880 3881 ASSERT(un != NULL); 3882 p2 = un->un_sd->sd_inq->inq_vid; 3883 ASSERT(id != NULL); 3884 p1 = id; 3885 3886 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3887 /* 3888 * Note: string p1 is terminated by a NUL but string p2 3889 * isn't. The end of p2 is determined by cnt. 3890 */ 3891 for (;;) { 3892 /* skip over any extra blanks in both strings */ 3893 while ((*p1 != '\0') && (*p1 == ' ')) { 3894 p1++; 3895 } 3896 while ((cnt != 0) && (*p2 == ' ')) { 3897 p2++; 3898 cnt--; 3899 } 3900 3901 /* compare the two strings */ 3902 if ((cnt == 0) || 3903 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3904 break; 3905 } 3906 while ((cnt > 0) && 3907 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3908 p1++; 3909 p2++; 3910 cnt--; 3911 } 3912 } 3913 } 3914 3915 /* return SD_SUCCESS if both strings match */ 3916 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 3917 } 3918 3919 3920 /* 3921 * Function: sd_chk_vers1_data 3922 * 3923 * Description: Verify the version 1 device properties provided by the 3924 * user via the configuration file 3925 * 3926 * Arguments: un - driver soft state (unit) structure 3927 * flags - integer mask indicating properties to be set 3928 * prop_list - integer list of property values 3929 * list_len - length of user provided data 3930 * 3931 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3932 * SD_FAILURE - Indicates the user provided data is invalid 3933 */ 3934 3935 static int 3936 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3937 int list_len, char *dataname_ptr) 3938 { 3939 int i; 3940 int mask = 1; 3941 int index = 0; 3942 3943 ASSERT(un != NULL); 3944 3945 /* Check for a NULL property name and list */ 3946 if (dataname_ptr == NULL) { 3947 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3948 "sd_chk_vers1_data: NULL data property name."); 3949 return (SD_FAILURE); 3950 } 3951 if (prop_list == NULL) { 3952 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3953 "sd_chk_vers1_data: %s NULL data property list.", 3954 dataname_ptr); 3955 return (SD_FAILURE); 3956 } 3957 3958 /* Display a warning if undefined bits are set in the flags */ 3959 if (flags & ~SD_CONF_BIT_MASK) { 3960 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3961 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3962 "Properties not set.", 3963 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3964 return (SD_FAILURE); 3965 } 3966 3967 /* 3968 * Verify the length of the list by identifying the highest bit set 3969 * in the flags and validating that the property list has a length 3970 * up to the index of this bit. (For example, flags of 0x40000, i.e. bit 18, require at least 21 ints in the list: the version, the flags word, and 19 property values.) 3971 */ 3972 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3973 if (flags & mask) { 3974 index = i + 1; 3975 } 3976 mask <<= 1; 3977 } 3978 if ((list_len / sizeof (int)) < (index + 2)) { 3979 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3980 "sd_chk_vers1_data: " 3981 "Data property list %s size is incorrect. " 3982 "Properties not set.", dataname_ptr); 3983 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3984 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3985 return (SD_FAILURE); 3986 } 3987 return (SD_SUCCESS); 3988 } 3989 3990 3991 /* 3992 * Function: sd_set_vers1_properties 3993 * 3994 * Description: Set version 1 device properties based on a property list 3995 * retrieved from the driver configuration file or static 3996 * configuration table. Version 1 properties have the format: 3997 * 3998 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3999 * 4000 * where the prop0 value will be used to set prop0 if bit0 4001 * is set in the flags 4002 * 4003 * Arguments: un - driver soft state (unit) structure 4004 * flags - integer mask indicating properties to be set 4005 * prop_list - integer list of property values 4006 */ 4007 4008 static void 4009 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4010 { 4011 ASSERT(un != NULL); 4012 4013 /* 4014 * Set the flag to indicate cache is to be disabled. An attempt 4015 * to disable the cache via sd_cache_control() will be made 4016 * later during attach once the basic initialization is complete.
4017 */ 4018 if (flags & SD_CONF_BSET_NOCACHE) { 4019 un->un_f_opt_disable_cache = TRUE; 4020 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4021 "sd_set_vers1_properties: caching disabled flag set\n"); 4022 } 4023 4024 /* CD-specific configuration parameters */ 4025 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4026 un->un_f_cfg_playmsf_bcd = TRUE; 4027 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4028 "sd_set_vers1_properties: playmsf_bcd set\n"); 4029 } 4030 if (flags & SD_CONF_BSET_READSUB_BCD) { 4031 un->un_f_cfg_readsub_bcd = TRUE; 4032 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4033 "sd_set_vers1_properties: readsub_bcd set\n"); 4034 } 4035 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4036 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4037 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4038 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4039 } 4040 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4041 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4042 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4043 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4044 } 4045 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4046 un->un_f_cfg_no_read_header = TRUE; 4047 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4048 "sd_set_vers1_properties: no_read_header set\n"); 4049 } 4050 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4051 un->un_f_cfg_read_cd_xd4 = TRUE; 4052 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4053 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4054 } 4055 4056 /* Support for devices which do not have valid/unique serial numbers */ 4057 if (flags & SD_CONF_BSET_FAB_DEVID) { 4058 un->un_f_opt_fab_devid = TRUE; 4059 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4060 "sd_set_vers1_properties: fab_devid bit set\n"); 4061 } 4062 4063 /* Support for user throttle configuration */ 4064 if (flags & SD_CONF_BSET_THROTTLE) { 4065 ASSERT(prop_list != NULL); 4066 un->un_saved_throttle = un->un_throttle = 4067 prop_list->sdt_throttle; 4068 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4069 "sd_set_vers1_properties: throttle set to %d\n", 4070 prop_list->sdt_throttle); 4071 } 4072 4073 /* Set the per disk retry count according to the conf file or table. 
*/ 4074 if (flags & SD_CONF_BSET_NRR_COUNT) { 4075 ASSERT(prop_list != NULL); 4076 if (prop_list->sdt_not_rdy_retries) { 4077 un->un_notready_retry_count = 4078 prop_list->sdt_not_rdy_retries; 4079 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4080 "sd_set_vers1_properties: not ready retry count" 4081 " set to %d\n", un->un_notready_retry_count); 4082 } 4083 } 4084 4085 /* The controller type is reported for generic disk driver ioctls */ 4086 if (flags & SD_CONF_BSET_CTYPE) { 4087 ASSERT(prop_list != NULL); 4088 switch (prop_list->sdt_ctype) { 4089 case CTYPE_CDROM: 4090 un->un_ctype = prop_list->sdt_ctype; 4091 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4092 "sd_set_vers1_properties: ctype set to " 4093 "CTYPE_CDROM\n"); 4094 break; 4095 case CTYPE_CCS: 4096 un->un_ctype = prop_list->sdt_ctype; 4097 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4098 "sd_set_vers1_properties: ctype set to " 4099 "CTYPE_CCS\n"); 4100 break; 4101 case CTYPE_ROD: /* RW optical */ 4102 un->un_ctype = prop_list->sdt_ctype; 4103 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4104 "sd_set_vers1_properties: ctype set to " 4105 "CTYPE_ROD\n"); 4106 break; 4107 default: 4108 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4109 "sd_set_vers1_properties: Could not set " 4110 "invalid ctype value (%d)", 4111 prop_list->sdt_ctype); 4112 } 4113 } 4114 4115 /* Purple failover timeout */ 4116 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4117 ASSERT(prop_list != NULL); 4118 un->un_busy_retry_count = 4119 prop_list->sdt_busy_retries; 4120 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4121 "sd_set_vers1_properties: " 4122 "busy retry count set to %d\n", 4123 un->un_busy_retry_count); 4124 } 4125 4126 /* Purple reset retry count */ 4127 if (flags & SD_CONF_BSET_RST_RETRIES) { 4128 ASSERT(prop_list != NULL); 4129 un->un_reset_retry_count = 4130 prop_list->sdt_reset_retries; 4131 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4132 "sd_set_vers1_properties: " 4133 "reset retry count set to %d\n", 4134 un->un_reset_retry_count); 4135 } 4136 4137 /* Purple reservation release timeout */ 4138 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4139 ASSERT(prop_list != NULL); 4140 un->un_reserve_release_time = 4141 prop_list->sdt_reserv_rel_time; 4142 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4143 "sd_set_vers1_properties: " 4144 "reservation release timeout set to %d\n", 4145 un->un_reserve_release_time); 4146 } 4147 4148 /* 4149 * Driver flag telling the driver to verify that no commands are pending 4150 * for a device before issuing a Test Unit Ready. This is a workaround 4151 * for a firmware bug in some Seagate eliteI drives. 4152 */ 4153 if (flags & SD_CONF_BSET_TUR_CHECK) { 4154 un->un_f_cfg_tur_check = TRUE; 4155 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4156 "sd_set_vers1_properties: tur queue check set\n"); 4157 } 4158 4159 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4160 un->un_min_throttle = prop_list->sdt_min_throttle; 4161 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4162 "sd_set_vers1_properties: min throttle set to %d\n", 4163 un->un_min_throttle); 4164 } 4165 4166 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4167 un->un_f_disksort_disabled = 4168 (prop_list->sdt_disk_sort_dis != 0) ? 4169 TRUE : FALSE; 4170 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4171 "sd_set_vers1_properties: disksort disabled " 4172 "flag set to %d\n", 4173 prop_list->sdt_disk_sort_dis); 4174 } 4175 4176 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4177 un->un_f_lun_reset_enabled = 4178 (prop_list->sdt_lun_reset_enable != 0) ? 
4179 TRUE : FALSE; 4180 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4181 "sd_set_vers1_properties: lun reset enabled " 4182 "flag set to %d\n", 4183 prop_list->sdt_lun_reset_enable); 4184 } 4185 4186 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4187 un->un_f_suppress_cache_flush = 4188 (prop_list->sdt_suppress_cache_flush != 0) ? 4189 TRUE : FALSE; 4190 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4191 "sd_set_vers1_properties: suppress_cache_flush " 4192 "flag set to %d\n", 4193 prop_list->sdt_suppress_cache_flush); 4194 } 4195 4196 /* 4197 * Validate the throttle values. 4198 * If any of the numbers are invalid, set everything to defaults. 4199 */ 4200 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4201 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4202 (un->un_min_throttle > un->un_throttle)) { 4203 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4204 un->un_min_throttle = sd_min_throttle; 4205 } 4206 } 4207 4208 /* 4209 * Function: sd_is_lsi() 4210 * 4211 * Description: Check for LSI devices; step through the static device 4212 * table to match vid/pid. 4213 * 4214 * Args: un - ptr to sd_lun 4215 * 4216 * Notes: When a new LSI property is created, it must also be added 4217 * to this function. 4218 */ 4219 static void 4220 sd_is_lsi(struct sd_lun *un) 4221 { 4222 char *id = NULL; 4223 int table_index; 4224 int idlen; 4225 void *prop; 4226 4227 ASSERT(un != NULL); 4228 for (table_index = 0; table_index < sd_disk_table_size; 4229 table_index++) { 4230 id = sd_disk_table[table_index].device_id; 4231 idlen = strlen(id); 4232 if (idlen == 0) { 4233 continue; 4234 } 4235 4236 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4237 prop = sd_disk_table[table_index].properties; 4238 if (prop == &lsi_properties || 4239 prop == &lsi_oem_properties || 4240 prop == &lsi_properties_scsi || 4241 prop == &symbios_properties) { 4242 un->un_f_cfg_is_lsi = TRUE; 4243 } 4244 break; 4245 } 4246 } 4247 } 4248 4249 /* 4250 * Function: sd_get_physical_geometry 4251 * 4252 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4253 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4254 * target, and use this information to initialize the physical 4255 * geometry cache specified by pgeom_p. 4256 * 4257 * MODE SENSE is an optional command, so failure in this case 4258 * does not necessarily denote an error. We want to use the 4259 * MODE SENSE commands to derive the physical geometry of the 4260 * device, but if either command fails, the logical geometry is 4261 * used as the fallback for disk label geometry in cmlb. 4262 * 4263 * This requires that un->un_blockcount and un->un_tgt_blocksize 4264 * have already been initialized for the current target and 4265 * that the current values be passed as args so that we don't 4266 * end up ever trying to use -1 as a valid value. This could 4267 * happen if either value is reset while we're not holding 4268 * the mutex. 4269 * 4270 * Arguments: un - driver soft state (unit) structure 4271 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4272 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4273 * to use the USCSI "direct" chain and bypass the normal 4274 * command waitq. 4275 * 4276 * Context: Kernel thread only (can sleep).
4277 */ 4278 4279 static int 4280 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4281 diskaddr_t capacity, int lbasize, int path_flag) 4282 { 4283 struct mode_format *page3p; 4284 struct mode_geometry *page4p; 4285 struct mode_header *headerp; 4286 int sector_size; 4287 int nsect; 4288 int nhead; 4289 int ncyl; 4290 int intrlv; 4291 int spc; 4292 diskaddr_t modesense_capacity; 4293 int rpm; 4294 int bd_len; 4295 int mode_header_length; 4296 uchar_t *p3bufp; 4297 uchar_t *p4bufp; 4298 int cdbsize; 4299 int ret = EIO; 4300 4301 ASSERT(un != NULL); 4302 4303 if (lbasize == 0) { 4304 if (ISCD(un)) { 4305 lbasize = 2048; 4306 } else { 4307 lbasize = un->un_sys_blocksize; 4308 } 4309 } 4310 pgeom_p->g_secsize = (unsigned short)lbasize; 4311 4312 /* 4313 * If the unit is a cd/dvd drive MODE SENSE page three 4314 * and MODE SENSE page four are reserved (see SBC spec 4315 * and MMC spec). To prevent soft errors just return 4316 * using the default LBA size. 4317 */ 4318 if (ISCD(un)) 4319 return (ret); 4320 4321 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4322 4323 /* 4324 * Retrieve MODE SENSE page 3 - Format Device Page 4325 */ 4326 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4327 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4328 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4329 != 0) { 4330 SD_ERROR(SD_LOG_COMMON, un, 4331 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4332 goto page3_exit; 4333 } 4334 4335 /* 4336 * Determine size of Block Descriptors in order to locate the mode 4337 * page data. ATAPI devices return 0, SCSI devices should return 4338 * MODE_BLK_DESC_LENGTH. 4339 */ 4340 headerp = (struct mode_header *)p3bufp; 4341 if (un->un_f_cfg_is_atapi == TRUE) { 4342 struct mode_header_grp2 *mhp = 4343 (struct mode_header_grp2 *)headerp; 4344 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4345 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4346 } else { 4347 mode_header_length = MODE_HEADER_LENGTH; 4348 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4349 } 4350 4351 if (bd_len > MODE_BLK_DESC_LENGTH) { 4352 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4353 "received unexpected bd_len of %d, page3\n", bd_len); 4354 goto page3_exit; 4355 } 4356 4357 page3p = (struct mode_format *) 4358 ((caddr_t)headerp + mode_header_length + bd_len); 4359 4360 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4361 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4362 "mode sense pg3 code mismatch %d\n", 4363 page3p->mode_page.code); 4364 goto page3_exit; 4365 } 4366 4367 /* 4368 * Use this physical geometry data only if BOTH MODE SENSE commands 4369 * complete successfully; otherwise, revert to the logical geometry. 4370 * So, we need to save everything in temporary variables. 
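 *
 * Note: The multi-byte mode page fields arrive in big-endian (SCSI)
 * byte order; the BE_16() conversions below put the sectors/track,
 * interleave, skew, and rpm values into host byte order before use.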
4371 */ 4372 sector_size = BE_16(page3p->data_bytes_sect); 4373 4374 /* 4375 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4376 */ 4377 if (sector_size == 0) { 4378 sector_size = un->un_sys_blocksize; 4379 } else { 4380 sector_size &= ~(un->un_sys_blocksize - 1); 4381 } 4382 4383 nsect = BE_16(page3p->sect_track); 4384 intrlv = BE_16(page3p->interleave); 4385 4386 SD_INFO(SD_LOG_COMMON, un, 4387 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4388 SD_INFO(SD_LOG_COMMON, un, 4389 " mode page: %d; nsect: %d; sector size: %d;\n", 4390 page3p->mode_page.code, nsect, sector_size); 4391 SD_INFO(SD_LOG_COMMON, un, 4392 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4393 BE_16(page3p->track_skew), 4394 BE_16(page3p->cylinder_skew)); 4395 4396 4397 /* 4398 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4399 */ 4400 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4401 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4402 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4403 != 0) { 4404 SD_ERROR(SD_LOG_COMMON, un, 4405 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4406 goto page4_exit; 4407 } 4408 4409 /* 4410 * Determine size of Block Descriptors in order to locate the mode 4411 * page data. ATAPI devices return 0, SCSI devices should return 4412 * MODE_BLK_DESC_LENGTH. 4413 */ 4414 headerp = (struct mode_header *)p4bufp; 4415 if (un->un_f_cfg_is_atapi == TRUE) { 4416 struct mode_header_grp2 *mhp = 4417 (struct mode_header_grp2 *)headerp; 4418 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4419 } else { 4420 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4421 } 4422 4423 if (bd_len > MODE_BLK_DESC_LENGTH) { 4424 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4425 "received unexpected bd_len of %d, page4\n", bd_len); 4426 goto page4_exit; 4427 } 4428 4429 page4p = (struct mode_geometry *) 4430 ((caddr_t)headerp + mode_header_length + bd_len); 4431 4432 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4433 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4434 "mode sense pg4 code mismatch %d\n", 4435 page4p->mode_page.code); 4436 goto page4_exit; 4437 } 4438 4439 /* 4440 * Stash the data now, after we know that both commands completed. 4441 */ 4442 4443 4444 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4445 spc = nhead * nsect; 4446 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4447 rpm = BE_16(page4p->rpm); 4448 4449 modesense_capacity = spc * ncyl; 4450 4451 SD_INFO(SD_LOG_COMMON, un, 4452 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4453 SD_INFO(SD_LOG_COMMON, un, 4454 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4455 SD_INFO(SD_LOG_COMMON, un, 4456 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4457 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4458 (void *)pgeom_p, capacity); 4459 4460 /* 4461 * Compensate if the drive's geometry is not rectangular, i.e., 4462 * the product of C * H * S returned by MODE SENSE >= that returned 4463 * by read capacity. This is an idiosyncrasy of the original x86 4464 * disk subsystem. 
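 *
 * As a worked example (numbers hypothetical): with nhead = 16 and
 * nsect = 63 (so spc = 1008), ncyl = 1000, and a READ CAPACITY value
 * of 1000000 blocks, modesense_capacity is 1008000 >= 1000000, so
 * g_acyl = (1008000 - 1000000 + 1007) / 1008 = 8 alternate cylinders
 * and g_ncyl = 1000 - 8 = 992.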
4465 */ 4466 if (modesense_capacity >= capacity) { 4467 SD_INFO(SD_LOG_COMMON, un, 4468 "sd_get_physical_geometry: adjusting acyl; " 4469 "old: %d; new: %d\n", pgeom_p->g_acyl, 4470 (modesense_capacity - capacity + spc - 1) / spc); 4471 if (sector_size != 0) { 4472 /* 1243403: NEC D38x7 drives don't support sec size */ 4473 pgeom_p->g_secsize = (unsigned short)sector_size; 4474 } 4475 pgeom_p->g_nsect = (unsigned short)nsect; 4476 pgeom_p->g_nhead = (unsigned short)nhead; 4477 pgeom_p->g_capacity = capacity; 4478 pgeom_p->g_acyl = 4479 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4480 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4481 } 4482 4483 pgeom_p->g_rpm = (unsigned short)rpm; 4484 pgeom_p->g_intrlv = (unsigned short)intrlv; 4485 ret = 0; 4486 4487 SD_INFO(SD_LOG_COMMON, un, 4488 "sd_get_physical_geometry: mode sense geometry:\n"); 4489 SD_INFO(SD_LOG_COMMON, un, 4490 " nsect: %d; sector size: %d; interlv: %d\n", 4491 nsect, sector_size, intrlv); 4492 SD_INFO(SD_LOG_COMMON, un, 4493 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4494 nhead, ncyl, rpm, modesense_capacity); 4495 SD_INFO(SD_LOG_COMMON, un, 4496 "sd_get_physical_geometry: (cached)\n"); 4497 SD_INFO(SD_LOG_COMMON, un, 4498 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4499 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4500 pgeom_p->g_nhead, pgeom_p->g_nsect); 4501 SD_INFO(SD_LOG_COMMON, un, 4502 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4503 pgeom_p->g_secsize, pgeom_p->g_capacity, 4504 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4505 4506 page4_exit: 4507 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4508 page3_exit: 4509 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4510 4511 return (ret); 4512 } 4513 4514 /* 4515 * Function: sd_get_virtual_geometry 4516 * 4517 * Description: Ask the controller to tell us about the target device. 4518 * 4519 * Arguments: un - pointer to softstate 4520 * capacity - disk capacity in #blocks 4521 * lbasize - disk block size in bytes 4522 * 4523 * Context: Kernel thread only 4524 */ 4525 4526 static int 4527 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4528 diskaddr_t capacity, int lbasize) 4529 { 4530 uint_t geombuf; 4531 int spc; 4532 4533 ASSERT(un != NULL); 4534 4535 /* Set sector size, and total number of sectors */ 4536 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4537 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4538 4539 /* Let the HBA tell us its geometry */ 4540 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4541 4542 /* A value of -1 indicates an undefined "geometry" property */ 4543 if (geombuf == (-1)) { 4544 return (EINVAL); 4545 } 4546 4547 /* Initialize the logical geometry cache. */ 4548 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4549 lgeom_p->g_nsect = geombuf & 0xffff; 4550 lgeom_p->g_secsize = un->un_sys_blocksize; 4551 4552 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4553 4554 /* 4555 * Note: The driver originally converted the capacity value from 4556 * target blocks to system blocks. However, the capacity value passed 4557 * to this routine is already in terms of system blocks (this scaling 4558 * is done when the READ CAPACITY command is issued and processed). 4559 * This 'error' may have gone undetected because the usage of g_ncyl 4560 * (which is based upon g_capacity) is very limited within the driver 4561 */ 4562 lgeom_p->g_capacity = capacity; 4563 4564 /* 4565 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4566 * hba may return zero values if the device has been removed. 4567 */ 4568 if (spc == 0) { 4569 lgeom_p->g_ncyl = 0; 4570 } else { 4571 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4572 } 4573 lgeom_p->g_acyl = 0; 4574 4575 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4576 return (0); 4577 4578 } 4579 /* 4580 * Function: sd_update_block_info 4581 * 4582 * Description: Update the target block size and block count in the 4583 * driver soft state, and refresh the derived capacity properties. 4584 * 4585 * Arguments: un: unit struct. 4586 * lbasize: new target sector size 4587 * capacity: new target capacity, i.e. block count 4588 * 4589 * Context: Kernel thread context 4590 */ 4591 4592 static void 4593 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4594 { 4595 uint_t dblk; 4596 4597 if (lbasize != 0) { 4598 un->un_tgt_blocksize = lbasize; 4599 un->un_f_tgt_blocksize_is_valid = TRUE; 4600 } 4601 4602 if (capacity != 0) { 4603 un->un_blockcount = capacity; 4604 un->un_f_blockcount_is_valid = TRUE; 4605 } 4606 4607 /* 4608 * Update device capacity properties. 4609 * 4610 * 'device-nblocks' number of blocks in target's units 4611 * 'device-blksize' data bearing size of target's block 4612 * 4613 * NOTE: math is complicated by the fact that un_tgt_blocksize may 4614 * not be a power of two for checksumming disks with 520/528 byte 4615 * sectors. 4616 */ 4617 if (un->un_f_tgt_blocksize_is_valid && 4618 un->un_f_blockcount_is_valid && 4619 un->un_sys_blocksize) { 4620 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 4621 (void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un), 4622 "device-nblocks", un->un_blockcount / dblk); 4623 /* 4624 * To save memory, only define "device-blksize" when its 4625 * value is different from the default DEV_BSIZE value. 4626 */ 4627 if ((un->un_sys_blocksize * dblk) != DEV_BSIZE) 4628 (void) ddi_prop_update_int(DDI_DEV_T_NONE, 4629 SD_DEVINFO(un), "device-blksize", 4630 un->un_sys_blocksize * dblk); 4631 } 4632 } 4633 4634 4635 /* 4636 * Function: sd_register_devid 4637 * 4638 * Description: This routine will obtain the device id information from the 4639 * target, obtain the serial number, and register the device 4640 * id with the ddi framework. 4641 * 4642 * Arguments: devi - the system's dev_info_t for the device. 4643 * un - driver soft state (unit) structure 4644 * reservation_flag - indicates if a reservation conflict 4645 * occurred during attach 4646 * 4647 * Context: Kernel Thread 4648 */ 4649 static void 4650 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4651 { 4652 int rval = 0; 4653 uchar_t *inq80 = NULL; 4654 size_t inq80_len = MAX_INQUIRY_SIZE; 4655 size_t inq80_resid = 0; 4656 uchar_t *inq83 = NULL; 4657 size_t inq83_len = MAX_INQUIRY_SIZE; 4658 size_t inq83_resid = 0; 4659 int dlen, len; 4660 char *sn; 4661 4662 ASSERT(un != NULL); 4663 ASSERT(mutex_owned(SD_MUTEX(un))); 4664 ASSERT((SD_DEVINFO(un)) == devi); 4665 4666 /* 4667 * If the transport has already registered a devid for this target, 4668 * then that takes precedence over the driver's determination 4669 * of the devid. 4670 */ 4671 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 4672 ASSERT(un->un_devid); 4673 return; /* use devid registered by the transport */ 4674 } 4675 4676 /* 4677 * This is the case of antiquated Sun disk drives that have the 4678 * FAB_DEVID property set in the disk_table.
These drives 4679 * manage the devids by storing them in the last 2 available sectors 4680 * on the drive and have them fabricated by the ddi layer by calling 4681 * ddi_devid_init and passing the DEVID_FAB flag. 4682 */ 4683 if (un->un_f_opt_fab_devid == TRUE) { 4684 /* 4685 * Depending on EINVAL isn't reliable, since a reserved disk 4686 * may result in invalid geometry, so check to make sure a 4687 * reservation conflict did not occur during attach. 4688 */ 4689 if ((sd_get_devid(un) == EINVAL) && 4690 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4691 /* 4692 * The devid is invalid AND there is no reservation 4693 * conflict. Fabricate a new devid. 4694 */ 4695 (void) sd_create_devid(un); 4696 } 4697 4698 /* Register the devid if it exists */ 4699 if (un->un_devid != NULL) { 4700 (void) ddi_devid_register(SD_DEVINFO(un), 4701 un->un_devid); 4702 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4703 "sd_register_devid: Devid Fabricated\n"); 4704 } 4705 return; 4706 } 4707 4708 /* 4709 * We check the availability of the World Wide Name (0x83) and Unit 4710 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 4711 * un_vpd_page_mask from them, we decide which way to get the WWN. If 4712 * 0x83 is available, that is the best choice. Our next choice is 4713 * 0x80. If neither is available, we munge the devid from the device 4714 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 4715 * to fabricate a devid for non-Sun qualified disks. 4716 */ 4717 if (sd_check_vpd_page_support(un) == 0) { 4718 /* collect page 80 data if available */ 4719 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 4720 4721 mutex_exit(SD_MUTEX(un)); 4722 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 4723 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 4724 0x01, 0x80, &inq80_resid); 4725 4726 if (rval != 0) { 4727 kmem_free(inq80, inq80_len); 4728 inq80 = NULL; 4729 inq80_len = 0; 4730 } else if (ddi_prop_exists( 4731 DDI_DEV_T_NONE, SD_DEVINFO(un), 4732 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 4733 INQUIRY_SERIAL_NO) == 0) { 4734 /* 4735 * If we don't already have a serial number 4736 * property, do quick verify of data returned 4737 * and define property. 4738 */ 4739 dlen = inq80_len - inq80_resid; 4740 len = (size_t)inq80[3]; 4741 if ((dlen >= 4) && ((len + 4) <= dlen)) { 4742 /* 4743 * Ensure sn termination, skip leading 4744 * blanks, and create property 4745 * 'inquiry-serial-no'.
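 *
 * (In the unit serial number page, byte 3 holds the length of
 * the serial number, which itself begins at byte 4; this is
 * what the dlen/len arithmetic above validates.)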
4746 */ 4747 sn = (char *)&inq80[4]; 4748 sn[len] = 0; 4749 while (*sn && (*sn == ' ')) 4750 sn++; 4751 if (*sn) { 4752 (void) ddi_prop_update_string( 4753 DDI_DEV_T_NONE, 4754 SD_DEVINFO(un), 4755 INQUIRY_SERIAL_NO, sn); 4756 } 4757 } 4758 } 4759 mutex_enter(SD_MUTEX(un)); 4760 } 4761 4762 /* collect page 83 data if available */ 4763 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 4764 mutex_exit(SD_MUTEX(un)); 4765 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 4766 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 4767 0x01, 0x83, &inq83_resid); 4768 4769 if (rval != 0) { 4770 kmem_free(inq83, inq83_len); 4771 inq83 = NULL; 4772 inq83_len = 0; 4773 } 4774 mutex_enter(SD_MUTEX(un)); 4775 } 4776 } 4777 4778 /* encode best devid possible based on data available */ 4779 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 4780 (char *)ddi_driver_name(SD_DEVINFO(un)), 4781 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 4782 inq80, inq80_len - inq80_resid, inq83, inq83_len - 4783 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 4784 4785 /* devid successfully encoded, register devid */ 4786 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 4787 4788 } else { 4789 /* 4790 * Unable to encode a devid based on data available. 4791 * This is not a Sun qualified disk. Older Sun disk 4792 * drives that have the SD_FAB_DEVID property 4793 * set in the disk_table and non-Sun qualified 4794 * disks are treated in the same manner. These 4795 * drives manage the devids by storing them in 4796 * the last 2 available sectors on the drive and 4797 * have them fabricated by the ddi layer by 4798 * calling ddi_devid_init and passing the 4799 * DEVID_FAB flag. 4800 * Create a fabricated devid only if one does 4801 * not already exist. 4802 */ 4803 if (sd_get_devid(un) == EINVAL) { 4804 (void) sd_create_devid(un); 4805 } 4806 un->un_f_opt_fab_devid = TRUE; 4807 4808 /* Register the devid if it exists */ 4809 if (un->un_devid != NULL) { 4810 (void) ddi_devid_register(SD_DEVINFO(un), 4811 un->un_devid); 4812 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4813 "sd_register_devid: devid fabricated using " 4814 "ddi framework\n"); 4815 } 4816 } 4817 4818 /* clean up resources */ 4819 if (inq80 != NULL) { 4820 kmem_free(inq80, inq80_len); 4821 } 4822 if (inq83 != NULL) { 4823 kmem_free(inq83, inq83_len); 4824 } 4825 } 4826 4827 4828 4829 /* 4830 * Function: sd_get_devid 4831 * 4832 * Description: This routine will return 0 if a valid device id has been 4833 * obtained from the target and stored in the soft state. If a 4834 * valid device id has not been previously read and stored, a 4835 * read attempt will be made.
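 *
 *		Note: As read back below, the on-disk dk_devid block is
 *		validated in three steps: the revision bytes are checked,
 *		the block is XOR-ed as an array of uint_t and compared
 *		with the stored checksum, and the embedded devid is then
 *		checked with ddi_devid_valid().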
4836 * 4837 * Arguments: un - driver soft state (unit) structure 4838 * 4839 * Return Code: 0 if we successfully get the device id 4840 * 4841 * Context: Kernel Thread 4842 */ 4843 4844 static int 4845 sd_get_devid(struct sd_lun *un) 4846 { 4847 struct dk_devid *dkdevid; 4848 ddi_devid_t tmpid; 4849 uint_t *ip; 4850 size_t sz; 4851 diskaddr_t blk; 4852 int status; 4853 int chksum; 4854 int i; 4855 size_t buffer_size; 4856 4857 ASSERT(un != NULL); 4858 ASSERT(mutex_owned(SD_MUTEX(un))); 4859 4860 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4861 un); 4862 4863 if (un->un_devid != NULL) { 4864 return (0); 4865 } 4866 4867 mutex_exit(SD_MUTEX(un)); 4868 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4869 (void *)SD_PATH_DIRECT) != 0) { 4870 mutex_enter(SD_MUTEX(un)); 4871 return (EINVAL); 4872 } 4873 4874 /* 4875 * Read and verify device id, stored in the reserved cylinders at the 4876 * end of the disk. Backup label is on the odd sectors of the last 4877 * track of the last cylinder. Device id will be on track of the next 4878 * to last cylinder. 4879 */ 4880 mutex_enter(SD_MUTEX(un)); 4881 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4882 mutex_exit(SD_MUTEX(un)); 4883 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4884 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4885 SD_PATH_DIRECT); 4886 if (status != 0) { 4887 goto error; 4888 } 4889 4890 /* Validate the revision */ 4891 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4892 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4893 status = EINVAL; 4894 goto error; 4895 } 4896 4897 /* Calculate the checksum */ 4898 chksum = 0; 4899 ip = (uint_t *)dkdevid; 4900 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4901 i++) { 4902 chksum ^= ip[i]; 4903 } 4904 4905 /* Compare the checksums */ 4906 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4907 status = EINVAL; 4908 goto error; 4909 } 4910 4911 /* Validate the device id */ 4912 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4913 status = EINVAL; 4914 goto error; 4915 } 4916 4917 /* 4918 * Store the device id in the driver soft state 4919 */ 4920 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4921 tmpid = kmem_alloc(sz, KM_SLEEP); 4922 4923 mutex_enter(SD_MUTEX(un)); 4924 4925 un->un_devid = tmpid; 4926 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4927 4928 kmem_free(dkdevid, buffer_size); 4929 4930 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4931 4932 return (status); 4933 error: 4934 mutex_enter(SD_MUTEX(un)); 4935 kmem_free(dkdevid, buffer_size); 4936 return (status); 4937 } 4938 4939 4940 /* 4941 * Function: sd_create_devid 4942 * 4943 * Description: This routine will fabricate the device id and write it 4944 * to the disk. 
4945 * 4946 * Arguments: un - driver soft state (unit) structure 4947 * 4948 * Return Code: value of the fabricated device id 4949 * 4950 * Context: Kernel Thread 4951 */ 4952 4953 static ddi_devid_t 4954 sd_create_devid(struct sd_lun *un) 4955 { 4956 ASSERT(un != NULL); 4957 4958 /* Fabricate the devid */ 4959 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 4960 == DDI_FAILURE) { 4961 return (NULL); 4962 } 4963 4964 /* Write the devid to disk */ 4965 if (sd_write_deviceid(un) != 0) { 4966 ddi_devid_free(un->un_devid); 4967 un->un_devid = NULL; 4968 } 4969 4970 return (un->un_devid); 4971 } 4972 4973 4974 /* 4975 * Function: sd_write_deviceid 4976 * 4977 * Description: This routine will write the device id to the disk 4978 * reserved sector. 4979 * 4980 * Arguments: un - driver soft state (unit) structure 4981 * 4982 * Return Code: -1 if the devid location could not be determined, 4983 * otherwise the value returned by sd_send_scsi_WRITE 4984 * 4985 * Context: Kernel Thread 4986 */ 4987 4988 static int 4989 sd_write_deviceid(struct sd_lun *un) 4990 { 4991 struct dk_devid *dkdevid; 4992 diskaddr_t blk; 4993 uint_t *ip, chksum; 4994 int status; 4995 int i; 4996 4997 ASSERT(mutex_owned(SD_MUTEX(un))); 4998 4999 mutex_exit(SD_MUTEX(un)); 5000 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5001 (void *)SD_PATH_DIRECT) != 0) { 5002 mutex_enter(SD_MUTEX(un)); 5003 return (-1); 5004 } 5005 5006 5007 /* Allocate the buffer */ 5008 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5009 5010 /* Fill in the revision */ 5011 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5012 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5013 5014 /* Copy in the device id */ 5015 mutex_enter(SD_MUTEX(un)); 5016 bcopy(un->un_devid, &dkdevid->dkd_devid, 5017 ddi_devid_sizeof(un->un_devid)); 5018 mutex_exit(SD_MUTEX(un)); 5019 5020 /* Calculate the checksum */ 5021 chksum = 0; 5022 ip = (uint_t *)dkdevid; 5023 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5024 i++) { 5025 chksum ^= ip[i]; 5026 } 5027 5028 /* Fill-in checksum */ 5029 DKD_FORMCHKSUM(chksum, dkdevid); 5030 5031 /* Write the reserved sector */ 5032 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 5033 SD_PATH_DIRECT); 5034 5035 kmem_free(dkdevid, un->un_sys_blocksize); 5036 5037 mutex_enter(SD_MUTEX(un)); 5038 return (status); 5039 } 5040 5041 5042 /* 5043 * Function: sd_check_vpd_page_support 5044 * 5045 * Description: This routine sends an inquiry command with the EVPD bit set and 5046 * a page code of 0x00 to the device. It is used to determine which 5047 * vital product pages are available to find the devid. We are 5048 * looking for pages 0x83 or 0x80. If negative 1 is returned, the 5049 * device does not support that command. 5050 * 5051 * Arguments: un - driver soft state (unit) structure 5052 * 5053 * Return Code: 0 - success 5054 * -1 - the device does not support VPD pages 5055 * 5056 * Context: This routine can sleep. 5057 */ 5058 5059 static int 5060 sd_check_vpd_page_support(struct sd_lun *un) 5061 { 5062 uchar_t *page_list = NULL; 5063 uchar_t page_length = 0xff; /* Use max possible length */ 5064 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5065 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5066 int rval = 0; 5067 int counter; 5068 5069 ASSERT(un != NULL); 5070 ASSERT(mutex_owned(SD_MUTEX(un))); 5071 5072 mutex_exit(SD_MUTEX(un)); 5073 5074 /* 5075 * We'll set the page length to the maximum to save figuring it out 5076 * with an additional call.
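 *
 * For example, a hypothetical page 0 response of
 *
 *	00 00 00 04 00 80 83 86
 *
 * lists four supported page codes starting at byte 4, and the loop
 * below would set SD_VPD_SUPPORTED_PG, SD_VPD_UNIT_SERIAL_PG,
 * SD_VPD_DEVID_WWN_PG and SD_VPD_EXTENDED_DATA_PG in
 * un_vpd_page_mask.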
5077 */ 5078 page_list = kmem_zalloc(page_length, KM_SLEEP); 5079 5080 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5081 page_code, NULL); 5082 5083 mutex_enter(SD_MUTEX(un)); 5084 5085 /* 5086 * Now we must validate that the device accepted the command, as some 5087 * drives do not support it. If the drive does support it, we will 5088 * return 0, and the supported pages will be in un_vpd_page_mask. If 5089 * not, we return -1. 5090 */ 5091 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5092 /* Loop to find one of the 2 pages we need */ 5093 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5094 5095 /* 5096 * Pages are returned in ascending order, and 0x83 is what we 5097 * are hoping for. 5098 */ 5099 while ((page_list[counter] <= 0x86) && 5100 (counter <= (page_list[VPD_PAGE_LENGTH] + 5101 VPD_HEAD_OFFSET))) { 5102 /* 5103 * page_list[VPD_PAGE_LENGTH] (byte 3) holds the number 5104 * of page code bytes that follow the 4-byte header, 5105 * hence the loop bound above. */ 5106 5107 switch (page_list[counter]) { 5108 case 0x00: 5109 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5110 break; 5111 case 0x80: 5112 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5113 break; 5114 case 0x81: 5115 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5116 break; 5117 case 0x82: 5118 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5119 break; 5120 case 0x83: 5121 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5122 break; 5123 case 0x86: 5124 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5125 break; 5126 } 5127 counter++; 5128 } 5129 5130 } else { 5131 rval = -1; 5132 5133 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5134 "sd_check_vpd_page_support: This drive does not implement " 5135 "VPD pages.\n"); 5136 } 5137 5138 kmem_free(page_list, page_length); 5139 5140 return (rval); 5141 } 5142 5143 5144 /* 5145 * Function: sd_setup_pm 5146 * 5147 * Description: Initialize Power Management on the device 5148 * 5149 * Context: Kernel Thread 5150 */ 5151 5152 static void 5153 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5154 { 5155 uint_t log_page_size; 5156 uchar_t *log_page_data; 5157 int rval; 5158 5159 /* 5160 * Since we are called from attach, holding a mutex for 5161 * un is unnecessary. Because some of the routines called 5162 * from here require SD_MUTEX to not be held, assert this 5163 * right up front. 5164 */ 5165 ASSERT(!mutex_owned(SD_MUTEX(un))); 5166 /* 5167 * Since the sd device does not have the 'reg' property, 5168 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5169 * The following code is to tell cpr that this device 5170 * DOES need to be suspended and resumed. 5171 */ 5172 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5173 "pm-hardware-state", "needs-suspend-resume"); 5174 5175 /* 5176 * This complies with the new power management framework 5177 * for certain desktop machines. Create the pm_components 5178 * property as a string array property. 5179 */ 5180 if (un->un_f_pm_supported) { 5181 /* 5182 * Not all devices have a motor, so try it first. Some 5183 * devices may return ILLEGAL REQUEST; some 5184 * will hang. 5185 * The following START_STOP_UNIT is used to check whether the 5186 * target device has a motor.
		un->un_f_start_stop_supported = TRUE;
		if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
		    SD_PATH_DIRECT) != 0) {
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * Create the pm properties anyway; otherwise the parent
		 * can't go to sleep.
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;
		return;
	}

	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE
		 * (0xE).
		 */
		if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}


	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the Log Sense for the Start/Stop Cycle Counter page
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		/*
		 * When components are initially created they are idle,
		 * power up any non-removables.
		 */
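#if 0	/* Illustrative sketch only -- not part of the driver source. */
/*
 * A userland model of the log-page fallback in sd_setup_pm() above:
 * prefer the standard start/stop cycle counter log page (0x0E), fall
 * back to the vendor-unique page (0x31) used by older disks, and
 * report auto-pm as unsupported when neither probe succeeds.
 * probe_page() is a hypothetical stand-in for sd_log_page_supported().
 */
static int
select_cycle_page(int (*probe_page)(int), int *pagep)
{
	if (probe_page(0x0E) == 1) {	/* START_STOP_CYCLE_PAGE */
		*pagep = 0x0E;
		return (1);		/* auto-pm can be enabled */
	}
	if (probe_page(0x31) == 1) {	/* START_STOP_CYCLE_VU_PAGE */
		*pagep = 0x31;
		return (1);
	}
	return (0);			/* auto-pm not supported */
}
#endif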
		/*
		 * Note: the return value of pm_raise_power can't be used
		 * for determining if PM should be enabled for this device.
		 * Even if you check the return values and remove this
		 * property created above, the PM framework will not honor the
		 * change after the first call to pm_raise_power. Hence,
		 * removal of that property does not help if pm_raise_power
		 * fails. In the case of removable media, the start/stop
		 * will fail if the media is not present.
		 */
		if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
		    SD_SPINDLE_ON) == DDI_SUCCESS)) {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_ON;
			mutex_enter(&un->un_pm_mutex);
			/* Set to on and not busy. */
			un->un_pm_count = 0;
		} else {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_OFF;
			mutex_enter(&un->un_pm_mutex);
			/* Set to off. */
			un->un_pm_count = -1;
		}
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
	} else {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
	}
}


/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations. This includes
 *		setting the drive state to indicate it is suspended so
 *		that no new commands will be accepted. Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete. All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
	struct sd_lun	*un;
	clock_t		wait_cmds_complete;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* Return success if the device is already suspended. */
	if (un->un_state == SD_STATE_SUSPENDED) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device already suspended, exiting\n");
		return (DDI_SUCCESS);
	}

	/* Return failure if the device is being used by HA */
	if (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in use by HA, exiting\n");
		return (DDI_FAILURE);
	}

	/*
	 * Return failure if the device is in a resource wait
	 * or power changing state.
	 */
	if ((un->un_state == SD_STATE_RWAIT) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in resource wait state, exiting\n");
		return (DDI_FAILURE);
	}


	un->un_save_state = un->un_last_state;
	New_state(un, SD_STATE_SUSPENDED);

	/*
	 * Wait for all commands that are in transport or queued to a timer
	 * for retry to complete.
	 *
	 * While waiting, no new commands will be accepted or sent because of
	 * the new state we set above.
	 *
	 * Wait until the current operation has completed. If we are in the
	 * resource wait state (with an intr outstanding) then we need to
	 * wait until the intr completes and starts the next cmd. We want to
	 * wait for SD_WAIT_CMDS_COMPLETE seconds before failing the
	 * DDI_SUSPEND.
	 */
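#if 0	/* Illustrative sketch only -- not part of the driver source. */
/*
 * A userland (POSIX threads) model of the drain loop that follows:
 * compute one absolute deadline up front, then keep waiting on the
 * condition variable until either the in-flight count drops to zero
 * or the deadline passes. Reusing the same absolute deadline across
 * iterations caps the total wait, just as the cv_timedwait() loop
 * below bounds it by SD_WAIT_CMDS_COMPLETE seconds.
 */
#include <pthread.h>
#include <time.h>
#include <errno.h>

static int
drain_cmds(pthread_mutex_t *mp, pthread_cond_t *cvp, int *ncmds,
    int max_wait_secs)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += max_wait_secs;

	/* Caller holds *mp, mirroring SD_MUTEX in sd_ddi_suspend(). */
	while (*ncmds != 0) {
		if (pthread_cond_timedwait(cvp, mp, &deadline) == ETIMEDOUT)
			return (-1);	/* commands did not drain in time */
	}
	return (0);
}
#endif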
	wait_cmds_complete = ddi_get_lbolt() +
	    (sd_wait_cmds_complete * drv_usectohz(1000000));

	while (un->un_ncmds_in_transport != 0) {
		/*
		 * Fail if commands do not finish in the specified time.
		 */
		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
		    wait_cmds_complete) == -1) {
			/*
			 * Undo the state changes made above. Everything
			 * must go back to its original value.
			 */
			Restore_state(un);
			un->un_last_state = un->un_save_state;
			/* Wake up any threads that might be waiting. */
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_ERROR(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: failed due to outstanding cmds\n");
			SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cancel SCSI watch thread and timeouts, if any are active
	 */

	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
		opaque_t temp_token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		scsi_watch_suspend(temp_token);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));

		if (un->un_retry_bp != NULL) {
			un->un_retry_bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = un->un_retry_bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = un->un_retry_bp;
			}
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
	}

	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
	ASSERT(un != NULL);
	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	/*
	 * Exit if power management is not enabled for this device, or if
	 * the device is being used by HA.
	 */
	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
		return (DDI_SUCCESS);
	}

	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * See if the device is not busy, i.e.:
	 *    - we have no commands in the driver for this device
	 *    - not waiting for resources
	 */
	if ((un->un_ncmds_in_driver == 0) &&
	    (un->un_state != SD_STATE_RWAIT)) {
		/*
		 * The device is not busy, so it is OK to go to low power
		 * state. Indicate low power, but rely on someone else to
		 * actually change it.
		 */
		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count = -1;
		mutex_exit(&un->un_pm_mutex);
		un->un_power_level = SD_SPINDLE_OFF;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state which was saved, to give
	 * un_last_state the right value.
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power,
	 * otherwise the system can get hung in biowait. The scenario where
	 * this will happen is under cpr suspend. Writing of the system
	 * state goes through sddump, which writes 0 to un_throttle. If
	 * writing the system state then fails, for example if the partition
	 * is too small, then cpr attempts a resume. If the throttle isn't
	 * restored from the saved value until after calling pm_raise_power,
	 * then cmds sent in sdpower are not transported and sd_send_scsi_cmd
	 * hangs in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * The chance of failure is very rare, as the only command done in the
	 * power entry point is the START command when you transition from
	 * 0->1 or unknown->1. Put it in the SPINDLE ON state irrespective of
	 * the state at which suspend was done. Ignore the return value, as
	 * the resume should not be failed.
	 * In the case of removable media the media need not be
	 * inserted and hence there is a chance that raise power will fail
	 * with media not present.
	 */
	if (un->un_f_attach_spinup) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device commands in queue will have to wait
	 * until new commands come in, which may take awhile. Also, we
	 * specifically don't check un_ncmds_in_transport because we know that
	 * there really are no commands in progress after the unit was
	 * suspended and we could have reached the throttle level, been
	 * suspended, and have no new commands coming in for awhile. Highly
	 * unlikely, but so is the low-activity disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
	ASSERT(un != NULL);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));
	un->un_power_level = SD_SPINDLE_ON;

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		un->un_pm_count++;
		ASSERT(un->un_pm_count == 0);
		/*
		 * Note: no longer do the cv_broadcast on un_suspend_cv. The
		 * un_suspend_cv is for a system resume, not a power management
		 * device resume. (4297749)
		 *	cv_broadcast(&un->un_suspend_cv);
		 */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));

	return (DDI_SUCCESS);
}


/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
 *
 * Arguments: arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	time_t	now;

	mutex_enter(&sd_detach_mutex);
	if (un->un_detach_count != 0) {
		/* Abort if the instance is detaching */
		mutex_exit(&sd_detach_mutex);
		return;
	}
	mutex_exit(&sd_detach_mutex);

	now = ddi_get_time();
	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell the framework we are idle.
 *
 * Context: timeout(9F) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}


/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
	struct sd_lun	*un;
	int		instance;
	int		rval = DDI_SUCCESS;
	uint_t		i, log_page_size, maxcycles, ncycles;
	uchar_t		*log_page_data;
	int		log_sense_page;
	int		medium_present;
	time_t		intvlp;
	dev_t		dev;
	struct pm_trans_data	sd_pm_tran_data;
	uchar_t		save_state;
	int		sval;
	uchar_t		state_before_pm;
	int		got_semaphore_here;

	instance = ddi_get_instance(devi);

	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	dev = sd_make_device(SD_DEVINFO(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

	/*
	 * Must synchronize power down with close.
	 * Attempt to decrement/acquire the open/close semaphore,
	 * but do NOT wait on it. If it's not greater than zero,
	 * i.e. it can't be decremented without waiting, then
	 * someone else, either open or close, already has it
	 * and the try returns 0. Use that knowledge here to determine
	 * if it's OK to change the device power level.
	 * Also, only increment it on exit if it was decremented,
	 * i.e. gotten, here.
	 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver, or if the semaphore was
	 * not gotten here it indicates an open or close is being processed.
	 * At the same time somebody is requesting to go to low power, which
	 * can't happen; therefore we need to return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");
		return (DDI_FAILURE);
	}

	/*
	 * If the device is OFFLINE, the disk is completely dead; in our
	 * case we would have to put the disk in the on or off state by
	 * sending commands, which of course will fail anyway, so just
	 * return here.
	 *
	 * Power changes to a device that's OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");
		return (DDI_FAILURE);
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the "pm-capable" property is set to TRUE by HBA drivers,
	 * bypass the following checking; otherwise, check the log
	 * sense information for this device.
	 */
	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to understand whether the
		 * power cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");
			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
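#if 0	/* Illustrative sketch only -- not part of the driver source. */
/*
 * A userland model of the "try, and only release if gotten" semaphore
 * pattern that sdpower() uses with sema_tryp()/sema_v() to synchronize
 * power-down with open/close. sem_trywait() fails instead of blocking,
 * which is the POSIX analogue of sema_tryp() returning 0; the
 * do_power_down callback is a hypothetical stand-in for the real work.
 */
#include <semaphore.h>

static int
power_down_guarded(sem_t *oclose_sem, int (*do_power_down)(void))
{
	int got_semaphore_here = (sem_trywait(oclose_sem) == 0);
	int rval = -1;

	if (got_semaphore_here) {
		rval = do_power_down();
		/* Release only if we actually acquired it above. */
		sem_post(oclose_sem);
	}
	return (rval);
}
#endif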
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");
			return (DDI_FAILURE);
		}

		/*
		 * From the page data - Convert the essential information to
		 * pm_trans_data
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1e] << 8)  | log_page_data[0x1f];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8+i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call pm_trans_check routine to get the Ok from
		 * the global policy
		 */

		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not OK to power cycle, or an error in the
			 * parameters passed. pm_trans_check has given
			 * (in intvlp) the advised time before the next
			 * power cycle should be considered. Based on that
			 * intvlp parameter we are supposed to pretend we
			 * are busy so that the pm framework will never call
			 * our power entry point. Because of that, install
			 * a timeout handler and wait for the recommended
			 * time to elapse so that power management can be
			 * effective again.
			 *
			 * To effect this behavior, call pm_busy_component to
			 * indicate to the framework this device is busy.
			 * By not adjusting un_pm_count the rest of PM in
			 * the driver will function normally, and independent
			 * of this, but because the framework is told the
			 * device is busy it won't attempt powering down
			 * until it gets a matching idle. The timeout handler
			 * sends this.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");
			return (DDI_FAILURE);

		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
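#if 0	/* Illustrative sketch only -- not part of the driver source. */
/*
 * A userland model of how the cycle counters above are assembled:
 * the start/stop cycle counter log page stores 4-byte big-endian
 * values, so four consecutive bytes are shifted together MSB-first.
 * The offsets 0x1c (lifetime maximum) and 0x24 (accumulated count)
 * match the ones used in sdpower() above.
 */
#include <stdint.h>

static uint32_t
be32_at(const unsigned char *buf, int off)
{
	return (((uint32_t)buf[off] << 24) | ((uint32_t)buf[off + 1] << 16) |
	    ((uint32_t)buf[off + 2] << 8) | buf[off + 3]);
}

/* maxcycles = be32_at(log_page_data, 0x1c); */
/* ncycles   = be32_at(log_page_data, 0x24); */
#endif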
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");
			return (DDI_FAILURE);
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any cmds getting processed
		 * in the driver when we get here. Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now suspend the device completely before the spindle
		 * is turned off
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine; it uses its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (level == SD_SPINDLE_ON) {
		(void) sd_send_scsi_TEST_UNIT_READY(un,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

	sval = sd_send_scsi_START_STOP_UNIT(un,
	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
	    SD_PATH_DIRECT);
	/* Command failed, check for media present. */
	if ((sval == ENXIO) && un->un_f_has_removable_media) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *   if a spindle off with media present fails,
	 *	then restore the state and return an error.
	 *   else if a spindle on fails,
	 *	then return an error (there's no state to restore).
	 * In all other cases we setup for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present. Put the level back by calling
			 * sd_ddi_pm_resume() and set the state back to
			 * its previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (un->un_f_monitor_media_state) {
			/*
			 * Terminate the watch thread in case of removable
			 * media devices going into low power state.
			 */
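#if 0	/* Illustrative sketch only -- not part of the driver source. */
/*
 * A compact model of the outcome rules implemented by the switch
 * statement around this point: a failed STOP with media present fails
 * (and the previous state is restored), a failed START with media
 * present fails, and a failed spinup with no media succeeds for legacy
 * reasons. The same rule happens to cover both directions, which is
 * what this simplification relies on.
 */
static int
power_change_ok(int cmd_status, int medium_present)
{
	if (cmd_status != 0 && medium_present)
		return (0);	/* failure: caller restores prior state */
	return (1);		/* success, incl. the no-media legacy case */
}
#endif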
			/*
			 * This is as per the requirements of the pm
			 * framework; otherwise commands will be generated
			 * for the device (through the watch thread), even
			 * when the device is in low power state.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_f_watcht_stopped = FALSE;
			if (un->un_swr_token != NULL) {
				opaque_t temp_token = un->un_swr_token;
				un->un_f_watcht_stopped = TRUE;
				un->un_swr_token = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) scsi_watch_request_terminate(temp_token,
				    SCSI_WATCH_TERMINATE_WAIT);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
		}
		break;

	default:	/* The level requested is spindle on... */
		/*
		 * Legacy behavior: return success on a failed spinup
		 * if there is no media in the drive.
		 * Do this by looking at medium_present here.
		 */
		if ((sval != 0) && medium_present) {
			/* The start command from above failed */
			rval = DDI_FAILURE;
			break;
		}
		/*
		 * The start command from above succeeded.
		 * Resume the devices now that we have
		 * started the disks.
		 */
		(void) sd_ddi_pm_resume(un);

		/*
		 * Resume the watch thread since it was suspended
		 * when the device went into low power mode.
		 */
		if (un->un_f_monitor_media_state) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_watcht_stopped == TRUE) {
				opaque_t temp_token;

				un->un_f_watcht_stopped = FALSE;
				mutex_exit(SD_MUTEX(un));
				temp_token = scsi_watch_request_submit(
				    SD_SCSI_DEVP(un),
				    sd_check_media_time,
				    SENSE_LENGTH, sd_media_watch_cb,
				    (caddr_t)dev);
				mutex_enter(SD_MUTEX(un));
				un->un_swr_token = temp_token;
			}
			mutex_exit(SD_MUTEX(un));
		}
	}
	if (got_semaphore_here != 0) {
		sema_v(&un->un_semoclose);
	}
	/*
	 * On exit put the state back to its original value
	 * and broadcast to anyone waiting for the power
	 * change completion.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_state = state_before_pm;
	cv_broadcast(&un->un_suspend_cv);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

	return (rval);
}



/*
 * Function: sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (sd_unit_attach(devi));
	case DDI_RESUME:
		return (sd_ddi_resume(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		return (sd_unit_detach(devi));
	case DDI_SUSPEND:
		return (sd_ddi_suspend(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
 *		state while the callback routine is active.
 *
 * Arguments: un: softstate structure for the instance
 *
 * Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_in_callback >= 0);

	while (un->un_in_callback > 0) {
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}

/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 * Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *	    Kernel thread context only. Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
	struct	scsi_device	*devp;
	struct	sd_lun		*un;
	char			*variantp;
	int	reservation_flag = SD_TARGET_IS_UNRESERVED;
	int	instance;
	int	rval;
	int	wc_enabled;
	int	tgt;
	uint64_t	capacity;
	uint_t		lbasize = 0;
	dev_info_t	*pdip = ddi_get_parent(devi);
	int		offbyone = 0;
	int		geom_label_valid = 0;
#if defined(__sparc)
	int		max_xfer_size;
#endif

	/*
	 * Retrieve the target driver's private data area. This was set
	 * up by the HBA.
	 */
	devp = ddi_get_driver_private(devi);

	/*
	 * Retrieve the target ID of the device.
	 */
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/*
	 * Since we have no idea what state things were left in by the last
	 * user of the device, set up some 'default' settings, i.e. turn 'em
	 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
	 * Do this before the scsi_probe, which sends an inquiry.
	 * This is a fix for bug (4430280).
	 * Of special importance is wide-xfer. The drive could have been left
	 * in wide transfer mode by the last driver to communicate with it;
	 * this includes us. If that's the case, and if the following is not
	 * set up properly or we don't re-negotiate with the drive prior to
	 * transferring data to/from the drive, it causes bus parity errors,
	 * data overruns, and unexpected interrupts. This first occurred when
	 * the fix for bug (4378686) was made.
	 */
	(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

	/*
	 * Currently, scsi_ifsetcap sets the tagged-qing capability for all
	 * LUNs on a target. Setting it per lun instance actually sets the
	 * capability of this target, which affects those luns already
	 * attached on the same target. So during attach, we can disable
	 * this capability only when no other lun has been attached on this
	 * target. By doing this, we assume a target has the same tagged-qing
	 * capability for every lun. The condition can be removed when the
	 * HBA is changed to support per-lun based tagged-qing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
		(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
	}

	/*
	 * Use scsi_probe() to issue an INQUIRY command to the device.
	 * This call will allocate and fill in the scsi_inquiry structure
	 * and point the sd_inq member of the scsi_device structure to it.
	 * If the attach succeeds, then this memory will not be de-allocated
	 * (via scsi_unprobe()) until the instance is detached.
	 */
	if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
		goto probe_failed;
	}

	/*
	 * Check the device type as specified in the inquiry data and
	 * claim it if it is of a type that we support.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_DIRECT:
		break;
	case DTYPE_RODIRECT:
		break;
	case DTYPE_OPTICAL:
		break;
	case DTYPE_NOTPRESENT:
	default:
		/* Unsupported device type; fail the attach. */
		goto probe_failed;
	}

	/*
	 * Allocate the soft state structure for this unit.
	 *
	 * We rely upon this memory being set to all zeroes by
	 * ddi_soft_state_zalloc().  We assume that any member of the
	 * soft state structure that is not explicitly initialized by
	 * this routine will have a value of zero.
	 */
	instance = ddi_get_instance(devp->sd_dev);
	if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
		goto probe_failed;
	}

	/*
	 * Retrieve a pointer to the newly-allocated soft state.
	 *
	 * This should NEVER fail if the ddi_soft_state_zalloc() call above
	 * was successful, unless something has gone horribly wrong and the
	 * ddi's soft state internals are corrupt (in which case it is
	 * probably better to halt here than just fail the attach....)
	 */
	if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
		panic("sd_unit_attach: NULL soft state on instance:0x%x",
		    instance);
		/*NOTREACHED*/
	}

	/*
	 * Link the back ptr of the driver soft state to the scsi_device
	 * struct for this lun.
	 * Save a pointer to the softstate in the driver-private area of
	 * the scsi_device struct.
	 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
	 * we first set un->un_sd below.
	 */
	un->un_sd = devp;
	devp->sd_private = (opaque_t)un;

	/*
	 * The following must be after devp is stored in the soft state struct.
	 */
#ifdef SDDEBUG
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "%s_unit_attach: un:0x%p instance:%d\n",
	    ddi_driver_name(devi), un, instance);
#endif

	/*
	 * Set up the device type and node type (for the minor nodes).
	 */
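#if 0	/* Illustrative sketch only -- not part of the driver source. */
/*
 * A userland model of the claim-and-classify step around this point:
 * only direct-access, CD-ROM, and optical peripheral device types are
 * claimed, and each maps to a controller classification. The numeric
 * codes follow the SCSI peripheral-device-type field (0x00 direct
 * access, 0x05 CD-ROM, 0x07 optical); the enum names are illustrative
 * stand-ins for the DTYPE_ and CTYPE_ constants in the real headers.
 */
enum ctype { CT_CCS, CT_CDROM, CT_ROD, CT_UNSUPPORTED };

static enum ctype
classify_dtype(unsigned char inq_dtype)
{
	switch (inq_dtype & 0x1f) {
	case 0x00:	return (CT_CCS);	/* direct access */
	case 0x05:	return (CT_CDROM);	/* CD-ROM / RODIRECT */
	case 0x07:	return (CT_ROD);	/* optical */
	default:	return (CT_UNSUPPORTED); /* fail the attach */
	}
}
#endif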
	/*
	 * By default we assume that the device can at least support the
	 * Common Command Set. Call it a CD-ROM if it reports itself
	 * as a RODIRECT device.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_RODIRECT:
		un->un_node_type = DDI_NT_CD_CHAN;
		un->un_ctype	 = CTYPE_CDROM;
		break;
	case DTYPE_OPTICAL:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_ROD;
		break;
	default:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_CCS;
		break;
	}

	/*
	 * Try to read the interconnect type from the HBA.
	 *
	 * Note: This driver is currently compiled as two binaries, a parallel
	 * scsi version (sd) and a fibre channel version (ssd). All functional
	 * differences are determined at compile time. In the future a single
	 * binary will be provided and the interconnect type will be used to
	 * differentiate between fibre and parallel scsi behaviors. At that
	 * time it will be necessary for all fibre channel HBAs to support
	 * this property.
	 *
	 * Set un_f_is_fibre to TRUE (default fibre).
	 */
	un->un_f_is_fibre = TRUE;
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_SSA:
		un->un_interconnect_type = SD_INTERCONNECT_SSA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
		break;
	case INTERCONNECT_PARALLEL:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
		break;
	case INTERCONNECT_SATA:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_SATA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
		break;
	case INTERCONNECT_FIBRE:
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
		break;
	case INTERCONNECT_FABRIC:
		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
		un->un_node_type = DDI_NT_BLOCK_FABRIC;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
		break;
	default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
		/*
		 * The HBA does not support the "interconnect-type" property
		 * (or did not provide a recognized type).
		 *
		 * Note: This will be obsoleted when a single fibre channel
		 * and parallel scsi driver is delivered. In the meantime the
		 * interconnect type will be set to the platform default. If
		 * that type is not parallel SCSI, it means that we should be
		 * assuming "ssd" semantics. However, here this also means
		 * that the FC HBA is not supporting the "interconnect-type"
		 * property like we expect it to, so log this occurrence.
		 */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		if (!SD_IS_PARALLEL_SCSI(un)) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_FIBRE\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_PARALLEL\n", un);
			un->un_f_is_fibre = FALSE;
		}
#else
		/*
		 * Note: This source will be implemented when a single fibre
		 * channel and parallel scsi driver is delivered.
		 * The default will be to assume that if a device does not
		 * support the "interconnect-type" property it is a parallel
		 * SCSI HBA, and we will set the interconnect type for
		 * parallel scsi.
		 */
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		un->un_f_is_fibre = FALSE;
#endif
		break;
	}

	if (un->un_f_is_fibre == TRUE) {
		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
		    SCSI_VERSION_3) {
			switch (un->un_interconnect_type) {
			case SD_INTERCONNECT_FIBRE:
			case SD_INTERCONNECT_SSA:
				un->un_node_type = DDI_NT_BLOCK_WWN;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Initialize the Request Sense command for the target
	 */
	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
		goto alloc_rqs_failed;
	}

	/*
	 * Set un_retry_count with SD_RETRY_COUNT; this is ok for Sparc
	 * with separate binaries for sd and ssd.
	 *
	 * x86 has 1 binary, and un_retry_count is set based on connection
	 * type. The hardcoded values will go away when Sparc uses 1 binary
	 * for sd and ssd. These hardcoded values need to match
	 * SD_RETRY_COUNT in sddef.h.
	 * The value used is based on interconnect type:
	 * fibre = 3, parallel = 5.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
#else
	un->un_retry_count = SD_RETRY_COUNT;
#endif

	/*
	 * Set the per disk retry count to the default number of retries
	 * for disks and CDROMs. This value can be overridden by the
	 * disk property list or an entry in sd.conf.
	 */
	un->un_notready_retry_count =
	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
	    : DISK_NOT_READY_RETRY_COUNT(un);

	/*
	 * Set the busy retry count to the default value of un_retry_count.
	 * This can be overridden by entries in sd.conf or the device
	 * config table.
	 */
	un->un_busy_retry_count = un->un_retry_count;

	/*
	 * Init the reset threshold for retries. This number determines
	 * how many retries must be performed before a reset can be issued
	 * (for certain error conditions). This can be overridden by entries
	 * in sd.conf or the device config table.
	 */
	un->un_reset_retry_count = (un->un_retry_count / 2);

	/*
	 * Set the victim_retry_count to the default un_retry_count
	 */
	un->un_victim_retry_count = (2 * un->un_retry_count);

	/*
	 * Set the reservation release timeout to the default value of
	 * 5 seconds. This can be overridden by entries in ssd.conf or the
	 * device config table.
	 */
	un->un_reserve_release_time = 5;

	/*
	 * Set up the default maximum transfer size. Note that this may
	 * get updated later in the attach, when setting up default wide
	 * operations for disks.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
	un->un_partial_dma_supported = 1;
#else
	un->un_max_xfer_size = (uint_t)maxphys;
#endif

	/*
	 * Get "allow bus device reset" property (defaults to "enabled" if
	 * the property was not defined). This is to disable bus resets for
	 * certain kinds of error recovery. Note: In the future when a
	 * run-time fibre check is available the soft state flag should
	 * default to enabled.
	 */
	if (un->un_f_is_fibre == TRUE) {
		un->un_f_allow_bus_device_reset = TRUE;
	} else {
		if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
		    "allow-bus-device-reset", 1) != 0) {
			un->un_f_allow_bus_device_reset = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "enabled\n", un);
		} else {
			un->un_f_allow_bus_device_reset = FALSE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "disabled\n", un);
		}
	}

	/*
	 * Check if this is an ATAPI device. ATAPI devices use Group 1
	 * Read/Write commands and Group 2 Mode Sense/Select commands.
	 *
	 * Note: The "obsolete" way of doing this is to check for the "atapi"
	 * property. The new "variant" property with a value of "atapi" has
	 * been introduced so that future 'variants' of standard SCSI behavior
	 * (like atapi) could be specified by the underlying HBA drivers by
	 * supplying a new value for the "variant" property, instead of having
	 * to define a new property.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
		un->un_f_cfg_is_atapi = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p Atapi device\n", un);
	}
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
	    &variantp) == DDI_PROP_SUCCESS) {
		if (strcmp(variantp, "atapi") == 0) {
			un->un_f_cfg_is_atapi = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Atapi device\n", un);
		}
		ddi_prop_free(variantp);
	}

	un->un_cmd_timeout = SD_IO_TIME;

	/* Info on current states, statuses, etc. (Updated frequently) */
	un->un_state		= SD_STATE_NORMAL;
	un->un_last_state	= SD_STATE_NORMAL;

	/* Control & status info for command throttling */
	un->un_throttle		= sd_max_throttle;
	un->un_saved_throttle	= sd_max_throttle;
	un->un_min_throttle	= sd_min_throttle;

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_use_adaptive_throttle = TRUE;
	} else {
		un->un_f_use_adaptive_throttle = FALSE;
	}

	/* Removable media support. */
	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
	un->un_mediastate		= DKIO_NONE;
	un->un_specified_mediastate	= DKIO_NONE;

	/* CVs for suspend/resume (PM or DR) */
	cv_init(&un->un_suspend_cv,   NULL, CV_DRIVER, NULL);
	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);

	/* Power management support. */
	un->un_power_level = SD_SPINDLE_UNINIT;

	cv_init(&un->un_wcc_cv,   NULL, CV_DRIVER, NULL);
	un->un_f_wcc_inprog = 0;

	/*
	 * The open/close semaphore is used to serialize threads executing
	 * in the driver's open & close entry point routines for a given
	 * instance.
	 */
	(void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);

	/*
	 * The conf file entry and softstate variable is a forceful override,
	 * meaning a non-zero value must be entered to change the default.
	 */
	un->un_f_disksort_disabled = FALSE;

	/*
	 * Retrieve the properties from the static driver table or the driver
	 * configuration file (.conf) for this unit and update the soft state
	 * for the device as needed for the indicated properties.
	 */
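#if 0	/* Illustrative sketch only -- not part of the driver source. */
/*
 * A userland model of the defaulted property lookups used throughout
 * this attach path (e.g. "allow-bus-device-reset" above): search a
 * name/value list and fall back to a caller-supplied default when the
 * property is absent, which is the contract of ddi_getprop() with a
 * default argument. The prop table is illustrative scaffolding.
 */
#include <string.h>

struct prop {
	const char *name;
	int value;
};

static int
prop_get_int(const struct prop *props, int nprops, const char *name,
    int defvalue)
{
	int i;

	for (i = 0; i < nprops; i++) {
		if (strcmp(props[i].name, name) == 0)
			return (props[i].value);
	}
	return (defvalue);	/* property not defined */
}
#endif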
	/*
	 * Note: the property configuration needs to occur here as some of the
	 * following routines may have dependencies on soft state flags set
	 * as part of the driver property configuration.
	 */
	sd_read_unit_properties(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);

	/*
	 * A device is treated as hotpluggable only if it has the
	 * "hotpluggable" property; otherwise it is regarded as
	 * non-hotpluggable.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
	    -1) != -1) {
		un->un_f_is_hotpluggable = TRUE;
	}

	/*
	 * Set the unit's attributes (flags) according to "hotpluggable" and
	 * the RMB bit in the INQUIRY data.
	 */
	sd_set_unit_attributes(un, devi);

	/*
	 * By default, we mark the capacity, lbasize, and geometry
	 * as invalid. Only if we successfully read a valid capacity
	 * will we update the un_blockcount and un_tgt_blocksize with the
	 * valid values (the geometry will be validated later).
	 */
	un->un_f_blockcount_is_valid	= FALSE;
	un->un_f_tgt_blocksize_is_valid	= FALSE;

	/*
	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
	 * otherwise.
	 */
	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
	un->un_blockcount = 0;

	/*
	 * Set up the per-instance info needed to determine the correct
	 * CDBs and other info for issuing commands to the target.
	 */
	sd_init_cdb_limits(un);

	/*
	 * Set up the IO chains to use, based upon the target type.
	 */
	if (un->un_f_non_devbsize_supported) {
		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
	} else {
		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
	}
	un->un_uscsi_chain_type  = SD_CHAIN_INFO_USCSI_CMD;
	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;

	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
	    sd_xbuf_strategy, un, sd_xbuf_active_limit,  sd_xbuf_reserve_limit,
	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);


	if (ISCD(un)) {
		un->un_additional_codes = sd_additional_codes;
	} else {
		un->un_additional_codes = NULL;
	}

	/*
	 * Create the kstats here so they can be available for attach-time
	 * routines that send commands to the unit (either polled or via
	 * sd_send_scsi_cmd).
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats here, before any routines using the
	 *	   iopath (i.e. sd_send_scsi_cmd).
	 *	2) Instantiate and initialize the partition stats
	 *	   (sd_set_pstats).
	 *	3) Initialize the error stats (sd_set_errstats), following
	 *	   sd_validate_geometry(), sd_register_devid(),
	 *	   and sd_cache_control().
	 */

	un->un_stats = kstat_create(sd_label, instance,
	    NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (un->un_stats != NULL) {
		un->un_stats->ks_lock = SD_MUTEX(un);
		kstat_install(un->un_stats);
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p un_stats created\n", un);

	sd_create_errstats(un, instance);
	if (un->un_errstats == NULL) {
		goto create_errstats_failed;
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p errstats created\n", un);

	/*
	 * The following if/else code was relocated here from below as part
	 * of the fix for bug (4430280). However with the default setup added
	 * on entry to this routine, it's no longer absolutely necessary for
	 * this to be before the call to sd_spin_up_unit.
	 */
	if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
		int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
		    (devp->sd_inq->inq_ansi == 5)) &&
		    devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;

		/*
		 * If tagged queueing is supported by the target
		 * and by the host adapter then we will enable it.
		 */
		un->un_tagflags = 0;
		if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
		    (un->un_f_arq_enabled == TRUE)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
			    1, 1) == 1) {
				un->un_tagflags = FLAG_STAG;
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p tag queueing "
				    "enabled\n", un);
			} else if (scsi_ifgetcap(SD_ADDRESS(un),
			    "untagged-qing", 0) == 1) {
				un->un_f_opt_queueing = TRUE;
				un->un_saved_throttle = un->un_throttle =
				    min(un->un_throttle, 3);
			} else {
				un->un_f_opt_queueing = FALSE;
				un->un_saved_throttle = un->un_throttle = 1;
			}
		} else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
		    == 1) && (un->un_f_arq_enabled == TRUE)) {
			/* The Host Adapter supports internal queueing. */
			un->un_f_opt_queueing = TRUE;
			un->un_saved_throttle = un->un_throttle =
			    min(un->un_throttle, 3);
		} else {
			un->un_f_opt_queueing = FALSE;
			un->un_saved_throttle = un->un_throttle = 1;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p no tag queueing\n", un);
		}

		/*
		 * Enable large transfers for SATA/SAS drives
		 */
		if (SD_IS_SERIAL(un)) {
			un->un_max_xfer_size =
			    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
			    sd_max_xfer_size, SD_MAX_XFER_SIZE);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p max transfer "
			    "size=0x%x\n", un, un->un_max_xfer_size);

		}

		/* Setup or tear down default wide operations for disks */

		/*
		 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
		 * and "ssd_max_xfer_size" to exist simultaneously on the same
		 * system and be set to different values. In the future this
		 * code may need to be updated when the ssd module is
		 * obsoleted and removed from the system. (4299588)
		 */
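#if 0	/* Illustrative sketch only -- not part of the driver source. */
/*
 * A userland model of the tagged-queueing trigger computed above: a
 * target qualifies when it claims command queueing (CmdQue), or when
 * it is an ANSI-4/5 device with the basic queueing (BQue) bit set.
 * The struct is a stand-in for the inquiry bits consulted in
 * sd_unit_attach().
 */
struct inq_bits {
	int inq_ansi;	/* ANSI version */
	int inq_bque;	/* basic queueing */
	int inq_cmdque;	/* command queueing */
};

static int
tq_trigger(const struct inq_bits *p)
{
	return (((p->inq_ansi == 4 || p->inq_ansi == 5) && p->inq_bque) ||
	    p->inq_cmdque);
}
#endif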
(4299588)
     */
    if (SD_IS_PARALLEL_SCSI(un) &&
        (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
        (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
        if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
            1, 1) == 1) {
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Wide Transfer "
                "enabled\n", un);
        }

        /*
         * If tagged queuing has also been enabled, then
         * enable large xfers
         */
        if (un->un_saved_throttle == sd_max_throttle) {
            un->un_max_xfer_size =
                ddi_getprop(DDI_DEV_T_ANY, devi, 0,
                sd_max_xfer_size, SD_MAX_XFER_SIZE);
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p max transfer "
                "size=0x%x\n", un, un->un_max_xfer_size);
        }
    } else {
        if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
            0, 1) == 1) {
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p "
                "Wide Transfer disabled\n", un);
        }
    }
} else {
    un->un_tagflags = FLAG_STAG;
    un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
        devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
}

/*
 * If this target supports LUN reset, try to enable it.
 */
if (un->un_f_lun_reset_enabled) {
    if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
        SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
            "un:0x%p lun_reset capability set\n", un);
    } else {
        SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
            "un:0x%p lun-reset capability not set\n", un);
    }
}

/*
 * Adjust the maximum transfer size. This is to fix
 * the problem of partial DMA support on SPARC. Some
 * HBA drivers, such as aac, have a very small dma_attr_maxxfer
 * size, which requires partial DMA support on SPARC.
 * In the future the SPARC pci nexus driver may solve
 * the problem instead of this fix.
 */
#if defined(__sparc)
max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
    un->un_max_xfer_size = max_xfer_size;
    un->un_partial_dma_supported = 1;
}
#endif

/*
 * Set PKT_DMA_PARTIAL flag.
 */
if (un->un_partial_dma_supported == 1) {
    un->un_pkt_flags = PKT_DMA_PARTIAL;
} else {
    un->un_pkt_flags = 0;
}

/*
 * At this point in the attach, we have enough info in the
 * soft state to be able to issue commands to the target.
 *
 * All command paths used below MUST issue their commands as
 * SD_PATH_DIRECT. This is important as intermediate layers
 * are not all initialized yet (such as PM).
 */

/*
 * Send a TEST UNIT READY command to the device. This should clear
 * any outstanding UNIT ATTENTION that may be present.
 *
 * Note: Don't check for success; just track whether there is a
 * reservation. This is a throwaway command to clear any unit
 * attentions.
 *
 * Note: This MUST be the first command issued to the target during
 * attach to ensure power-on UNIT ATTENTIONs are cleared.
 * Pass in the flag SD_DONT_RETRY_TUR to prevent the long delays
 * associated with attempts at spinning up a device with no media.
 */
if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) {
    reservation_flag = SD_TARGET_IS_RESERVED;
}

/*
 * If the device is NOT a removable media device, attempt to spin
 * it up (using the START_STOP_UNIT command) and read its capacity
 * (using the READ CAPACITY command). Note, however, that either
 * of these could fail and in some cases we would continue with
 * the attach despite the failure (see below).
 */
if (un->un_f_descr_format_supported) {
    switch (sd_spin_up_unit(un)) {
    case 0:
        /*
         * Spin-up was successful; now try to read the
         * capacity. If successful then save the results
         * and mark the capacity & lbasize as valid.
         */
        SD_TRACE(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p spin-up successful\n", un);

        switch (sd_send_scsi_READ_CAPACITY(un, &capacity,
            &lbasize, SD_PATH_DIRECT)) {
        case 0: {
            if (capacity > DK_MAX_BLOCKS) {
#ifdef _LP64
                if (capacity + 1 >
                    SD_GROUP1_MAX_ADDRESS) {
                    /*
                     * Enable descriptor format
                     * sense data so that we can
                     * get 64 bit sense data
                     * fields.
                     */
                    sd_enable_descr_sense(un);
                }
#else
                /* 32-bit kernels can't handle this */
                scsi_log(SD_DEVINFO(un),
                    sd_label, CE_WARN,
                    "disk has %llu blocks, which "
                    "is too large for a 32-bit "
                    "kernel", capacity);

#if defined(__i386) || defined(__amd64)
                /*
                 * A 1TB disk was treated as (1T - 512)B
                 * in the past, so it might have a valid
                 * VTOC and Solaris partitions; we have
                 * to allow it to continue to work.
                 */
                if (capacity - 1 > DK_MAX_BLOCKS)
#endif
                    goto spinup_failed;
#endif
            }

            /*
             * There is no need to check here whether the
             * device's capacity is larger than what the
             * maximum HBA CDB can address:
             * sd_send_scsi_READ_CAPACITY retrieves the
             * capacity via a USCSI command, which is
             * itself constrained by the maximum HBA CDB.
             * In fact, sd_send_scsi_READ_CAPACITY returns
             * EINVAL when a larger CDB would be required
             * than the HBA supports; that situation is
             * handled below under "case EINVAL".
             */

            /*
             * The following relies on
             * sd_send_scsi_READ_CAPACITY never
             * returning 0 for capacity and/or lbasize.
             */
            sd_update_block_info(un, lbasize, capacity);

            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p capacity = %ld "
                "blocks; lbasize= %ld.\n", un,
                un->un_blockcount, un->un_tgt_blocksize);

            break;
        }
        case EINVAL:
            /*
             * In the case where the max-cdb-length property
             * is smaller than the required CDB length for
             * a SCSI device, a target driver can fail to
             * attach to that device.
             */
            scsi_log(SD_DEVINFO(un),
                sd_label, CE_WARN,
                "disk capacity is too large "
                "for current cdb length");
            goto spinup_failed;
        case EACCES:
            /*
             * Should never get here if the spin-up
             * succeeded, but code it in anyway.
             * From here, just continue with the attach...
             */
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p "
                "sd_send_scsi_READ_CAPACITY "
                "returned reservation conflict\n", un);
            reservation_flag = SD_TARGET_IS_RESERVED;
            break;
        default:
            /*
             * Likewise, should never get here if the
             * spin-up succeeded. Just continue with
             * the attach...
             */
            break;
        }
        break;
    case EACCES:
        /*
         * Device is reserved by another host. In this case
         * we could not spin it up or read the capacity, but
         * we continue with the attach anyway.
         */
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p spin-up reservation "
            "conflict.\n", un);
        reservation_flag = SD_TARGET_IS_RESERVED;
        break;
    default:
        /* Fail the attach if the spin-up failed. */
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p spin-up failed.", un);
        goto spinup_failed;
    }
}

/*
 * Check to see if this is a MMC drive
 */
if (ISCD(un)) {
    sd_set_mmc_caps(un);
}

/*
 * Add a zero-length attribute to tell the world we support
 * kernel ioctls (for layered drivers)
 */
(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
    DDI_KERNEL_IOCTL, NULL, 0);

/*
 * Add a boolean property to tell the world we support
 * the B_FAILFAST flag (for layered drivers)
 */
(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
    "ddi-failfast-supported", NULL, 0);

/*
 * Initialize power management
 */
mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
sd_setup_pm(un, devi);
if (un->un_f_pm_is_enabled == FALSE) {
    /*
     * For performance, point to a jump table that does
     * not include pm.
     * The direct and priority chains don't change with PM.
     *
     * Note: this is currently done based on individual device
     * capabilities. When an interface for determining system
     * power enabled state becomes available, or when additional
     * layers are added to the command chain, these values will
     * have to be re-evaluated for correctness.
     */
    if (un->un_f_non_devbsize_supported) {
        un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
    } else {
        un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
    }
    un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
}

/*
 * This property is set to 0 by HA software to avoid retries
 * on a reserved disk. (The preferred property name is
 * "retry-on-reservation-conflict".) (1189689)
 *
 * Note: The use of a global here can have unintended consequences. A
 * per-instance variable is preferable to match the capabilities of
 * different underlying HBAs. (4402600)
 */
sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
    DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
    sd_retry_on_reservation_conflict);
if (sd_retry_on_reservation_conflict != 0) {
    sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
        devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
        sd_retry_on_reservation_conflict);
}

/* Set up options for QFULL handling. */
if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
    "qfull-retries", -1)) != -1) {
    (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
        rval, 1);
}
if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
    "qfull-retry-interval", -1)) != -1) {
    (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
        rval, 1);
}

/*
 * This just prints a message that announces the existence of the
 * device.
 * The message is always printed in the system logfile, but
 * only appears on the console if the system is booted with the
 * -v (verbose) argument.
 */
ddi_report_dev(devi);

un->un_mediastate = DKIO_NONE;

cmlb_alloc_handle(&un->un_cmlbhandle);

#if defined(__i386) || defined(__amd64)
/*
 * On x86, compensate for off-by-1 legacy error
 */
if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
    (lbasize == un->un_sys_blocksize))
    offbyone = CMLB_OFF_BY_ONE;
#endif

if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
    un->un_f_has_removable_media, un->un_f_is_hotpluggable,
    un->un_node_type, offbyone, un->un_cmlbhandle,
    (void *)SD_PATH_DIRECT) != 0) {
    goto cmlb_attach_failed;
}

/*
 * Read and validate the device's geometry (i.e., disk label).
 * A new unformatted drive will not have a valid geometry, but
 * the driver needs to successfully attach to this device so
 * the drive can be formatted via ioctls.
 */
geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
    (void *)SD_PATH_DIRECT) == 0) ? 1 : 0;

mutex_enter(SD_MUTEX(un));

/*
 * Read and initialize the devid for the unit.
 */
if (un->un_f_devid_supported) {
    sd_register_devid(un, devi, reservation_flag);
}
mutex_exit(SD_MUTEX(un));

#if (defined(__fibre))
/*
 * Register callbacks for fibre only. You can't do this solely
 * on the basis of the devid_type because this is hba specific.
 * We need to query our hba capabilities to find out whether to
 * register or not.
 */
if (un->un_f_is_fibre) {
    if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
        sd_init_event_callbacks(un);
        SD_TRACE(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p event callbacks inserted",
            un);
    }
}
#endif

if (un->un_f_opt_disable_cache == TRUE) {
    /*
     * Disable both read cache and write cache. This is
     * the historic behavior of the keywords in the config file.
     */
    if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
        0) {
        SD_ERROR(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p Could not disable "
            "caching", un);
        goto devid_failed;
    }
}

/*
 * Check the value of the WCE bit now and
 * set un_f_write_cache_enabled accordingly.
 */
(void) sd_get_write_cache_enabled(un, &wc_enabled);
mutex_enter(SD_MUTEX(un));
un->un_f_write_cache_enabled = (wc_enabled != 0);
mutex_exit(SD_MUTEX(un));

/*
 * Check the value of the NV_SUP bit and set
 * un_f_suppress_cache_flush accordingly.
 */
sd_get_nv_sup(un);

/*
 * Find out what type of reservation this disk supports.
 */
switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) {
case 0:
    /*
     * SCSI-3 reservations are supported.
     */
    un->un_reservation_type = SD_SCSI3_RESERVATION;
    SD_INFO(SD_LOG_ATTACH_DETACH, un,
        "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
    break;
case ENOTSUP:
    /*
     * The PERSISTENT RESERVE IN command would not be recognized by
     * a SCSI-2 device, so assume the reservation type is SCSI-2.
7400 */ 7401 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7402 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7403 un->un_reservation_type = SD_SCSI2_RESERVATION; 7404 break; 7405 default: 7406 /* 7407 * default to SCSI-3 reservations 7408 */ 7409 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7410 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7411 un->un_reservation_type = SD_SCSI3_RESERVATION; 7412 break; 7413 } 7414 7415 /* 7416 * Set the pstat and error stat values here, so data obtained during the 7417 * previous attach-time routines is available. 7418 * 7419 * Note: This is a critical sequence that needs to be maintained: 7420 * 1) Instantiate the kstats before any routines using the iopath 7421 * (i.e. sd_send_scsi_cmd). 7422 * 2) Initialize the error stats (sd_set_errstats) and partition 7423 * stats (sd_set_pstats)here, following 7424 * cmlb_validate_geometry(), sd_register_devid(), and 7425 * sd_cache_control(). 7426 */ 7427 7428 if (un->un_f_pkstats_enabled && geom_label_valid) { 7429 sd_set_pstats(un); 7430 SD_TRACE(SD_LOG_IO_PARTITION, un, 7431 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7432 } 7433 7434 sd_set_errstats(un); 7435 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7436 "sd_unit_attach: un:0x%p errstats set\n", un); 7437 7438 7439 /* 7440 * After successfully attaching an instance, we record the information 7441 * of how many luns have been attached on the relative target and 7442 * controller for parallel SCSI. This information is used when sd tries 7443 * to set the tagged queuing capability in HBA. 7444 */ 7445 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7446 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7447 } 7448 7449 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7450 "sd_unit_attach: un:0x%p exit success\n", un); 7451 7452 return (DDI_SUCCESS); 7453 7454 /* 7455 * An error occurred during the attach; clean up & return failure. 7456 */ 7457 7458 devid_failed: 7459 7460 setup_pm_failed: 7461 ddi_remove_minor_node(devi, NULL); 7462 7463 cmlb_attach_failed: 7464 /* 7465 * Cleanup from the scsi_ifsetcap() calls (437868) 7466 */ 7467 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7468 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7469 7470 /* 7471 * Refer to the comments of setting tagged-qing in the beginning of 7472 * sd_unit_attach. We can only disable tagged queuing when there is 7473 * no lun attached on the target. 7474 */ 7475 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7476 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7477 } 7478 7479 if (un->un_f_is_fibre == FALSE) { 7480 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7481 } 7482 7483 spinup_failed: 7484 7485 mutex_enter(SD_MUTEX(un)); 7486 7487 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7488 if (un->un_direct_priority_timeid != NULL) { 7489 timeout_id_t temp_id = un->un_direct_priority_timeid; 7490 un->un_direct_priority_timeid = NULL; 7491 mutex_exit(SD_MUTEX(un)); 7492 (void) untimeout(temp_id); 7493 mutex_enter(SD_MUTEX(un)); 7494 } 7495 7496 /* Cancel any pending start/stop timeouts */ 7497 if (un->un_startstop_timeid != NULL) { 7498 timeout_id_t temp_id = un->un_startstop_timeid; 7499 un->un_startstop_timeid = NULL; 7500 mutex_exit(SD_MUTEX(un)); 7501 (void) untimeout(temp_id); 7502 mutex_enter(SD_MUTEX(un)); 7503 } 7504 7505 /* Cancel any pending reset-throttle timeouts */ 7506 if (un->un_reset_throttle_timeid != NULL) { 7507 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7508 un->un_reset_throttle_timeid = NULL; 7509 mutex_exit(SD_MUTEX(un)); 7510 (void) untimeout(temp_id); 7511 mutex_enter(SD_MUTEX(un)); 7512 } 7513 7514 /* Cancel any pending retry timeouts */ 7515 if (un->un_retry_timeid != NULL) { 7516 timeout_id_t temp_id = un->un_retry_timeid; 7517 un->un_retry_timeid = NULL; 7518 mutex_exit(SD_MUTEX(un)); 7519 (void) untimeout(temp_id); 7520 mutex_enter(SD_MUTEX(un)); 7521 } 7522 7523 /* Cancel any pending delayed cv broadcast timeouts */ 7524 if (un->un_dcvb_timeid != NULL) { 7525 timeout_id_t temp_id = un->un_dcvb_timeid; 7526 un->un_dcvb_timeid = NULL; 7527 mutex_exit(SD_MUTEX(un)); 7528 (void) untimeout(temp_id); 7529 mutex_enter(SD_MUTEX(un)); 7530 } 7531 7532 mutex_exit(SD_MUTEX(un)); 7533 7534 /* There should not be any in-progress I/O so ASSERT this check */ 7535 ASSERT(un->un_ncmds_in_transport == 0); 7536 ASSERT(un->un_ncmds_in_driver == 0); 7537 7538 /* Do not free the softstate if the callback routine is active */ 7539 sd_sync_with_callback(un); 7540 7541 /* 7542 * Partition stats apparently are not used with removables. These would 7543 * not have been created during attach, so no need to clean them up... 7544 */ 7545 if (un->un_errstats != NULL) { 7546 kstat_delete(un->un_errstats); 7547 un->un_errstats = NULL; 7548 } 7549 7550 create_errstats_failed: 7551 7552 if (un->un_stats != NULL) { 7553 kstat_delete(un->un_stats); 7554 un->un_stats = NULL; 7555 } 7556 7557 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7558 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7559 7560 ddi_prop_remove_all(devi); 7561 sema_destroy(&un->un_semoclose); 7562 cv_destroy(&un->un_state_cv); 7563 7564 getrbuf_failed: 7565 7566 sd_free_rqs(un); 7567 7568 alloc_rqs_failed: 7569 7570 devp->sd_private = NULL; 7571 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7572 7573 get_softstate_failed: 7574 /* 7575 * Note: the man pages are unclear as to whether or not doing a 7576 * ddi_soft_state_free(sd_state, instance) is the right way to 7577 * clean up after the ddi_soft_state_zalloc() if the subsequent 7578 * ddi_get_soft_state() fails. The implication seems to be 7579 * that the get_soft_state cannot fail if the zalloc succeeds. 7580 */ 7581 ddi_soft_state_free(sd_state, instance); 7582 7583 probe_failed: 7584 scsi_unprobe(devp); 7585 7586 return (DDI_FAILURE); 7587 } 7588 7589 7590 /* 7591 * Function: sd_unit_detach 7592 * 7593 * Description: Performs DDI_DETACH processing for sddetach(). 
7594 * 7595 * Return Code: DDI_SUCCESS 7596 * DDI_FAILURE 7597 * 7598 * Context: Kernel thread context 7599 */ 7600 7601 static int 7602 sd_unit_detach(dev_info_t *devi) 7603 { 7604 struct scsi_device *devp; 7605 struct sd_lun *un; 7606 int i; 7607 int tgt; 7608 dev_t dev; 7609 dev_info_t *pdip = ddi_get_parent(devi); 7610 int instance = ddi_get_instance(devi); 7611 7612 mutex_enter(&sd_detach_mutex); 7613 7614 /* 7615 * Fail the detach for any of the following: 7616 * - Unable to get the sd_lun struct for the instance 7617 * - A layered driver has an outstanding open on the instance 7618 * - Another thread is already detaching this instance 7619 * - Another thread is currently performing an open 7620 */ 7621 devp = ddi_get_driver_private(devi); 7622 if ((devp == NULL) || 7623 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7624 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7625 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7626 mutex_exit(&sd_detach_mutex); 7627 return (DDI_FAILURE); 7628 } 7629 7630 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7631 7632 /* 7633 * Mark this instance as currently in a detach, to inhibit any 7634 * opens from a layered driver. 7635 */ 7636 un->un_detach_count++; 7637 mutex_exit(&sd_detach_mutex); 7638 7639 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7640 SCSI_ADDR_PROP_TARGET, -1); 7641 7642 dev = sd_make_device(SD_DEVINFO(un)); 7643 7644 #ifndef lint 7645 _NOTE(COMPETING_THREADS_NOW); 7646 #endif 7647 7648 mutex_enter(SD_MUTEX(un)); 7649 7650 /* 7651 * Fail the detach if there are any outstanding layered 7652 * opens on this device. 7653 */ 7654 for (i = 0; i < NDKMAP; i++) { 7655 if (un->un_ocmap.lyropen[i] != 0) { 7656 goto err_notclosed; 7657 } 7658 } 7659 7660 /* 7661 * Verify there are NO outstanding commands issued to this device. 7662 * ie, un_ncmds_in_transport == 0. 7663 * It's possible to have outstanding commands through the physio 7664 * code path, even though everything's closed. 7665 */ 7666 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7667 (un->un_direct_priority_timeid != NULL) || 7668 (un->un_state == SD_STATE_RWAIT)) { 7669 mutex_exit(SD_MUTEX(un)); 7670 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7671 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7672 goto err_stillbusy; 7673 } 7674 7675 /* 7676 * If we have the device reserved, release the reservation. 7677 */ 7678 if ((un->un_resvd_status & SD_RESERVE) && 7679 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7680 mutex_exit(SD_MUTEX(un)); 7681 /* 7682 * Note: sd_reserve_release sends a command to the device 7683 * via the sd_ioctlcmd() path, and can sleep. 7684 */ 7685 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7686 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7687 "sd_dr_detach: Cannot release reservation \n"); 7688 } 7689 } else { 7690 mutex_exit(SD_MUTEX(un)); 7691 } 7692 7693 /* 7694 * Untimeout any reserve recover, throttle reset, restart unit 7695 * and delayed broadcast timeout threads. Protect the timeout pointer 7696 * from getting nulled by their callback functions. 
7697 */ 7698 mutex_enter(SD_MUTEX(un)); 7699 if (un->un_resvd_timeid != NULL) { 7700 timeout_id_t temp_id = un->un_resvd_timeid; 7701 un->un_resvd_timeid = NULL; 7702 mutex_exit(SD_MUTEX(un)); 7703 (void) untimeout(temp_id); 7704 mutex_enter(SD_MUTEX(un)); 7705 } 7706 7707 if (un->un_reset_throttle_timeid != NULL) { 7708 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7709 un->un_reset_throttle_timeid = NULL; 7710 mutex_exit(SD_MUTEX(un)); 7711 (void) untimeout(temp_id); 7712 mutex_enter(SD_MUTEX(un)); 7713 } 7714 7715 if (un->un_startstop_timeid != NULL) { 7716 timeout_id_t temp_id = un->un_startstop_timeid; 7717 un->un_startstop_timeid = NULL; 7718 mutex_exit(SD_MUTEX(un)); 7719 (void) untimeout(temp_id); 7720 mutex_enter(SD_MUTEX(un)); 7721 } 7722 7723 if (un->un_dcvb_timeid != NULL) { 7724 timeout_id_t temp_id = un->un_dcvb_timeid; 7725 un->un_dcvb_timeid = NULL; 7726 mutex_exit(SD_MUTEX(un)); 7727 (void) untimeout(temp_id); 7728 } else { 7729 mutex_exit(SD_MUTEX(un)); 7730 } 7731 7732 /* Remove any pending reservation reclaim requests for this device */ 7733 sd_rmv_resv_reclaim_req(dev); 7734 7735 mutex_enter(SD_MUTEX(un)); 7736 7737 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7738 if (un->un_direct_priority_timeid != NULL) { 7739 timeout_id_t temp_id = un->un_direct_priority_timeid; 7740 un->un_direct_priority_timeid = NULL; 7741 mutex_exit(SD_MUTEX(un)); 7742 (void) untimeout(temp_id); 7743 mutex_enter(SD_MUTEX(un)); 7744 } 7745 7746 /* Cancel any active multi-host disk watch thread requests */ 7747 if (un->un_mhd_token != NULL) { 7748 mutex_exit(SD_MUTEX(un)); 7749 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7750 if (scsi_watch_request_terminate(un->un_mhd_token, 7751 SCSI_WATCH_TERMINATE_NOWAIT)) { 7752 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7753 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7754 /* 7755 * Note: We are returning here after having removed 7756 * some driver timeouts above. This is consistent with 7757 * the legacy implementation but perhaps the watch 7758 * terminate call should be made with the wait flag set. 7759 */ 7760 goto err_stillbusy; 7761 } 7762 mutex_enter(SD_MUTEX(un)); 7763 un->un_mhd_token = NULL; 7764 } 7765 7766 if (un->un_swr_token != NULL) { 7767 mutex_exit(SD_MUTEX(un)); 7768 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7769 if (scsi_watch_request_terminate(un->un_swr_token, 7770 SCSI_WATCH_TERMINATE_NOWAIT)) { 7771 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7772 "sd_dr_detach: Cannot cancel swr watch request\n"); 7773 /* 7774 * Note: We are returning here after having removed 7775 * some driver timeouts above. This is consistent with 7776 * the legacy implementation but perhaps the watch 7777 * terminate call should be made with the wait flag set. 7778 */ 7779 goto err_stillbusy; 7780 } 7781 mutex_enter(SD_MUTEX(un)); 7782 un->un_swr_token = NULL; 7783 } 7784 7785 mutex_exit(SD_MUTEX(un)); 7786 7787 /* 7788 * Clear any scsi_reset_notifies. We clear the reset notifies 7789 * if we have not registered one. 7790 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7791 */ 7792 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7793 sd_mhd_reset_notify_cb, (caddr_t)un); 7794 7795 /* 7796 * protect the timeout pointers from getting nulled by 7797 * their callback functions during the cancellation process. 7798 * In such a scenario untimeout can be invoked with a null value. 
7799 */ 7800 _NOTE(NO_COMPETING_THREADS_NOW); 7801 7802 mutex_enter(&un->un_pm_mutex); 7803 if (un->un_pm_idle_timeid != NULL) { 7804 timeout_id_t temp_id = un->un_pm_idle_timeid; 7805 un->un_pm_idle_timeid = NULL; 7806 mutex_exit(&un->un_pm_mutex); 7807 7808 /* 7809 * Timeout is active; cancel it. 7810 * Note that it'll never be active on a device 7811 * that does not support PM therefore we don't 7812 * have to check before calling pm_idle_component. 7813 */ 7814 (void) untimeout(temp_id); 7815 (void) pm_idle_component(SD_DEVINFO(un), 0); 7816 mutex_enter(&un->un_pm_mutex); 7817 } 7818 7819 /* 7820 * Check whether there is already a timeout scheduled for power 7821 * management. If yes then don't lower the power here, that's. 7822 * the timeout handler's job. 7823 */ 7824 if (un->un_pm_timeid != NULL) { 7825 timeout_id_t temp_id = un->un_pm_timeid; 7826 un->un_pm_timeid = NULL; 7827 mutex_exit(&un->un_pm_mutex); 7828 /* 7829 * Timeout is active; cancel it. 7830 * Note that it'll never be active on a device 7831 * that does not support PM therefore we don't 7832 * have to check before calling pm_idle_component. 7833 */ 7834 (void) untimeout(temp_id); 7835 (void) pm_idle_component(SD_DEVINFO(un), 0); 7836 7837 } else { 7838 mutex_exit(&un->un_pm_mutex); 7839 if ((un->un_f_pm_is_enabled == TRUE) && 7840 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 7841 DDI_SUCCESS)) { 7842 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7843 "sd_dr_detach: Lower power request failed, ignoring.\n"); 7844 /* 7845 * Fix for bug: 4297749, item # 13 7846 * The above test now includes a check to see if PM is 7847 * supported by this device before call 7848 * pm_lower_power(). 7849 * Note, the following is not dead code. The call to 7850 * pm_lower_power above will generate a call back into 7851 * our sdpower routine which might result in a timeout 7852 * handler getting activated. Therefore the following 7853 * code is valid and necessary. 7854 */ 7855 mutex_enter(&un->un_pm_mutex); 7856 if (un->un_pm_timeid != NULL) { 7857 timeout_id_t temp_id = un->un_pm_timeid; 7858 un->un_pm_timeid = NULL; 7859 mutex_exit(&un->un_pm_mutex); 7860 (void) untimeout(temp_id); 7861 (void) pm_idle_component(SD_DEVINFO(un), 0); 7862 } else { 7863 mutex_exit(&un->un_pm_mutex); 7864 } 7865 } 7866 } 7867 7868 /* 7869 * Cleanup from the scsi_ifsetcap() calls (437868) 7870 * Relocated here from above to be after the call to 7871 * pm_lower_power, which was getting errors. 7872 */ 7873 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7874 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7875 7876 /* 7877 * Currently, tagged queuing is supported per target based by HBA. 7878 * Setting this per lun instance actually sets the capability of this 7879 * target in HBA, which affects those luns already attached on the 7880 * same target. So during detach, we can only disable this capability 7881 * only when this is the only lun left on this target. By doing 7882 * this, we assume a target has the same tagged queuing capability 7883 * for every lun. The condition can be removed when HBA is changed to 7884 * support per lun based tagged queuing capability. 
7885 */ 7886 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7887 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7888 } 7889 7890 if (un->un_f_is_fibre == FALSE) { 7891 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7892 } 7893 7894 /* 7895 * Remove any event callbacks, fibre only 7896 */ 7897 if (un->un_f_is_fibre == TRUE) { 7898 if ((un->un_insert_event != NULL) && 7899 (ddi_remove_event_handler(un->un_insert_cb_id) != 7900 DDI_SUCCESS)) { 7901 /* 7902 * Note: We are returning here after having done 7903 * substantial cleanup above. This is consistent 7904 * with the legacy implementation but this may not 7905 * be the right thing to do. 7906 */ 7907 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7908 "sd_dr_detach: Cannot cancel insert event\n"); 7909 goto err_remove_event; 7910 } 7911 un->un_insert_event = NULL; 7912 7913 if ((un->un_remove_event != NULL) && 7914 (ddi_remove_event_handler(un->un_remove_cb_id) != 7915 DDI_SUCCESS)) { 7916 /* 7917 * Note: We are returning here after having done 7918 * substantial cleanup above. This is consistent 7919 * with the legacy implementation but this may not 7920 * be the right thing to do. 7921 */ 7922 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7923 "sd_dr_detach: Cannot cancel remove event\n"); 7924 goto err_remove_event; 7925 } 7926 un->un_remove_event = NULL; 7927 } 7928 7929 /* Do not free the softstate if the callback routine is active */ 7930 sd_sync_with_callback(un); 7931 7932 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7933 cmlb_free_handle(&un->un_cmlbhandle); 7934 7935 /* 7936 * Hold the detach mutex here, to make sure that no other threads ever 7937 * can access a (partially) freed soft state structure. 7938 */ 7939 mutex_enter(&sd_detach_mutex); 7940 7941 /* 7942 * Clean up the soft state struct. 7943 * Cleanup is done in reverse order of allocs/inits. 7944 * At this point there should be no competing threads anymore. 7945 */ 7946 7947 /* Unregister and free device id. */ 7948 ddi_devid_unregister(devi); 7949 if (un->un_devid) { 7950 ddi_devid_free(un->un_devid); 7951 un->un_devid = NULL; 7952 } 7953 7954 /* 7955 * Destroy wmap cache if it exists. 7956 */ 7957 if (un->un_wm_cache != NULL) { 7958 kmem_cache_destroy(un->un_wm_cache); 7959 un->un_wm_cache = NULL; 7960 } 7961 7962 /* 7963 * kstat cleanup is done in detach for all device types (4363169). 7964 * We do not want to fail detach if the device kstats are not deleted 7965 * since there is a confusion about the devo_refcnt for the device. 7966 * We just delete the kstats and let detach complete successfully. 7967 */ 7968 if (un->un_stats != NULL) { 7969 kstat_delete(un->un_stats); 7970 un->un_stats = NULL; 7971 } 7972 if (un->un_errstats != NULL) { 7973 kstat_delete(un->un_errstats); 7974 un->un_errstats = NULL; 7975 } 7976 7977 /* Remove partition stats */ 7978 if (un->un_f_pkstats_enabled) { 7979 for (i = 0; i < NSDMAP; i++) { 7980 if (un->un_pstats[i] != NULL) { 7981 kstat_delete(un->un_pstats[i]); 7982 un->un_pstats[i] = NULL; 7983 } 7984 } 7985 } 7986 7987 /* Remove xbuf registration */ 7988 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7989 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7990 7991 /* Remove driver properties */ 7992 ddi_prop_remove_all(devi); 7993 7994 mutex_destroy(&un->un_pm_mutex); 7995 cv_destroy(&un->un_pm_busy_cv); 7996 7997 cv_destroy(&un->un_wcc_cv); 7998 7999 /* Open/close semaphore */ 8000 sema_destroy(&un->un_semoclose); 8001 8002 /* Removable media condvar. 
 */
cv_destroy(&un->un_state_cv);

/* Suspend/resume condvar. */
cv_destroy(&un->un_suspend_cv);
cv_destroy(&un->un_disk_busy_cv);

sd_free_rqs(un);

/* Free up soft state */
devp->sd_private = NULL;

bzero(un, sizeof (struct sd_lun));
ddi_soft_state_free(sd_state, instance);

mutex_exit(&sd_detach_mutex);

/* This frees up the INQUIRY data associated with the device. */
scsi_unprobe(devp);

/*
 * After successfully detaching an instance, we update the record of
 * how many luns are attached to the corresponding target and
 * controller for parallel SCSI. This information is used when sd
 * tries to set the tagged queuing capability in the HBA.
 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
 * check if the device is parallel SCSI. However, we don't need to
 * check here because we've already checked during attach. No device
 * that is not parallel SCSI is in the chain.
 */
if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
    sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
}

return (DDI_SUCCESS);

err_notclosed:
mutex_exit(SD_MUTEX(un));

err_stillbusy:
_NOTE(NO_COMPETING_THREADS_NOW);

err_remove_event:
mutex_enter(&sd_detach_mutex);
un->un_detach_count--;
mutex_exit(&sd_detach_mutex);

SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
return (DDI_FAILURE);
}


/*
 * Function: sd_create_errstats
 *
 * Description: This routine instantiates the device error stats.
 *
 *              Note: During attach the stats are instantiated first so they
 *              are available for attach-time routines that utilize the driver
 *              iopath to send commands to the device. The stats are
 *              initialized separately so data obtained during some
 *              attach-time routines is available.
(4362483) 8064 * 8065 * Arguments: un - driver soft state (unit) structure 8066 * instance - driver instance 8067 * 8068 * Context: Kernel thread context 8069 */ 8070 8071 static void 8072 sd_create_errstats(struct sd_lun *un, int instance) 8073 { 8074 struct sd_errstats *stp; 8075 char kstatmodule_err[KSTAT_STRLEN]; 8076 char kstatname[KSTAT_STRLEN]; 8077 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8078 8079 ASSERT(un != NULL); 8080 8081 if (un->un_errstats != NULL) { 8082 return; 8083 } 8084 8085 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8086 "%serr", sd_label); 8087 (void) snprintf(kstatname, sizeof (kstatname), 8088 "%s%d,err", sd_label, instance); 8089 8090 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8091 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8092 8093 if (un->un_errstats == NULL) { 8094 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8095 "sd_create_errstats: Failed kstat_create\n"); 8096 return; 8097 } 8098 8099 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8100 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8101 KSTAT_DATA_UINT32); 8102 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8103 KSTAT_DATA_UINT32); 8104 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8105 KSTAT_DATA_UINT32); 8106 kstat_named_init(&stp->sd_vid, "Vendor", 8107 KSTAT_DATA_CHAR); 8108 kstat_named_init(&stp->sd_pid, "Product", 8109 KSTAT_DATA_CHAR); 8110 kstat_named_init(&stp->sd_revision, "Revision", 8111 KSTAT_DATA_CHAR); 8112 kstat_named_init(&stp->sd_serial, "Serial No", 8113 KSTAT_DATA_CHAR); 8114 kstat_named_init(&stp->sd_capacity, "Size", 8115 KSTAT_DATA_ULONGLONG); 8116 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8117 KSTAT_DATA_UINT32); 8118 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8119 KSTAT_DATA_UINT32); 8120 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8121 KSTAT_DATA_UINT32); 8122 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8123 KSTAT_DATA_UINT32); 8124 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8125 KSTAT_DATA_UINT32); 8126 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8127 KSTAT_DATA_UINT32); 8128 8129 un->un_errstats->ks_private = un; 8130 un->un_errstats->ks_update = nulldev; 8131 8132 kstat_install(un->un_errstats); 8133 } 8134 8135 8136 /* 8137 * Function: sd_set_errstats 8138 * 8139 * Description: This routine sets the value of the vendor id, product id, 8140 * revision, serial number, and capacity device error stats. 8141 * 8142 * Note: During attach the stats are instantiated first so they are 8143 * available for attach-time routines that utilize the driver 8144 * iopath to send commands to the device. The stats are initialized 8145 * separately so data obtained during some attach-time routines is 8146 * available. 
(4362483)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_set_errstats(struct sd_lun *un)
{
    struct sd_errstats *stp;

    ASSERT(un != NULL);
    ASSERT(un->un_errstats != NULL);
    stp = (struct sd_errstats *)un->un_errstats->ks_data;
    ASSERT(stp != NULL);
    (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
    (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
    (void) strncpy(stp->sd_revision.value.c,
        un->un_sd->sd_inq->inq_revision, 4);

    /*
     * All the errstats are persistent across detach/attach, so reset
     * them all here in case a disk drive has been hot-replaced. Skip
     * the reset for Sun-qualified drives whose serial number has not
     * changed.
     */
    if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
        (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
        sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
        stp->sd_softerrs.value.ui32 = 0;
        stp->sd_harderrs.value.ui32 = 0;
        stp->sd_transerrs.value.ui32 = 0;
        stp->sd_rq_media_err.value.ui32 = 0;
        stp->sd_rq_ntrdy_err.value.ui32 = 0;
        stp->sd_rq_nodev_err.value.ui32 = 0;
        stp->sd_rq_recov_err.value.ui32 = 0;
        stp->sd_rq_illrq_err.value.ui32 = 0;
        stp->sd_rq_pfa_err.value.ui32 = 0;
    }

    /*
     * Set the "Serial No" kstat for Sun qualified drives (indicated by
     * "SUN" in bytes 25-27 of the inquiry data, i.e. bytes 9-11 of the
     * pid). (4376302)
     */
    if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
        bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
            sizeof (SD_INQUIRY(un)->inq_serial));
    }

    if (un->un_f_blockcount_is_valid != TRUE) {
        /*
         * Set capacity error stat to 0 for no media. This ensures
         * a valid capacity is displayed in response to 'iostat -E'
         * when no media is present in the device.
         */
        stp->sd_capacity.value.ui64 = 0;
    } else {
        /*
         * Multiply un_blockcount by un->un_sys_blocksize to get
         * capacity.
         *
         * Note: for non-512 blocksize devices "un_blockcount" has been
         * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
         * (un_tgt_blocksize / un->un_sys_blocksize).
         */
        stp->sd_capacity.value.ui64 = (uint64_t)
            ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
    }
}


/*
 * Function: sd_set_pstats
 *
 * Description: This routine instantiates and initializes the partition
 *              stats for each partition with more than zero blocks.
 *              (4363169)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_set_pstats(struct sd_lun *un)
{
    char kstatname[KSTAT_STRLEN];
    int instance;
    int i;
    diskaddr_t nblks = 0;
    char *partname = NULL;

    ASSERT(un != NULL);

    instance = ddi_get_instance(SD_DEVINFO(un));

    /* Note:x86: is this a VTOC8/VTOC16 difference?
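     *
     * (Each partition kstat below is named "<label><instance>,<partition>",
     * e.g. "sd0,a" for slice a of instance 0, assuming the partition names
     * reported by cmlb_partinfo(); see the snprintf below.)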
     */
    for (i = 0; i < NSDMAP; i++) {

        if (cmlb_partinfo(un->un_cmlbhandle, i,
            &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
            continue;
        mutex_enter(SD_MUTEX(un));

        if ((un->un_pstats[i] == NULL) &&
            (nblks != 0)) {

            (void) snprintf(kstatname, sizeof (kstatname),
                "%s%d,%s", sd_label, instance,
                partname);

            un->un_pstats[i] = kstat_create(sd_label,
                instance, kstatname, "partition", KSTAT_TYPE_IO,
                1, KSTAT_FLAG_PERSISTENT);
            if (un->un_pstats[i] != NULL) {
                un->un_pstats[i]->ks_lock = SD_MUTEX(un);
                kstat_install(un->un_pstats[i]);
            }
        }
        mutex_exit(SD_MUTEX(un));
    }
}


#if (defined(__fibre))
/*
 * Function: sd_init_event_callbacks
 *
 * Description: This routine initializes the insertion and removal event
 *              callbacks. (fibre only)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_init_event_callbacks(struct sd_lun *un)
{
    ASSERT(un != NULL);

    if ((un->un_insert_event == NULL) &&
        (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
        &un->un_insert_event) == DDI_SUCCESS)) {
        /*
         * Add the callback for an insertion event
         */
        (void) ddi_add_event_handler(SD_DEVINFO(un),
            un->un_insert_event, sd_event_callback, (void *)un,
            &(un->un_insert_cb_id));
    }

    if ((un->un_remove_event == NULL) &&
        (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
        &un->un_remove_event) == DDI_SUCCESS)) {
        /*
         * Add the callback for a removal event
         */
        (void) ddi_add_event_handler(SD_DEVINFO(un),
            un->un_remove_event, sd_event_callback, (void *)un,
            &(un->un_remove_cb_id));
    }
}


/*
 * Function: sd_event_callback
 *
 * Description: This routine handles insert/remove events (photon). The
 *              state is changed to OFFLINE, which can be used to suppress
 *              error messages. (fibre only)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Callout thread context
 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
    struct sd_lun *un = (struct sd_lun *)arg;

    _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
    if (event == un->un_insert_event) {
        SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
        mutex_enter(SD_MUTEX(un));
        if (un->un_state == SD_STATE_OFFLINE) {
            if (un->un_last_state != SD_STATE_SUSPENDED) {
                un->un_state = un->un_last_state;
            } else {
                /*
                 * We have gone through SUSPEND/RESUME while
                 * we were offline. Restore the last state
                 */
                un->un_state = un->un_save_state;
            }
        }
        mutex_exit(SD_MUTEX(un));

        _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
    } else if (event == un->un_remove_event) {
        SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
        mutex_enter(SD_MUTEX(un));
        /*
         * We need to handle an event callback that occurs during
         * the suspend operation, since we don't prevent it.
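         *
         * (For example, a removal event may arrive while the unit is
         * in SD_STATE_SUSPENDED; in that case only un_last_state is
         * updated below, rather than the current state.)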
8355 */ 8356 if (un->un_state != SD_STATE_OFFLINE) { 8357 if (un->un_state != SD_STATE_SUSPENDED) { 8358 New_state(un, SD_STATE_OFFLINE); 8359 } else { 8360 un->un_last_state = SD_STATE_OFFLINE; 8361 } 8362 } 8363 mutex_exit(SD_MUTEX(un)); 8364 } else { 8365 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8366 "!Unknown event\n"); 8367 } 8368 8369 } 8370 #endif 8371 8372 /* 8373 * Function: sd_cache_control() 8374 * 8375 * Description: This routine is the driver entry point for setting 8376 * read and write caching by modifying the WCE (write cache 8377 * enable) and RCD (read cache disable) bits of mode 8378 * page 8 (MODEPAGE_CACHING). 8379 * 8380 * Arguments: un - driver soft state (unit) structure 8381 * rcd_flag - flag for controlling the read cache 8382 * wce_flag - flag for controlling the write cache 8383 * 8384 * Return Code: EIO 8385 * code returned by sd_send_scsi_MODE_SENSE and 8386 * sd_send_scsi_MODE_SELECT 8387 * 8388 * Context: Kernel Thread 8389 */ 8390 8391 static int 8392 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8393 { 8394 struct mode_caching *mode_caching_page; 8395 uchar_t *header; 8396 size_t buflen; 8397 int hdrlen; 8398 int bd_len; 8399 int rval = 0; 8400 struct mode_header_grp2 *mhp; 8401 8402 ASSERT(un != NULL); 8403 8404 /* 8405 * Do a test unit ready, otherwise a mode sense may not work if this 8406 * is the first command sent to the device after boot. 8407 */ 8408 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8409 8410 if (un->un_f_cfg_is_atapi == TRUE) { 8411 hdrlen = MODE_HEADER_LENGTH_GRP2; 8412 } else { 8413 hdrlen = MODE_HEADER_LENGTH; 8414 } 8415 8416 /* 8417 * Allocate memory for the retrieved mode page and its headers. Set 8418 * a pointer to the page itself. Use mode_cache_scsi3 to insure 8419 * we get all of the mode sense data otherwise, the mode select 8420 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8421 */ 8422 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8423 sizeof (struct mode_cache_scsi3); 8424 8425 header = kmem_zalloc(buflen, KM_SLEEP); 8426 8427 /* Get the information from the device. */ 8428 if (un->un_f_cfg_is_atapi == TRUE) { 8429 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8430 MODEPAGE_CACHING, SD_PATH_DIRECT); 8431 } else { 8432 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8433 MODEPAGE_CACHING, SD_PATH_DIRECT); 8434 } 8435 if (rval != 0) { 8436 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8437 "sd_cache_control: Mode Sense Failed\n"); 8438 kmem_free(header, buflen); 8439 return (rval); 8440 } 8441 8442 /* 8443 * Determine size of Block Descriptors in order to locate 8444 * the mode page data. ATAPI devices return 0, SCSI devices 8445 * should return MODE_BLK_DESC_LENGTH. 
8446 */ 8447 if (un->un_f_cfg_is_atapi == TRUE) { 8448 mhp = (struct mode_header_grp2 *)header; 8449 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8450 } else { 8451 bd_len = ((struct mode_header *)header)->bdesc_length; 8452 } 8453 8454 if (bd_len > MODE_BLK_DESC_LENGTH) { 8455 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8456 "sd_cache_control: Mode Sense returned invalid " 8457 "block descriptor length\n"); 8458 kmem_free(header, buflen); 8459 return (EIO); 8460 } 8461 8462 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8463 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8464 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8465 " caching page code mismatch %d\n", 8466 mode_caching_page->mode_page.code); 8467 kmem_free(header, buflen); 8468 return (EIO); 8469 } 8470 8471 /* Check the relevant bits on successful mode sense. */ 8472 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8473 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8474 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8475 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8476 8477 size_t sbuflen; 8478 uchar_t save_pg; 8479 8480 /* 8481 * Construct select buffer length based on the 8482 * length of the sense data returned. 8483 */ 8484 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8485 sizeof (struct mode_page) + 8486 (int)mode_caching_page->mode_page.length; 8487 8488 /* 8489 * Set the caching bits as requested. 8490 */ 8491 if (rcd_flag == SD_CACHE_ENABLE) 8492 mode_caching_page->rcd = 0; 8493 else if (rcd_flag == SD_CACHE_DISABLE) 8494 mode_caching_page->rcd = 1; 8495 8496 if (wce_flag == SD_CACHE_ENABLE) 8497 mode_caching_page->wce = 1; 8498 else if (wce_flag == SD_CACHE_DISABLE) 8499 mode_caching_page->wce = 0; 8500 8501 /* 8502 * Save the page if the mode sense says the 8503 * drive supports it. 8504 */ 8505 save_pg = mode_caching_page->mode_page.ps ? 8506 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8507 8508 /* Clear reserved bits before mode select. */ 8509 mode_caching_page->mode_page.ps = 0; 8510 8511 /* 8512 * Clear out mode header for mode select. 8513 * The rest of the retrieved page will be reused. 8514 */ 8515 bzero(header, hdrlen); 8516 8517 if (un->un_f_cfg_is_atapi == TRUE) { 8518 mhp = (struct mode_header_grp2 *)header; 8519 mhp->bdesc_length_hi = bd_len >> 8; 8520 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8521 } else { 8522 ((struct mode_header *)header)->bdesc_length = bd_len; 8523 } 8524 8525 /* Issue mode select to change the cache settings */ 8526 if (un->un_f_cfg_is_atapi == TRUE) { 8527 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8528 sbuflen, save_pg, SD_PATH_DIRECT); 8529 } else { 8530 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8531 sbuflen, save_pg, SD_PATH_DIRECT); 8532 } 8533 } 8534 8535 kmem_free(header, buflen); 8536 return (rval); 8537 } 8538 8539 8540 /* 8541 * Function: sd_get_write_cache_enabled() 8542 * 8543 * Description: This routine is the driver entry point for determining if 8544 * write caching is enabled. It examines the WCE (write cache 8545 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8546 * 8547 * Arguments: un - driver soft state (unit) structure 8548 * is_enabled - pointer to int where write cache enabled state 8549 * is returned (non-zero -> write cache enabled) 8550 * 8551 * 8552 * Return Code: EIO 8553 * code returned by sd_send_scsi_MODE_SENSE 8554 * 8555 * Context: Kernel Thread 8556 * 8557 * NOTE: If ioctl is added to disable write cache, this sequence should 8558 * be followed so that no locking is required for accesses to 8559 * un->un_f_write_cache_enabled: 8560 * do mode select to clear wce 8561 * do synchronize cache to flush cache 8562 * set un->un_f_write_cache_enabled = FALSE 8563 * 8564 * Conversely, an ioctl to enable the write cache should be done 8565 * in this order: 8566 * set un->un_f_write_cache_enabled = TRUE 8567 * do mode select to set wce 8568 */ 8569 8570 static int 8571 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8572 { 8573 struct mode_caching *mode_caching_page; 8574 uchar_t *header; 8575 size_t buflen; 8576 int hdrlen; 8577 int bd_len; 8578 int rval = 0; 8579 8580 ASSERT(un != NULL); 8581 ASSERT(is_enabled != NULL); 8582 8583 /* in case of error, flag as enabled */ 8584 *is_enabled = TRUE; 8585 8586 /* 8587 * Do a test unit ready, otherwise a mode sense may not work if this 8588 * is the first command sent to the device after boot. 8589 */ 8590 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8591 8592 if (un->un_f_cfg_is_atapi == TRUE) { 8593 hdrlen = MODE_HEADER_LENGTH_GRP2; 8594 } else { 8595 hdrlen = MODE_HEADER_LENGTH; 8596 } 8597 8598 /* 8599 * Allocate memory for the retrieved mode page and its headers. Set 8600 * a pointer to the page itself. 8601 */ 8602 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8603 header = kmem_zalloc(buflen, KM_SLEEP); 8604 8605 /* Get the information from the device. */ 8606 if (un->un_f_cfg_is_atapi == TRUE) { 8607 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8608 MODEPAGE_CACHING, SD_PATH_DIRECT); 8609 } else { 8610 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8611 MODEPAGE_CACHING, SD_PATH_DIRECT); 8612 } 8613 if (rval != 0) { 8614 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8615 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8616 kmem_free(header, buflen); 8617 return (rval); 8618 } 8619 8620 /* 8621 * Determine size of Block Descriptors in order to locate 8622 * the mode page data. ATAPI devices return 0, SCSI devices 8623 * should return MODE_BLK_DESC_LENGTH. 
8624 */ 8625 if (un->un_f_cfg_is_atapi == TRUE) { 8626 struct mode_header_grp2 *mhp; 8627 mhp = (struct mode_header_grp2 *)header; 8628 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8629 } else { 8630 bd_len = ((struct mode_header *)header)->bdesc_length; 8631 } 8632 8633 if (bd_len > MODE_BLK_DESC_LENGTH) { 8634 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8635 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8636 "block descriptor length\n"); 8637 kmem_free(header, buflen); 8638 return (EIO); 8639 } 8640 8641 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8642 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8643 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8644 " caching page code mismatch %d\n", 8645 mode_caching_page->mode_page.code); 8646 kmem_free(header, buflen); 8647 return (EIO); 8648 } 8649 *is_enabled = mode_caching_page->wce; 8650 8651 kmem_free(header, buflen); 8652 return (0); 8653 } 8654 8655 /* 8656 * Function: sd_get_nv_sup() 8657 * 8658 * Description: This routine is the driver entry point for 8659 * determining whether non-volatile cache is supported. This 8660 * determination process works as follows: 8661 * 8662 * 1. sd first queries sd.conf on whether 8663 * suppress_cache_flush bit is set for this device. 8664 * 8665 * 2. if not there, then queries the internal disk table. 8666 * 8667 * 3. if either sd.conf or internal disk table specifies 8668 * cache flush be suppressed, we don't bother checking 8669 * NV_SUP bit. 8670 * 8671 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 8672 * the optional INQUIRY VPD page 0x86. If the device 8673 * supports VPD page 0x86, sd examines the NV_SUP 8674 * (non-volatile cache support) bit in the INQUIRY VPD page 8675 * 0x86: 8676 * o If NV_SUP bit is set, sd assumes the device has a 8677 * non-volatile cache and set the 8678 * un_f_sync_nv_supported to TRUE. 8679 * o Otherwise cache is not non-volatile, 8680 * un_f_sync_nv_supported is set to FALSE. 8681 * 8682 * Arguments: un - driver soft state (unit) structure 8683 * 8684 * Return Code: 8685 * 8686 * Context: Kernel Thread 8687 */ 8688 8689 static void 8690 sd_get_nv_sup(struct sd_lun *un) 8691 { 8692 int rval = 0; 8693 uchar_t *inq86 = NULL; 8694 size_t inq86_len = MAX_INQUIRY_SIZE; 8695 size_t inq86_resid = 0; 8696 struct dk_callback *dkc; 8697 8698 ASSERT(un != NULL); 8699 8700 mutex_enter(SD_MUTEX(un)); 8701 8702 /* 8703 * Be conservative on the device's support of 8704 * SYNC_NV bit: un_f_sync_nv_supported is 8705 * initialized to be false. 8706 */ 8707 un->un_f_sync_nv_supported = FALSE; 8708 8709 /* 8710 * If either sd.conf or internal disk table 8711 * specifies cache flush be suppressed, then 8712 * we don't bother checking NV_SUP bit. 
8713 */ 8714 if (un->un_f_suppress_cache_flush == TRUE) { 8715 mutex_exit(SD_MUTEX(un)); 8716 return; 8717 } 8718 8719 if (sd_check_vpd_page_support(un) == 0 && 8720 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 8721 mutex_exit(SD_MUTEX(un)); 8722 /* collect page 86 data if available */ 8723 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 8724 rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len, 8725 0x01, 0x86, &inq86_resid); 8726 8727 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 8728 SD_TRACE(SD_LOG_COMMON, un, 8729 "sd_get_nv_sup: \ 8730 successfully get VPD page: %x \ 8731 PAGE LENGTH: %x BYTE 6: %x\n", 8732 inq86[1], inq86[3], inq86[6]); 8733 8734 mutex_enter(SD_MUTEX(un)); 8735 /* 8736 * check the value of NV_SUP bit: only if the device 8737 * reports NV_SUP bit to be 1, the 8738 * un_f_sync_nv_supported bit will be set to true. 8739 */ 8740 if (inq86[6] & SD_VPD_NV_SUP) { 8741 un->un_f_sync_nv_supported = TRUE; 8742 } 8743 mutex_exit(SD_MUTEX(un)); 8744 } 8745 kmem_free(inq86, inq86_len); 8746 } else { 8747 mutex_exit(SD_MUTEX(un)); 8748 } 8749 8750 /* 8751 * Send a SYNC CACHE command to check whether 8752 * SYNC_NV bit is supported. This command should have 8753 * un_f_sync_nv_supported set to correct value. 8754 */ 8755 mutex_enter(SD_MUTEX(un)); 8756 if (un->un_f_sync_nv_supported) { 8757 mutex_exit(SD_MUTEX(un)); 8758 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 8759 dkc->dkc_flag = FLUSH_VOLATILE; 8760 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 8761 8762 /* 8763 * Send a TEST UNIT READY command to the device. This should 8764 * clear any outstanding UNIT ATTENTION that may be present. 8765 */ 8766 (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR); 8767 8768 kmem_free(dkc, sizeof (struct dk_callback)); 8769 } else { 8770 mutex_exit(SD_MUTEX(un)); 8771 } 8772 8773 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 8774 un_f_suppress_cache_flush is set to %d\n", 8775 un->un_f_suppress_cache_flush); 8776 } 8777 8778 /* 8779 * Function: sd_make_device 8780 * 8781 * Description: Utility routine to return the Solaris device number from 8782 * the data in the device's dev_info structure. 8783 * 8784 * Return Code: The Solaris device number 8785 * 8786 * Context: Any 8787 */ 8788 8789 static dev_t 8790 sd_make_device(dev_info_t *devi) 8791 { 8792 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8793 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8794 } 8795 8796 8797 /* 8798 * Function: sd_pm_entry 8799 * 8800 * Description: Called at the start of a new command to manage power 8801 * and busy status of a device. This includes determining whether 8802 * the current power state of the device is sufficient for 8803 * performing the command or whether it must be changed. 8804 * The PM framework is notified appropriately. 8805 * Only with a return status of DDI_SUCCESS will the 8806 * component be busy to the framework. 8807 * 8808 * All callers of sd_pm_entry must check the return status 8809 * and only call sd_pm_exit it it was DDI_SUCCESS. A status 8810 * of DDI_FAILURE indicates the device failed to power up. 8811 * In this case un_pm_count has been adjusted so the result 8812 * on exit is still powered down, ie. count is less than 0. 8813 * Calling sd_pm_exit with this count value hits an ASSERT. 8814 * 8815 * Return Code: DDI_SUCCESS or DDI_FAILURE 8816 * 8817 * Context: Kernel thread context. 
 */

static int
sd_pm_entry(struct sd_lun *un)
{
    int return_status = DDI_SUCCESS;

    ASSERT(!mutex_owned(SD_MUTEX(un)));
    ASSERT(!mutex_owned(&un->un_pm_mutex));

    SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");

    if (un->un_f_pm_is_enabled == FALSE) {
        SD_TRACE(SD_LOG_IO_PM, un,
            "sd_pm_entry: exiting, PM not enabled\n");
        return (return_status);
    }

    /*
     * Just increment a counter if PM is enabled. On the transition from
     * 0 ==> 1, mark the device as busy. The iodone side will decrement
     * the count with each IO and mark the device as idle when the count
     * hits 0.
     *
     * If the count is less than 0 the device is powered down. If a powered
     * down device is successfully powered up then the count must be
     * incremented to reflect the power up. Note that it'll get incremented
     * a second time to become busy.
     *
     * Because the following has the potential to change the device state
     * and must release the un_pm_mutex to do so, only one thread can be
     * allowed through at a time.
     */

    mutex_enter(&un->un_pm_mutex);
    while (un->un_pm_busy == TRUE) {
        cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
    }
    un->un_pm_busy = TRUE;

    if (un->un_pm_count < 1) {

        SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");

        /*
         * Indicate we are now busy so the framework won't attempt to
         * power down the device. This call will only fail if either
         * we passed a bad component number or the device has no
         * components. Neither of these should ever happen.
         */
        mutex_exit(&un->un_pm_mutex);
        return_status = pm_busy_component(SD_DEVINFO(un), 0);
        ASSERT(return_status == DDI_SUCCESS);

        mutex_enter(&un->un_pm_mutex);

        if (un->un_pm_count < 0) {
            mutex_exit(&un->un_pm_mutex);

            SD_TRACE(SD_LOG_IO_PM, un,
                "sd_pm_entry: power up component\n");

            /*
             * pm_raise_power will cause sdpower to be called
             * which brings the device power level to the
             * desired state, ON in this case. If successful,
             * un_pm_count and un_power_level will be updated
             * appropriately.
             */
            return_status = pm_raise_power(SD_DEVINFO(un), 0,
                SD_SPINDLE_ON);

            mutex_enter(&un->un_pm_mutex);

            if (return_status != DDI_SUCCESS) {
                /*
                 * Power up failed.
                 * Idle the device and adjust the count
                 * so that the result on exit is that we're
                 * still powered down, i.e. count is less than 0.
                 */
                SD_TRACE(SD_LOG_IO_PM, un,
                    "sd_pm_entry: power up failed,"
                    " idle the component\n");

                (void) pm_idle_component(SD_DEVINFO(un), 0);
                un->un_pm_count--;
            } else {
                /*
                 * Device is powered up, verify the
                 * count is non-negative.
                 * This is debug only.
                 */
                ASSERT(un->un_pm_count == 0);
            }
        }

        if (return_status == DDI_SUCCESS) {
            /*
             * For performance, now that the device has been tagged
             * as busy, and it's known to be powered up, update the
             * chain types to use jump tables that do not include
             * pm. This significantly lowers the overhead and
             * therefore improves performance.
             */

            mutex_exit(&un->un_pm_mutex);
            mutex_enter(SD_MUTEX(un));
            SD_TRACE(SD_LOG_IO_PM, un,
                "sd_pm_entry: changing uscsi_chain_type from %d\n",
                un->un_uscsi_chain_type);

            if (un->un_f_non_devbsize_supported) {
                un->un_buf_chain_type =
                    SD_CHAIN_INFO_RMMEDIA_NO_PM;
            } else {
                un->un_buf_chain_type =
                    SD_CHAIN_INFO_DISK_NO_PM;
            }
            un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;

            SD_TRACE(SD_LOG_IO_PM, un,
                "    changed uscsi_chain_type to %d\n",
                un->un_uscsi_chain_type);
            mutex_exit(SD_MUTEX(un));
            mutex_enter(&un->un_pm_mutex);

            if (un->un_pm_idle_timeid == NULL) {
                /* 300 ms. */
                un->un_pm_idle_timeid =
                    timeout(sd_pm_idletimeout_handler, un,
                    (drv_usectohz((clock_t)300000)));
                /*
                 * Include an extra call to busy which keeps the
                 * device busy with respect to the PM layer
                 * until the timer fires, at which time it'll
                 * get the extra idle call.
                 */
                (void) pm_busy_component(SD_DEVINFO(un), 0);
            }
        }
    }
    un->un_pm_busy = FALSE;
    /* Next... */
    cv_signal(&un->un_pm_busy_cv);

    un->un_pm_count++;

    SD_TRACE(SD_LOG_IO_PM, un,
        "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);

    mutex_exit(&un->un_pm_mutex);

    return (return_status);
}


/*
 * Function: sd_pm_exit
 *
 * Description: Called at the completion of a command to manage busy
 *		status for the device. If the device becomes idle the
 *		PM framework is notified.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_exit(struct sd_lun *un)
{
    ASSERT(!mutex_owned(SD_MUTEX(un)));
    ASSERT(!mutex_owned(&un->un_pm_mutex));

    SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");

    /*
     * After attach the following flag is only read, so don't
     * take the penalty of acquiring a mutex for it.
     */
    if (un->un_f_pm_is_enabled == TRUE) {

        mutex_enter(&un->un_pm_mutex);
        un->un_pm_count--;

        SD_TRACE(SD_LOG_IO_PM, un,
            "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);

        ASSERT(un->un_pm_count >= 0);
        if (un->un_pm_count == 0) {
            mutex_exit(&un->un_pm_mutex);

            SD_TRACE(SD_LOG_IO_PM, un,
                "sd_pm_exit: idle component\n");

            (void) pm_idle_component(SD_DEVINFO(un), 0);

        } else {
            mutex_exit(&un->un_pm_mutex);
        }
    }

    SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
}


/*
 * Function: sdopen
 *
 * Description: Driver's open(9e) entry point function.
 *
 * Arguments: dev_p  - pointer to device number
 *		flag   - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
 *		otyp   - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer
 *
 * Return Code: EINVAL
 *		ENXIO
 *		EIO
 *		EROFS
 *		EBUSY
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
    struct sd_lun	*un;
    int			nodelay;
    int			part;
    uint64_t		partmask;
    int			instance;
    dev_t		dev;
    int			rval = EIO;
    diskaddr_t		nblks = 0;
    diskaddr_t		label_cap;

    /* Validate the open type */
    if (otyp >= OTYPCNT) {
        return (EINVAL);
    }

    dev = *dev_p;
    instance = SDUNIT(dev);
    mutex_enter(&sd_detach_mutex);

    /*
     * Fail the open if there is no softstate for the instance, or
     * if another thread somewhere is trying to detach the instance.
     */
    if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
        (un->un_detach_count != 0)) {
        mutex_exit(&sd_detach_mutex);
        /*
         * The probe cache only needs to be cleared when open(9e)
         * fails with ENXIO (4238046).
         *
         * Clearing the probe cache unconditionally is fine with
         * separate sd/ssd binaries; on x86, where both parallel
         * SCSI and fibre are handled by one binary, it can be an
         * issue.
         */
        sd_scsi_clear_probe_cache();
        return (ENXIO);
    }

    /*
     * The un_layer_count is to prevent another thread in specfs from
     * trying to detach the instance, which can happen when we are
     * called from a higher-layer driver instead of thru specfs.
     * This will not be needed when DDI provides a layered driver
     * interface that allows specfs to know that an instance is in
     * use by a layered driver & should not be detached.
     *
     * Note: the semantics for layered driver opens are exactly one
     * close for every open.
     */
    if (otyp == OTYP_LYR) {
        un->un_layer_count++;
    }

    /*
     * Keep a count of the current # of opens in progress. This is because
     * some layered drivers try to call us as a regular open. This can
     * cause problems that we cannot prevent, however by keeping this
     * count we can at least keep our open and detach routines from
     * racing against each other under such conditions.
     */
    un->un_opens_in_progress++;
    mutex_exit(&sd_detach_mutex);

    nodelay  = (flag & (FNDELAY | FNONBLOCK));
    part     = SDPART(dev);
    partmask = 1 << part;

    /*
     * We use a semaphore here in order to serialize
     * open and close requests on the device.
     */
    sema_p(&un->un_semoclose);

    mutex_enter(SD_MUTEX(un));

    /*
     * All device accesses go thru sdstrategy() where we check
     * on suspend status but there could be a scsi_poll command,
     * which bypasses sdstrategy(), so we need to check pm
     * status.
     */

    if (!nodelay) {
        while ((un->un_state == SD_STATE_SUSPENDED) ||
            (un->un_state == SD_STATE_PM_CHANGING)) {
            cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
        }

        mutex_exit(SD_MUTEX(un));
        if (sd_pm_entry(un) != DDI_SUCCESS) {
            rval = EIO;
            SD_ERROR(SD_LOG_OPEN_CLOSE, un,
                "sdopen: sd_pm_entry failed\n");
            goto open_failed_with_pm;
        }
        mutex_enter(SD_MUTEX(un));
    }

    /* check for previous exclusive open */
    SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
    SD_TRACE(SD_LOG_OPEN_CLOSE, un,
        "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
        un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);

    if (un->un_exclopen & (partmask)) {
        goto excl_open_fail;
    }

    if (flag & FEXCL) {
        int i;
        if (un->un_ocmap.lyropen[part]) {
            goto excl_open_fail;
        }
        for (i = 0; i < (OTYPCNT - 1); i++) {
            if (un->un_ocmap.regopen[i] & (partmask)) {
                goto excl_open_fail;
            }
        }
    }

    /*
     * Check the write permission if this is a removable media device,
     * NDELAY has not been set, and writable permission is requested.
     *
     * Note: If NDELAY was set and this is write-protected media the WRITE
     * attempt will fail with EIO as part of the I/O processing. This is a
     * more permissive implementation that allows the open to succeed and
     * WRITE attempts to fail when appropriate.
     */
    if (un->un_f_chk_wp_open) {
        if ((flag & FWRITE) && (!nodelay)) {
            mutex_exit(SD_MUTEX(un));
            /*
             * Defer the check for write permission on writable
             * DVD drives until sdstrategy: such a device can be
             * writable depending on the media, and the media can
             * change after the call to open(). So do not fail the
             * open even if FWRITE is set.
             */
            if (un->un_f_dvdram_writable_device == FALSE) {
                if (ISCD(un) || sr_check_wp(dev)) {
                    rval = EROFS;
                    mutex_enter(SD_MUTEX(un));
                    SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
                        "write to cd or write protected media\n");
                    goto open_fail;
                }
            }
            mutex_enter(SD_MUTEX(un));
        }
    }

    /*
     * If opening in NDELAY/NONBLOCK mode, just return.
     * Check if disk is ready and has a valid geometry later.
     */
    if (!nodelay) {
        mutex_exit(SD_MUTEX(un));
        rval = sd_ready_and_valid(un);
        mutex_enter(SD_MUTEX(un));
        /*
         * Fail if device is not ready or if the number of disk
         * blocks is zero or negative for non CD devices.
         */

        nblks = 0;

        if (rval == SD_READY_VALID && (!ISCD(un))) {
            /* if cmlb_partinfo fails, nblks remains 0 */
            mutex_exit(SD_MUTEX(un));
            (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
                NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
            mutex_enter(SD_MUTEX(un));
        }

        if ((rval != SD_READY_VALID) ||
            (!ISCD(un) && nblks <= 0)) {
            rval = un->un_f_has_removable_media ? ENXIO : EIO;
            SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
                "device not ready or invalid disk block value\n");
            goto open_fail;
        }
#if defined(__i386) || defined(__amd64)
    } else {
        uchar_t *cp;
        /*
         * x86 requires special nodelay handling, so that p0 is
         * always defined and accessible.
         * Invalidate geometry only if device is not already open.
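         *
         * (The scan below treats un_ocmap.chkd as a byte map; the
         * device counts as "not already open" only if every byte in
         * the map is zero.)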
         */
        cp = &un->un_ocmap.chkd[0];
        while (cp < &un->un_ocmap.chkd[OCSIZE]) {
            if (*cp != (uchar_t)0) {
                break;
            }
            cp++;
        }
        if (cp == &un->un_ocmap.chkd[OCSIZE]) {
            mutex_exit(SD_MUTEX(un));
            cmlb_invalidate(un->un_cmlbhandle,
                (void *)SD_PATH_DIRECT);
            mutex_enter(SD_MUTEX(un));
        }

#endif
    }

    if (otyp == OTYP_LYR) {
        un->un_ocmap.lyropen[part]++;
    } else {
        un->un_ocmap.regopen[otyp] |= partmask;
    }

    /* Set up open and exclusive open flags */
    if (flag & FEXCL) {
        un->un_exclopen |= (partmask);
    }

    /*
     * If the lun is EFI labeled and lun capacity is greater than the
     * capacity contained in the label, log a sys-event to notify the
     * interested module.
     * To avoid an infinite loop of logging sys-events, we only log the
     * event when the lun is not opened in NDELAY mode. The event handler
     * should open the lun in NDELAY mode.
     */
    if (!(flag & FNDELAY)) {
        mutex_exit(SD_MUTEX(un));
        if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
            (void*)SD_PATH_DIRECT) == 0) {
            mutex_enter(SD_MUTEX(un));
            if (un->un_f_blockcount_is_valid &&
                un->un_blockcount > label_cap) {
                mutex_exit(SD_MUTEX(un));
                sd_log_lun_expansion_event(un,
                    (nodelay ? KM_NOSLEEP : KM_SLEEP));
                mutex_enter(SD_MUTEX(un));
            }
        } else {
            mutex_enter(SD_MUTEX(un));
        }
    }

    SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
        "open of part %d type %d\n", part, otyp);

    mutex_exit(SD_MUTEX(un));
    if (!nodelay) {
        sd_pm_exit(un);
    }

    sema_v(&un->un_semoclose);

    mutex_enter(&sd_detach_mutex);
    un->un_opens_in_progress--;
    mutex_exit(&sd_detach_mutex);

    SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
    return (DDI_SUCCESS);

excl_open_fail:
    SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
    rval = EBUSY;

open_fail:
    mutex_exit(SD_MUTEX(un));

    /*
     * On a failed open we must exit the pm management.
     */
    if (!nodelay) {
        sd_pm_exit(un);
    }
open_failed_with_pm:
    sema_v(&un->un_semoclose);

    mutex_enter(&sd_detach_mutex);
    un->un_opens_in_progress--;
    if (otyp == OTYP_LYR) {
        un->un_layer_count--;
    }
    mutex_exit(&sd_detach_mutex);

    return (rval);
}


/*
 * Function: sdclose
 *
 * Description: Driver's close(9e) entry point function.
 *
 * Arguments: dev    - device number
 *		flag   - file status flag, informational only
 *		otyp   - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer
 *
 * Return Code: 0
 *		ENXIO
 *		EIO
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
    struct sd_lun	*un;
    uchar_t		*cp;
    int			part;
    int			nodelay;
    int			rval = 0;

    /* Validate the open type */
    if (otyp >= OTYPCNT) {
        return (ENXIO);
    }

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    part = SDPART(dev);
    nodelay = flag & (FNDELAY | FNONBLOCK);

    SD_TRACE(SD_LOG_OPEN_CLOSE, un,
        "sdclose: close of part %d type %d\n", part, otyp);

    /*
     * We use a semaphore here in order to serialize
     * open and close requests on the device.
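     * (A semaphore, rather than a mutex, is used here, presumably
     * because the open/close path blocks in calls such as
     * sd_pm_entry() and the SCSI commands issued below, and must
     * remain serialized across those blocking calls.)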
     */
    sema_p(&un->un_semoclose);

    mutex_enter(SD_MUTEX(un));

    /* Don't proceed if power is being changed. */
    while (un->un_state == SD_STATE_PM_CHANGING) {
        cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
    }

    if (un->un_exclopen & (1 << part)) {
        un->un_exclopen &= ~(1 << part);
    }

    /* Update the open partition map */
    if (otyp == OTYP_LYR) {
        un->un_ocmap.lyropen[part] -= 1;
    } else {
        un->un_ocmap.regopen[otyp] &= ~(1 << part);
    }

    cp = &un->un_ocmap.chkd[0];
    while (cp < &un->un_ocmap.chkd[OCSIZE]) {
        if (*cp != (uchar_t)0) {
            break;
        }
        cp++;
    }

    if (cp == &un->un_ocmap.chkd[OCSIZE]) {
        SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");

        /*
         * We avoid persistence upon the last close, and set
         * the throttle back to the maximum.
         */
        un->un_throttle = un->un_saved_throttle;

        if (un->un_state == SD_STATE_OFFLINE) {
            if (un->un_f_is_fibre == FALSE) {
                scsi_log(SD_DEVINFO(un), sd_label,
                    CE_WARN, "offline\n");
            }
            mutex_exit(SD_MUTEX(un));
            cmlb_invalidate(un->un_cmlbhandle,
                (void *)SD_PATH_DIRECT);
            mutex_enter(SD_MUTEX(un));

        } else {
            /*
             * Flush any outstanding writes in NVRAM cache.
             * Note: SYNCHRONIZE CACHE is an optional SCSI-2
             * cmd, it may not work for non-Pluto devices.
             * SYNCHRONIZE CACHE is not required for removables,
             * except DVD-RAM drives.
             *
             * Also note: because SYNCHRONIZE CACHE is currently
             * the only command issued here that requires the
             * drive to be powered up, only do the power up before
             * sending the Sync Cache command. If additional
             * commands are added which require a powered up
             * drive, the following sequence may have to change.
             *
             * And finally, note that parallel SCSI on SPARC
             * only issues a Sync Cache to DVD-RAM, a newly
             * supported device.
             */
#if defined(__i386) || defined(__amd64)
            if (un->un_f_sync_cache_supported ||
                un->un_f_dvdram_writable_device == TRUE) {
#else
            if (un->un_f_dvdram_writable_device == TRUE) {
#endif
                mutex_exit(SD_MUTEX(un));
                if (sd_pm_entry(un) == DDI_SUCCESS) {
                    rval =
                        sd_send_scsi_SYNCHRONIZE_CACHE(un,
                        NULL);
                    /* ignore error if not supported */
                    if (rval == ENOTSUP) {
                        rval = 0;
                    } else if (rval != 0) {
                        rval = EIO;
                    }
                    sd_pm_exit(un);
                } else {
                    rval = EIO;
                }
                mutex_enter(SD_MUTEX(un));
            }

            /*
             * For devices which support DOOR_LOCK, send an ALLOW
             * MEDIA REMOVAL command, but don't get upset if it
             * fails. We need to raise the power of the drive
             * before we can call sd_send_scsi_DOORLOCK().
             */
            if (un->un_f_doorlock_supported) {
                mutex_exit(SD_MUTEX(un));
                if (sd_pm_entry(un) == DDI_SUCCESS) {
                    rval = sd_send_scsi_DOORLOCK(un,
                        SD_REMOVAL_ALLOW, SD_PATH_DIRECT);

                    sd_pm_exit(un);
                    if (ISCD(un) && (rval != 0) &&
                        (nodelay != 0)) {
                        rval = ENXIO;
                    }
                } else {
                    rval = EIO;
                }
                mutex_enter(SD_MUTEX(un));
            }

            /*
             * If a device has removable media, invalidate all
             * parameters related to media, such as geometry,
             * blocksize, and blockcount.
             */
            if (un->un_f_has_removable_media) {
                sr_ejected(un);
            }

            /*
             * Destroy the cache (if it exists) which was
             * allocated for the write maps, since this is
             * the last close for this media.
             */
            if (un->un_wm_cache) {
                /*
                 * Check if there are pending commands,
                 * and if there are, give a warning and
                 * do not destroy the cache.
                 */
                if (un->un_ncmds_in_driver > 0) {
                    scsi_log(SD_DEVINFO(un),
                        sd_label, CE_WARN,
                        "Unable to clean up memory "
                        "because of pending I/O\n");
                } else {
                    kmem_cache_destroy(
                        un->un_wm_cache);
                    un->un_wm_cache = NULL;
                }
            }
        }
    }

    mutex_exit(SD_MUTEX(un));
    sema_v(&un->un_semoclose);

    if (otyp == OTYP_LYR) {
        mutex_enter(&sd_detach_mutex);
        /*
         * The detach routine may run when the layer count
         * drops to zero.
         */
        un->un_layer_count--;
        mutex_exit(&sd_detach_mutex);
    }

    return (rval);
}


/*
 * Function: sd_ready_and_valid
 *
 * Description: Test if device is ready and has a valid geometry.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: SD_READY_VALID		ready and valid label
 *		SD_NOT_READY_VALID	not ready, no label
 *		SD_RESERVED_BY_OTHERS	reservation conflict
 *		ENOMEM			could not allocate the write map
 *					cache
 *
 * Context: Never called at interrupt context.
 */

static int
sd_ready_and_valid(struct sd_lun *un)
{
    struct sd_errstats	*stp;
    uint64_t		capacity;
    uint_t		lbasize;
    int			rval = SD_READY_VALID;
    char		name_str[48];
    int			is_valid;

    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    mutex_enter(SD_MUTEX(un));
    /*
     * If a device has removable media, we must check if media is
     * ready when checking if this device is ready and valid.
     */
    if (un->un_f_has_removable_media) {
        mutex_exit(SD_MUTEX(un));
        if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
            rval = SD_NOT_READY_VALID;
            mutex_enter(SD_MUTEX(un));
            goto done;
        }

        is_valid = SD_IS_VALID_LABEL(un);
        mutex_enter(SD_MUTEX(un));
        if (!is_valid ||
            (un->un_f_blockcount_is_valid == FALSE) ||
            (un->un_f_tgt_blocksize_is_valid == FALSE)) {

            /* capacity has to be read every open. */
            mutex_exit(SD_MUTEX(un));
            if (sd_send_scsi_READ_CAPACITY(un, &capacity,
                &lbasize, SD_PATH_DIRECT) != 0) {
                cmlb_invalidate(un->un_cmlbhandle,
                    (void *)SD_PATH_DIRECT);
                mutex_enter(SD_MUTEX(un));
                rval = SD_NOT_READY_VALID;
                goto done;
            } else {
                mutex_enter(SD_MUTEX(un));
                sd_update_block_info(un, lbasize, capacity);
            }
        }

        /*
         * Check if the media in the device is writable or not.
         */
        if (!is_valid && ISCD(un)) {
            sd_check_for_writable_cd(un, SD_PATH_DIRECT);
        }

    } else {
        /*
         * Do a test unit ready to clear any unit attention from non-cd
         * devices.
         */
        mutex_exit(SD_MUTEX(un));
        (void) sd_send_scsi_TEST_UNIT_READY(un, 0);
        mutex_enter(SD_MUTEX(un));
    }


    /*
     * If this is a non 512 block device, allocate space for
     * the wmap cache. This is being done here since every time
     * a media is changed this routine will be called and the
     * block size is a function of media rather than device.
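     *
     * For example (illustrative only): instance 3 of the "sd" driver
     * would create a cache named "sd3_cache", per the snprintf()
     * format below.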
     */
    if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) {
        if (!(un->un_wm_cache)) {
            (void) snprintf(name_str, sizeof (name_str),
                "%s%d_cache",
                ddi_driver_name(SD_DEVINFO(un)),
                ddi_get_instance(SD_DEVINFO(un)));
            un->un_wm_cache = kmem_cache_create(
                name_str, sizeof (struct sd_w_map),
                8, sd_wm_cache_constructor,
                sd_wm_cache_destructor, NULL,
                (void *)un, NULL, 0);
            if (!(un->un_wm_cache)) {
                rval = ENOMEM;
                goto done;
            }
        }
    }

    if (un->un_state == SD_STATE_NORMAL) {
        /*
         * If the target is not yet ready here (defined by a TUR
         * failure), invalidate the geometry and print an 'offline'
         * message. This is a legacy message, as the state of the
         * target is not actually changed to SD_STATE_OFFLINE.
         *
         * If the TUR fails for EACCES (Reservation Conflict),
         * SD_RESERVED_BY_OTHERS will be returned to indicate
         * reservation conflict. If the TUR fails for other
         * reasons, SD_NOT_READY_VALID will be returned.
         */
        int err;

        mutex_exit(SD_MUTEX(un));
        err = sd_send_scsi_TEST_UNIT_READY(un, 0);
        mutex_enter(SD_MUTEX(un));

        if (err != 0) {
            mutex_exit(SD_MUTEX(un));
            cmlb_invalidate(un->un_cmlbhandle,
                (void *)SD_PATH_DIRECT);
            mutex_enter(SD_MUTEX(un));
            if (err == EACCES) {
                scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                    "reservation conflict\n");
                rval = SD_RESERVED_BY_OTHERS;
            } else {
                scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                    "drive offline\n");
                rval = SD_NOT_READY_VALID;
            }
            goto done;
        }
    }

    if (un->un_f_format_in_progress == FALSE) {
        mutex_exit(SD_MUTEX(un));
        if (cmlb_validate(un->un_cmlbhandle, 0,
            (void *)SD_PATH_DIRECT) != 0) {
            rval = SD_NOT_READY_VALID;
            mutex_enter(SD_MUTEX(un));
            goto done;
        }
        if (un->un_f_pkstats_enabled) {
            sd_set_pstats(un);
            SD_TRACE(SD_LOG_IO_PARTITION, un,
                "sd_ready_and_valid: un:0x%p pstats created and "
                "set\n", un);
        }
        mutex_enter(SD_MUTEX(un));
    }

    /*
     * If this device supports the DOOR_LOCK command, try to send
     * this command to PREVENT MEDIA REMOVAL, but don't get upset
     * if it fails. For a CD, however, it is an error.
     */
    if (un->un_f_doorlock_supported) {
        mutex_exit(SD_MUTEX(un));
        if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
            SD_PATH_DIRECT) != 0) && ISCD(un)) {
            rval = SD_NOT_READY_VALID;
            mutex_enter(SD_MUTEX(un));
            goto done;
        }
        mutex_enter(SD_MUTEX(un));
    }

    /* The state has changed, inform the media watch routines */
    un->un_mediastate = DKIO_INSERTED;
    cv_broadcast(&un->un_state_cv);
    rval = SD_READY_VALID;

done:

    /*
     * Initialize the capacity kstat value, if no media previously
     * (capacity kstat is 0) and a media has been inserted
     * (un_blockcount > 0).
     */
    if (un->un_errstats != NULL) {
        stp = (struct sd_errstats *)un->un_errstats->ks_data;
        if ((stp->sd_capacity.value.ui64 == 0) &&
            (un->un_f_blockcount_is_valid == TRUE)) {
            stp->sd_capacity.value.ui64 =
                (uint64_t)((uint64_t)un->un_blockcount *
                un->un_sys_blocksize);
        }
    }

    mutex_exit(SD_MUTEX(un));
    return (rval);
}


/*
 * Function: sdmin
 *
 * Description: Routine to limit the size of a data transfer. Used in
 *		conjunction with physio(9F).
 *
 * Arguments: bp - pointer to the indicated buf(9S) struct.
 *
 * Context: Kernel thread context.
 */

static void
sdmin(struct buf *bp)
{
    struct sd_lun	*un;
    int			instance;

    instance = SDUNIT(bp->b_edev);

    un = ddi_get_soft_state(sd_state, instance);
    ASSERT(un != NULL);

    if (bp->b_bcount > un->un_max_xfer_size) {
        bp->b_bcount = un->un_max_xfer_size;
    }
}


/*
 * Function: sdread
 *
 * Description: Driver's read(9e) entry point function.
 *
 * Arguments: dev    - device number
 *		uio    - structure pointer describing where data is to be
 *			 stored in user's space
 *		cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by physio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
{
    struct sd_lun	*un = NULL;
    int			secmask;
    int			err;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    ASSERT(!mutex_owned(SD_MUTEX(un)));

    if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
        mutex_enter(SD_MUTEX(un));
        /*
         * Because the call to sd_ready_and_valid will issue I/O we
         * must wait here if either the device is suspended or
         * if its power level is changing.
         */
        while ((un->un_state == SD_STATE_SUSPENDED) ||
            (un->un_state == SD_STATE_PM_CHANGING)) {
            cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
        }
        un->un_ncmds_in_driver++;
        mutex_exit(SD_MUTEX(un));
        if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
            mutex_enter(SD_MUTEX(un));
            un->un_ncmds_in_driver--;
            ASSERT(un->un_ncmds_in_driver >= 0);
            mutex_exit(SD_MUTEX(un));
            return (EIO);
        }
        mutex_enter(SD_MUTEX(un));
        un->un_ncmds_in_driver--;
        ASSERT(un->un_ncmds_in_driver >= 0);
        mutex_exit(SD_MUTEX(un));
    }

    /*
     * Read requests are restricted to multiples of the system block size.
     */
    secmask = un->un_sys_blocksize - 1;

    if (uio->uio_loffset & ((offset_t)(secmask))) {
        SD_ERROR(SD_LOG_READ_WRITE, un,
            "sdread: file offset not modulo %d\n",
            un->un_sys_blocksize);
        err = EINVAL;
    } else if (uio->uio_iov->iov_len & (secmask)) {
        SD_ERROR(SD_LOG_READ_WRITE, un,
            "sdread: transfer length not modulo %d\n",
            un->un_sys_blocksize);
        err = EINVAL;
    } else {
        err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
    }
    return (err);
}


/*
 * Function: sdwrite
 *
 * Description: Driver's write(9e) entry point function.
 *
 * Arguments: dev    - device number
 *		uio    - structure pointer describing where data is stored
 *			 in user's space
 *		cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by physio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
{
    struct sd_lun	*un = NULL;
    int			secmask;
    int			err;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    ASSERT(!mutex_owned(SD_MUTEX(un)));

    if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
        mutex_enter(SD_MUTEX(un));
        /*
         * Because the call to sd_ready_and_valid will issue I/O we
         * must wait here if either the device is suspended or
         * if its power level is changing.
         */
        while ((un->un_state == SD_STATE_SUSPENDED) ||
            (un->un_state == SD_STATE_PM_CHANGING)) {
            cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
        }
        un->un_ncmds_in_driver++;
        mutex_exit(SD_MUTEX(un));
        if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
            mutex_enter(SD_MUTEX(un));
            un->un_ncmds_in_driver--;
            ASSERT(un->un_ncmds_in_driver >= 0);
            mutex_exit(SD_MUTEX(un));
            return (EIO);
        }
        mutex_enter(SD_MUTEX(un));
        un->un_ncmds_in_driver--;
        ASSERT(un->un_ncmds_in_driver >= 0);
        mutex_exit(SD_MUTEX(un));
    }

    /*
     * Write requests are restricted to multiples of the system block size.
     */
    secmask = un->un_sys_blocksize - 1;

    if (uio->uio_loffset & ((offset_t)(secmask))) {
        SD_ERROR(SD_LOG_READ_WRITE, un,
            "sdwrite: file offset not modulo %d\n",
            un->un_sys_blocksize);
        err = EINVAL;
    } else if (uio->uio_iov->iov_len & (secmask)) {
        SD_ERROR(SD_LOG_READ_WRITE, un,
            "sdwrite: transfer length not modulo %d\n",
            un->un_sys_blocksize);
        err = EINVAL;
    } else {
        err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
    }
    return (err);
}


/*
 * Function: sdaread
 *
 * Description: Driver's aread(9e) entry point function.
 *
 * Arguments: dev    - device number
 *		aio    - structure pointer describing where data is to be
 *			 stored
 *		cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by aphysio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
    struct sd_lun	*un = NULL;
    struct uio		*uio = aio->aio_uio;
    int			secmask;
    int			err;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    ASSERT(!mutex_owned(SD_MUTEX(un)));

    if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
        mutex_enter(SD_MUTEX(un));
        /*
         * Because the call to sd_ready_and_valid will issue I/O we
         * must wait here if either the device is suspended or
         * if its power level is changing.
         */
        while ((un->un_state == SD_STATE_SUSPENDED) ||
            (un->un_state == SD_STATE_PM_CHANGING)) {
            cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
        }
        un->un_ncmds_in_driver++;
        mutex_exit(SD_MUTEX(un));
        if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
            mutex_enter(SD_MUTEX(un));
            un->un_ncmds_in_driver--;
            ASSERT(un->un_ncmds_in_driver >= 0);
            mutex_exit(SD_MUTEX(un));
            return (EIO);
        }
        mutex_enter(SD_MUTEX(un));
        un->un_ncmds_in_driver--;
        ASSERT(un->un_ncmds_in_driver >= 0);
        mutex_exit(SD_MUTEX(un));
    }

    /*
     * Read requests are restricted to multiples of the system block size.
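     * For example (assuming a 512-byte system block size): secmask is
     * then 0x1FF, so an offset of 513 or an iov_len of 1000, neither
     * of which is a multiple of 512, is rejected with EINVAL below.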
     */
    secmask = un->un_sys_blocksize - 1;

    if (uio->uio_loffset & ((offset_t)(secmask))) {
        SD_ERROR(SD_LOG_READ_WRITE, un,
            "sdaread: file offset not modulo %d\n",
            un->un_sys_blocksize);
        err = EINVAL;
    } else if (uio->uio_iov->iov_len & (secmask)) {
        SD_ERROR(SD_LOG_READ_WRITE, un,
            "sdaread: transfer length not modulo %d\n",
            un->un_sys_blocksize);
        err = EINVAL;
    } else {
        err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
    }
    return (err);
}


/*
 * Function: sdawrite
 *
 * Description: Driver's awrite(9e) entry point function.
 *
 * Arguments: dev    - device number
 *		aio    - structure pointer describing where data is stored
 *		cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by aphysio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
    struct sd_lun	*un = NULL;
    struct uio		*uio = aio->aio_uio;
    int			secmask;
    int			err;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    ASSERT(!mutex_owned(SD_MUTEX(un)));

    if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
        mutex_enter(SD_MUTEX(un));
        /*
         * Because the call to sd_ready_and_valid will issue I/O we
         * must wait here if either the device is suspended or
         * if its power level is changing.
         */
        while ((un->un_state == SD_STATE_SUSPENDED) ||
            (un->un_state == SD_STATE_PM_CHANGING)) {
            cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
        }
        un->un_ncmds_in_driver++;
        mutex_exit(SD_MUTEX(un));
        if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
            mutex_enter(SD_MUTEX(un));
            un->un_ncmds_in_driver--;
            ASSERT(un->un_ncmds_in_driver >= 0);
            mutex_exit(SD_MUTEX(un));
            return (EIO);
        }
        mutex_enter(SD_MUTEX(un));
        un->un_ncmds_in_driver--;
        ASSERT(un->un_ncmds_in_driver >= 0);
        mutex_exit(SD_MUTEX(un));
    }

    /*
     * Write requests are restricted to multiples of the system block size.
     */
    secmask = un->un_sys_blocksize - 1;

    if (uio->uio_loffset & ((offset_t)(secmask))) {
        SD_ERROR(SD_LOG_READ_WRITE, un,
            "sdawrite: file offset not modulo %d\n",
            un->un_sys_blocksize);
        err = EINVAL;
    } else if (uio->uio_iov->iov_len & (secmask)) {
        SD_ERROR(SD_LOG_READ_WRITE, un,
            "sdawrite: transfer length not modulo %d\n",
            un->un_sys_blocksize);
        err = EINVAL;
    } else {
        err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
    }
    return (err);
}




/*
 * Driver IO processing follows the following sequence:
 *
 *   sdioctl(9E)      sdstrategy(9E)                     biodone(9F)
 *       |                |                                   ^
 *       v                v                                   |
 *  sd_send_scsi_cmd()  ddi_xbuf_qstrategy()    +-------------------+
 *       |                |                     |                   |
 *       v                |                     |                   |
 *  sd_uscsi_strategy()  sd_xbuf_strategy()  sd_buf_iodone()  sd_uscsi_iodone()
 *       |                |                     ^                   ^
 *       v                v                     |                   |
 *  SD_BEGIN_IOSTART()  SD_BEGIN_IOSTART()      |                   |
 *       |                |                     |                   |
 *   +---+                |        +------------+            +------+
 *   |                    |        |                          |
 *   |  SD_NEXT_IOSTART() |        SD_NEXT_IODONE() |         |
 *   |                    v                         |         |
 *   |  sd_mapblockaddr_iostart()  sd_mapblockaddr_iodone()   |
 *   |                    |                         ^         |
 *   |  SD_NEXT_IOSTART() |        SD_NEXT_IODONE() |         |
 *   |                    v                         |         |
 *   |  sd_mapblocksize_iostart()  sd_mapblocksize_iodone()   |
 *   |                    |                         ^         |
 *   |  SD_NEXT_IOSTART() |        SD_NEXT_IODONE() |         |
 *   |                    v                         |         |
 *   |  sd_checksum_iostart()      sd_checksum_iodone()       |
 *   |                    |                         ^         |
 *   +-> SD_NEXT_IOSTART()|        SD_NEXT_IODONE()+--------->+
 *   |                    v                         |         |
 *   |  sd_pm_iostart()            sd_pm_iodone()             |
 *   |                    |                         ^         |
 *   |                    |                         |         |
 *   +-> SD_NEXT_IOSTART()|        SD_BEGIN_IODONE()--+-------+
 *                        |                           ^
 *                        v                           |
 *                 sd_core_iostart()                  |
 *                        |                           |
 *                        |                           +------>(*destroypkt)()
 *                        +-> sd_start_cmds() <-+     |           |
 *                        |                     |     |           v
 *                        |                     |     |  scsi_destroy_pkt(9F)
 *                        |                     |     |
 *                        +->(*initpkt)()       +- sdintr()
 *                        |       |                   |
 *                        |       +-> scsi_init_pkt(9F)   +-> sd_handle_xxx()
 *                        |       +-> scsi_setup_cdb(9F)  |
 *                        |                               |
 *                        +--> scsi_transport(9F)         |
 *                                  |                     |
 *                                  +----> SCSA ---->+
 *
 *
 * This code is based upon the following presumptions:
 *
 *   - iostart and iodone functions operate on buf(9S) structures. These
 *     functions perform the necessary operations on the buf(9S) and pass
 *     them along to the next function in the chain by using the macros
 *     SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
 *     (for iodone side functions).
 *
 *   - The iostart side functions may sleep. The iodone side functions
 *     are called under interrupt context and may NOT sleep. Therefore
 *     iodone side functions also may not call iostart side functions.
 *     (NOTE: iostart side functions should NOT sleep for memory, as
 *     this could result in deadlock.)
 *
 *   - An iostart side function may call its corresponding iodone side
 *     function directly (if necessary).
 *
 *   - In the event of an error, an iostart side function can return a buf(9S)
 *     to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
 *     b_error in the usual way of course).
 *
 *   - The taskq mechanism may be used by the iodone side functions to dispatch
 *     requests to the iostart side functions. The iostart side functions in
 *     this case would be called under the context of a taskq thread, so it's
 *     OK for them to block/sleep/spin in this case.
 *
 *   - iostart side functions may allocate "shadow" buf(9S) structs and
 *     pass them along to the next function in the chain. The corresponding
 *     iodone side functions must coalesce the "shadow" bufs and return
 *     the "original" buf to the next higher layer.
 *
 *   - The b_private field of the buf(9S) struct holds a pointer to
 *     an sd_xbuf struct, which contains information needed to
 *     construct the scsi_pkt for the command.
 *
 *   - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
 *     layer must acquire & release the SD_MUTEX(un) as needed.
 */


/*
 * Create taskq for all targets in the system. This is created at
 * _init(9E) and destroyed at _fini(9E).
 *
 * Note: here we set the minalloc to a reasonably high number to ensure that
 * we will have an adequate supply of task entries available at interrupt time.
 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
 * sd_taskq_create(). Since we do not want to sleep for allocations at
 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
 * requests at any one instant in time.
 */
#define	SD_TASKQ_NUMTHREADS	8
#define	SD_TASKQ_MINALLOC	256
#define	SD_TASKQ_MAXALLOC	256

static taskq_t	*sd_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))

static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;

/*
 * The following task queue is being created for the write part of
 * the read-modify-write of non-512 block size devices.
 * Limit the number of threads to 1 for now. This number has been chosen
 * considering the fact that it applies only to dvd ram drives/MO drives
 * currently. Performance for these is not the main criterion at this stage.
 * Note: It remains to be explored whether a single taskq can be used in
 * the future.
 */
#define	SD_WMR_TASKQ_NUMTHREADS	1
static taskq_t	*sd_wmr_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))

/*
 * Function: sd_taskq_create
 *
 * Description: Create taskq thread(s) and preallocate task entries
 *
 * Return Code: none; the created taskqs are stored in the global
 *		sd_tq and sd_wmr_tq pointers.
 *
 * Context: Can sleep. Requires blockable context.
 *
 * Notes: - The taskq() facility currently is NOT part of the DDI.
 *	    (definitely NOT recommended for 3rd-party drivers!) :-)
 *	  - taskq_create() will block for memory, also it will panic
 *	    if it cannot create the requested number of threads.
 *	  - Currently taskq_create() creates threads that cannot be
 *	    swapped.
 *	  - We use TASKQ_PREPOPULATE to ensure we have an adequate
 *	    supply of taskq entries at interrupt time (i.e., so that we
 *	    do not have to sleep for memory)
 */

static void
sd_taskq_create(void)
{
    char	taskq_name[TASKQ_NAMELEN];

    ASSERT(sd_tq == NULL);
    ASSERT(sd_wmr_tq == NULL);

    (void) snprintf(taskq_name, sizeof (taskq_name),
        "%s_drv_taskq", sd_label);
    sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
        (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
        TASKQ_PREPOPULATE));

    (void) snprintf(taskq_name, sizeof (taskq_name),
        "%s_rmw_taskq", sd_label);
    sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
        (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
        TASKQ_PREPOPULATE));
}


/*
 * Function: sd_taskq_delete
 *
 * Description: Complementary cleanup routine for sd_taskq_create().
 *
 * Context: Kernel thread context.
 */

static void
sd_taskq_delete(void)
{
    ASSERT(sd_tq != NULL);
    ASSERT(sd_wmr_tq != NULL);
    taskq_destroy(sd_tq);
    taskq_destroy(sd_wmr_tq);
    sd_tq = NULL;
    sd_wmr_tq = NULL;
}


/*
 * Function: sdstrategy
 *
 * Description: Driver's strategy (9E) entry point function.
 *
 * Arguments: bp - pointer to buf(9S)
 *
 * Return Code: Always returns zero
 *
 * Context: Kernel thread context.
 */

static int
sdstrategy(struct buf *bp)
{
    struct sd_lun *un;

    un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
    if (un == NULL) {
        bioerror(bp, EIO);
        bp->b_resid = bp->b_bcount;
        biodone(bp);
        return (0);
    }
    /* As was done in the past, fail new cmds if state is dumping. */
    if (un->un_state == SD_STATE_DUMPING) {
        bioerror(bp, ENXIO);
        bp->b_resid = bp->b_bcount;
        biodone(bp);
        return (0);
    }

    ASSERT(!mutex_owned(SD_MUTEX(un)));

    /*
     * Commands may sneak in while we released the mutex in
     * DDI_SUSPEND, so we should block new commands. However, old
     * commands that are still in the driver at this point should
     * still be allowed to drain.
     */
    mutex_enter(SD_MUTEX(un));
    /*
     * Must wait here if either the device is suspended or
     * its power level is changing.
     */
    while ((un->un_state == SD_STATE_SUSPENDED) ||
        (un->un_state == SD_STATE_PM_CHANGING)) {
        cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
    }

    un->un_ncmds_in_driver++;

    /*
     * atapi: Since we are running the CD for now in PIO mode we need to
     * call bp_mapin here to avoid bp_mapin being called in interrupt
     * context under the HBA's init_pkt routine.
     */
    if (un->un_f_cfg_is_atapi == TRUE) {
        mutex_exit(SD_MUTEX(un));
        bp_mapin(bp);
        mutex_enter(SD_MUTEX(un));
    }
    SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
        un->un_ncmds_in_driver);

    mutex_exit(SD_MUTEX(un));

    /*
     * This will (eventually) allocate the sd_xbuf area and
     * call sd_xbuf_strategy(). We just want to return the
     * result of ddi_xbuf_qstrategy so that we have an
     * optimized tail call which saves us a stack frame.
     */
    return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
}


/*
 * Function: sd_xbuf_strategy
 *
 * Description: Function for initiating IO operations via the
 *		ddi_xbuf_qstrategy() mechanism.
 *
 * Context: Kernel thread context.
 */

static void
sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
{
    struct sd_lun *un = arg;

    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    /*
     * Initialize the fields in the xbuf and save a pointer to the
     * xbuf in bp->b_private.
     */
    sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);

    /* Send the buf down the iostart chain */
    SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
}


/*
 * Function: sd_xbuf_init
 *
 * Description: Prepare the given sd_xbuf struct for use.
 *
 * Arguments: un - ptr to softstate
 *		bp - ptr to associated buf(9S)
 *		xp - ptr to associated sd_xbuf
 *		chain_type - IO chain type to use:
 *			SD_CHAIN_NULL
 *			SD_CHAIN_BUFIO
 *			SD_CHAIN_USCSI
 *			SD_CHAIN_DIRECT
 *			SD_CHAIN_DIRECT_PRIORITY
 *		pktinfop - ptr to private data struct for scsi_pkt(9S)
 *			   initialization; may be NULL if none.
 *
 * Context: Kernel thread context
 */

static void
sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop)
{
    int index;

    ASSERT(un != NULL);
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);

    SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
        bp, chain_type);

    xp->xb_un	  = un;
    xp->xb_pktp	  = NULL;
    xp->xb_pktinfo = pktinfop;
    xp->xb_private = bp->b_private;
    xp->xb_blkno  = (daddr_t)bp->b_blkno;

    /*
     * Set up the iostart and iodone chain indexes in the xbuf, based
     * upon the specified chain type to use.
     */
    switch (chain_type) {
    case SD_CHAIN_NULL:
        /*
         * Fall thru to just use the values for the buf type, even
         * though for the NULL chain these values will never be used.
         */
        /* FALLTHRU */
    case SD_CHAIN_BUFIO:
        index = un->un_buf_chain_type;
        break;
    case SD_CHAIN_USCSI:
        index = un->un_uscsi_chain_type;
        break;
    case SD_CHAIN_DIRECT:
        index = un->un_direct_chain_type;
        break;
    case SD_CHAIN_DIRECT_PRIORITY:
        index = un->un_priority_chain_type;
        break;
    default:
        /* We're really broken if we ever get here... */
        panic("sd_xbuf_init: illegal chain type!");
        /*NOTREACHED*/
    }

    xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
    xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;

    /*
     * It might be a bit easier to simply bzero the entire xbuf above,
     * but it turns out that since we init a fair number of members
     * anyway, we save a fair number of cycles by doing explicit
     * assignment of zero.
     */
    xp->xb_pkt_flags		= 0;
    xp->xb_dma_resid		= 0;
    xp->xb_retry_count		= 0;
    xp->xb_victim_retry_count	= 0;
    xp->xb_ua_retry_count	= 0;
    xp->xb_nr_retry_count	= 0;
    xp->xb_sense_bp		= NULL;
    xp->xb_sense_status		= 0;
    xp->xb_sense_state		= 0;
    xp->xb_sense_resid		= 0;

    bp->b_private	= xp;
    bp->b_flags		&= ~(B_DONE | B_ERROR);
    bp->b_resid		= 0;
    bp->av_forw		= NULL;
    bp->av_back		= NULL;
    bioerror(bp, 0);

    SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
}


/*
 * Function: sd_uscsi_strategy
 *
 * Description: Wrapper for calling into the USCSI chain via physio(9F)
 *
 * Arguments: bp - buf struct ptr
 *
 * Return Code: Always returns 0
 *
 * Context: Kernel thread context
 */

static int
sd_uscsi_strategy(struct buf *bp)
{
    struct sd_lun		*un;
    struct sd_uscsi_info	*uip;
    struct sd_xbuf		*xp;
    uchar_t			chain_type;

    ASSERT(bp != NULL);

    un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
    if (un == NULL) {
        bioerror(bp, EIO);
        bp->b_resid = bp->b_bcount;
        biodone(bp);
        return (0);
    }

    ASSERT(!mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);

    mutex_enter(SD_MUTEX(un));
    /*
     * atapi: Since we are running the CD for now in PIO mode we need to
     * call bp_mapin here to avoid bp_mapin being called in interrupt
     * context under the HBA's init_pkt routine.
     */
    if (un->un_f_cfg_is_atapi == TRUE) {
        mutex_exit(SD_MUTEX(un));
        bp_mapin(bp);
        mutex_enter(SD_MUTEX(un));
    }
    un->un_ncmds_in_driver++;
    SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
        un->un_ncmds_in_driver);
    mutex_exit(SD_MUTEX(un));

    /*
     * A pointer to a struct sd_uscsi_info is expected in bp->b_private
     */
    ASSERT(bp->b_private != NULL);
    uip = (struct sd_uscsi_info *)bp->b_private;

    switch (uip->ui_flags) {
    case SD_PATH_DIRECT:
        chain_type = SD_CHAIN_DIRECT;
        break;
    case SD_PATH_DIRECT_PRIORITY:
        chain_type = SD_CHAIN_DIRECT_PRIORITY;
        break;
    default:
        chain_type = SD_CHAIN_USCSI;
        break;
    }

    /*
     * We may allocate an extra buf for external USCSI commands. If the
     * application asks for more than 20 bytes of sense data via USCSI,
     * the SCSA layer will allocate a 252-byte sense buffer for that
     * command.
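     *
     * In that case the xbuf allocated below is padded from
     * SENSE_LENGTH out to MAX_SENSE_LENGTH; sd_uscsi_iodone() frees
     * it using the same size calculation.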
10554 */ 10555 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 10556 SENSE_LENGTH) { 10557 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 10558 MAX_SENSE_LENGTH, KM_SLEEP); 10559 } else { 10560 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 10561 } 10562 10563 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10564 10565 /* Use the index obtained within xbuf_init */ 10566 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10567 10568 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10569 10570 return (0); 10571 } 10572 10573 /* 10574 * Function: sd_send_scsi_cmd 10575 * 10576 * Description: Runs a USCSI command for user (when called thru sdioctl), 10577 * or for the driver 10578 * 10579 * Arguments: dev - the dev_t for the device 10580 * incmd - ptr to a valid uscsi_cmd struct 10581 * flag - bit flag, indicating open settings, 32/64 bit type 10582 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10583 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10584 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10585 * to use the USCSI "direct" chain and bypass the normal 10586 * command waitq. 10587 * 10588 * Return Code: 0 - successful completion of the given command 10589 * EIO - scsi_uscsi_handle_command() failed 10590 * ENXIO - soft state not found for specified dev 10591 * EINVAL 10592 * EFAULT - copyin/copyout error 10593 * return code of scsi_uscsi_handle_command(): 10594 * EIO 10595 * ENXIO 10596 * EACCES 10597 * 10598 * Context: Waits for command to complete. Can sleep. 10599 */ 10600 10601 static int 10602 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10603 enum uio_seg dataspace, int path_flag) 10604 { 10605 struct sd_uscsi_info *uip; 10606 struct uscsi_cmd *uscmd; 10607 struct sd_lun *un; 10608 int format = 0; 10609 int rval; 10610 10611 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10612 if (un == NULL) { 10613 return (ENXIO); 10614 } 10615 10616 ASSERT(!mutex_owned(SD_MUTEX(un))); 10617 10618 #ifdef SDDEBUG 10619 switch (dataspace) { 10620 case UIO_USERSPACE: 10621 SD_TRACE(SD_LOG_IO, un, 10622 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10623 break; 10624 case UIO_SYSSPACE: 10625 SD_TRACE(SD_LOG_IO, un, 10626 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10627 break; 10628 default: 10629 SD_TRACE(SD_LOG_IO, un, 10630 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10631 break; 10632 } 10633 #endif 10634 10635 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10636 SD_ADDRESS(un), &uscmd); 10637 if (rval != 0) { 10638 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 10639 "scsi_uscsi_alloc_and_copyin failed\n", un); 10640 return (rval); 10641 } 10642 10643 if ((uscmd->uscsi_cdb != NULL) && 10644 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10645 mutex_enter(SD_MUTEX(un)); 10646 un->un_f_format_in_progress = TRUE; 10647 mutex_exit(SD_MUTEX(un)); 10648 format = 1; 10649 } 10650 10651 /* 10652 * Allocate an sd_uscsi_info struct and fill it with the info 10653 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10654 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10655 * since we allocate the buf here in this function, we do not 10656 * need to preserve the prior contents of b_private. 
10657 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10658 */ 10659 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10660 uip->ui_flags = path_flag; 10661 uip->ui_cmdp = uscmd; 10662 10663 /* 10664 * Commands sent with priority are intended for error recovery 10665 * situations, and do not have retries performed. 10666 */ 10667 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10668 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10669 } 10670 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10671 10672 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10673 sd_uscsi_strategy, NULL, uip); 10674 10675 #ifdef SDDEBUG 10676 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10677 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10678 uscmd->uscsi_status, uscmd->uscsi_resid); 10679 if (uscmd->uscsi_bufaddr != NULL) { 10680 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10681 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10682 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10683 if (dataspace == UIO_SYSSPACE) { 10684 SD_DUMP_MEMORY(un, SD_LOG_IO, 10685 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10686 uscmd->uscsi_buflen, SD_LOG_HEX); 10687 } 10688 } 10689 #endif 10690 10691 if (format == 1) { 10692 mutex_enter(SD_MUTEX(un)); 10693 un->un_f_format_in_progress = FALSE; 10694 mutex_exit(SD_MUTEX(un)); 10695 } 10696 10697 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10698 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10699 10700 return (rval); 10701 } 10702 10703 10704 /* 10705 * Function: sd_buf_iodone 10706 * 10707 * Description: Frees the sd_xbuf & returns the buf to its originator. 10708 * 10709 * Context: May be called from interrupt context. 10710 */ 10711 /* ARGSUSED */ 10712 static void 10713 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10714 { 10715 struct sd_xbuf *xp; 10716 10717 ASSERT(un != NULL); 10718 ASSERT(bp != NULL); 10719 ASSERT(!mutex_owned(SD_MUTEX(un))); 10720 10721 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10722 10723 xp = SD_GET_XBUF(bp); 10724 ASSERT(xp != NULL); 10725 10726 mutex_enter(SD_MUTEX(un)); 10727 10728 /* 10729 * Grab time when the cmd completed. 10730 * This is used for determining if the system has been 10731 * idle long enough to make it idle to the PM framework. 10732 * This is for lowering the overhead, and therefore improving 10733 * performance per I/O operation. 10734 */ 10735 un->un_pm_idle_time = ddi_get_time(); 10736 10737 un->un_ncmds_in_driver--; 10738 ASSERT(un->un_ncmds_in_driver >= 0); 10739 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10740 un->un_ncmds_in_driver); 10741 10742 mutex_exit(SD_MUTEX(un)); 10743 10744 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10745 biodone(bp); /* bp is gone after this */ 10746 10747 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10748 } 10749 10750 10751 /* 10752 * Function: sd_uscsi_iodone 10753 * 10754 * Description: Frees the sd_xbuf & returns the buf to its originator. 10755 * 10756 * Context: May be called from interrupt context. 
10757 */ 10758 /* ARGSUSED */ 10759 static void 10760 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10761 { 10762 struct sd_xbuf *xp; 10763 10764 ASSERT(un != NULL); 10765 ASSERT(bp != NULL); 10766 10767 xp = SD_GET_XBUF(bp); 10768 ASSERT(xp != NULL); 10769 ASSERT(!mutex_owned(SD_MUTEX(un))); 10770 10771 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10772 10773 bp->b_private = xp->xb_private; 10774 10775 mutex_enter(SD_MUTEX(un)); 10776 10777 /* 10778 * Grab time when the cmd completed. 10779 * This is used for determining if the system has been 10780 * idle long enough to make it idle to the PM framework. 10781 * This is for lowering the overhead, and therefore improving 10782 * performance per I/O operation. 10783 */ 10784 un->un_pm_idle_time = ddi_get_time(); 10785 10786 un->un_ncmds_in_driver--; 10787 ASSERT(un->un_ncmds_in_driver >= 0); 10788 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10789 un->un_ncmds_in_driver); 10790 10791 mutex_exit(SD_MUTEX(un)); 10792 10793 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 10794 SENSE_LENGTH) { 10795 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 10796 MAX_SENSE_LENGTH); 10797 } else { 10798 kmem_free(xp, sizeof (struct sd_xbuf)); 10799 } 10800 10801 biodone(bp); 10802 10803 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10804 } 10805 10806 10807 /* 10808 * Function: sd_mapblockaddr_iostart 10809 * 10810 * Description: Verify request lies within the partition limits for 10811 * the indicated minor device. Issue "overrun" buf if 10812 * request would exceed partition range. Converts 10813 * partition-relative block address to absolute. 10814 * 10815 * Context: Can sleep 10816 * 10817 * Issues: This follows what the old code did, in terms of accessing 10818 * some of the partition info in the unit struct without holding 10819 * the mutex. This is a general issue, if the partition info 10820 * can be altered while IO is in progress... as soon as we send 10821 * a buf, its partitioning can be invalid before it gets to the 10822 * device. Probably the right fix is to move partitioning out 10823 * of the driver entirely. 10824 */ 10825 10826 static void 10827 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10828 { 10829 diskaddr_t nblocks; /* #blocks in the given partition */ 10830 daddr_t blocknum; /* Block number specified by the buf */ 10831 size_t requested_nblocks; 10832 size_t available_nblocks; 10833 int partition; 10834 diskaddr_t partition_offset; 10835 struct sd_xbuf *xp; 10836 10837 10838 ASSERT(un != NULL); 10839 ASSERT(bp != NULL); 10840 ASSERT(!mutex_owned(SD_MUTEX(un))); 10841 10842 SD_TRACE(SD_LOG_IO_PARTITION, un, 10843 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10844 10845 xp = SD_GET_XBUF(bp); 10846 ASSERT(xp != NULL); 10847 10848 /* 10849 * If the geometry is not indicated as valid, attempt to access 10850 * the unit & verify the geometry/label. This can be the case for 10851 * removable-media devices, or if the device was opened in 10852 * NDELAY/NONBLOCK mode. 10853 */ 10854 if (!SD_IS_VALID_LABEL(un) && 10855 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10856 /* 10857 * For removable devices it is possible to start an I/O 10858 * without media by opening the device in nodelay mode. 10859 * Also for writable CDs there can be many scenarios where 10860 * there is no geometry yet but volume manager is trying to 10861 * issue a read() just because it can see TOC on the CD. So 10862 * do not print a message for removables.
10863 */ 10864 if (!un->un_f_has_removable_media) { 10865 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10866 "i/o to invalid geometry\n"); 10867 } 10868 bioerror(bp, EIO); 10869 bp->b_resid = bp->b_bcount; 10870 SD_BEGIN_IODONE(index, un, bp); 10871 return; 10872 } 10873 10874 partition = SDPART(bp->b_edev); 10875 10876 nblocks = 0; 10877 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10878 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10879 10880 /* 10881 * blocknum is the starting block number of the request. At this 10882 * point it is still relative to the start of the minor device. 10883 */ 10884 blocknum = xp->xb_blkno; 10885 10886 /* 10887 * Legacy: If the starting block number is one past the last block 10888 * in the partition, do not set B_ERROR in the buf. 10889 */ 10890 if (blocknum == nblocks) { 10891 goto error_exit; 10892 } 10893 10894 /* 10895 * Confirm that the first block of the request lies within the 10896 * partition limits. Also the requested number of bytes must be 10897 * a multiple of the system block size. 10898 */ 10899 if ((blocknum < 0) || (blocknum >= nblocks) || 10900 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10901 bp->b_flags |= B_ERROR; 10902 goto error_exit; 10903 } 10904 10905 /* 10906 * If the requested # blocks exceeds the available # blocks, that 10907 * is an overrun of the partition. 10908 */ 10909 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10910 available_nblocks = (size_t)(nblocks - blocknum); 10911 ASSERT(nblocks >= blocknum); 10912 10913 if (requested_nblocks > available_nblocks) { 10914 /* 10915 * Allocate an "overrun" buf to allow the request to proceed 10916 * for the amount of space available in the partition. The 10917 * amount not transferred will be added into the b_resid 10918 * when the operation is complete. The overrun buf 10919 * replaces the original buf here, and the original buf 10920 * is saved inside the overrun buf, for later use. 10921 */ 10922 size_t resid = SD_SYSBLOCKS2BYTES(un, 10923 (offset_t)(requested_nblocks - available_nblocks)); 10924 size_t count = bp->b_bcount - resid; 10925 /* 10926 * Note: count is an unsigned entity thus it'll NEVER 10927 * be less than 0 so ASSERT the original values are 10928 * correct. 10929 */ 10930 ASSERT(bp->b_bcount >= resid); 10931 10932 bp = sd_bioclone_alloc(bp, count, blocknum, 10933 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10934 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10935 ASSERT(xp != NULL); 10936 } 10937 10938 /* At this point there should be no residual for this buf. */ 10939 ASSERT(bp->b_resid == 0); 10940 10941 /* Convert the block number to an absolute address. */ 10942 xp->xb_blkno += partition_offset; 10943 10944 SD_NEXT_IOSTART(index, un, bp); 10945 10946 SD_TRACE(SD_LOG_IO_PARTITION, un, 10947 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10948 10949 return; 10950 10951 error_exit: 10952 bp->b_resid = bp->b_bcount; 10953 SD_BEGIN_IODONE(index, un, bp); 10954 SD_TRACE(SD_LOG_IO_PARTITION, un, 10955 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10956 } 10957 10958 10959 /* 10960 * Function: sd_mapblockaddr_iodone 10961 * 10962 * Description: Completion-side processing for partition management. 10963 * 10964 * Context: May be called under interrupt context 10965 */ 10966 10967 static void 10968 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10969 { 10970 /* int partition; */ /* Not used, see below.
*/ 10971 ASSERT(un != NULL); 10972 ASSERT(bp != NULL); 10973 ASSERT(!mutex_owned(SD_MUTEX(un))); 10974 10975 SD_TRACE(SD_LOG_IO_PARTITION, un, 10976 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10977 10978 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10979 /* 10980 * We have an "overrun" buf to deal with... 10981 */ 10982 struct sd_xbuf *xp; 10983 struct buf *obp; /* ptr to the original buf */ 10984 10985 xp = SD_GET_XBUF(bp); 10986 ASSERT(xp != NULL); 10987 10988 /* Retrieve the pointer to the original buf */ 10989 obp = (struct buf *)xp->xb_private; 10990 ASSERT(obp != NULL); 10991 10992 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10993 bioerror(obp, bp->b_error); 10994 10995 sd_bioclone_free(bp); 10996 10997 /* 10998 * Get back the original buf. 10999 * Note that since the restoration of xb_blkno below 11000 * was removed, the sd_xbuf is not needed. 11001 */ 11002 bp = obp; 11003 /* 11004 * xp = SD_GET_XBUF(bp); 11005 * ASSERT(xp != NULL); 11006 */ 11007 } 11008 11009 /* 11010 * Convert sd->xb_blkno back to a minor-device relative value. 11011 * Note: this has been commented out, as it is not needed in the 11012 * current implementation of the driver (ie, since this function 11013 * is at the top of the layering chains, so the info will be 11014 * discarded) and it is in the "hot" IO path. 11015 * 11016 * partition = getminor(bp->b_edev) & SDPART_MASK; 11017 * xp->xb_blkno -= un->un_offset[partition]; 11018 */ 11019 11020 SD_NEXT_IODONE(index, un, bp); 11021 11022 SD_TRACE(SD_LOG_IO_PARTITION, un, 11023 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 11024 } 11025 11026 11027 /* 11028 * Function: sd_mapblocksize_iostart 11029 * 11030 * Description: Convert between system block size (un->un_sys_blocksize) 11031 * and target block size (un->un_tgt_blocksize). 11032 * 11033 * Context: Can sleep to allocate resources. 11034 * 11035 * Assumptions: A higher layer has already performed any partition validation, 11036 * and converted the xp->xb_blkno to an absolute value relative 11037 * to the start of the device. 11038 * 11039 * It is also assumed that the higher layer has implemented 11040 * an "overrun" mechanism for the case where the request would 11041 * read/write beyond the end of a partition. In this case we 11042 * assume (and ASSERT) that bp->b_resid == 0. 11043 * 11044 * Note: The implementation for this routine assumes the target 11045 * block size remains constant between allocation and transport. 11046 */ 11047 11048 static void 11049 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 11050 { 11051 struct sd_mapblocksize_info *bsp; 11052 struct sd_xbuf *xp; 11053 offset_t first_byte; 11054 daddr_t start_block, end_block; 11055 daddr_t request_bytes; 11056 ushort_t is_aligned = FALSE; 11057 11058 ASSERT(un != NULL); 11059 ASSERT(bp != NULL); 11060 ASSERT(!mutex_owned(SD_MUTEX(un))); 11061 ASSERT(bp->b_resid == 0); 11062 11063 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11064 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 11065 11066 /* 11067 * For a non-writable CD, a write request is an error 11068 */ 11069 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 11070 (un->un_f_mmc_writable_media == FALSE)) { 11071 bioerror(bp, EIO); 11072 bp->b_resid = bp->b_bcount; 11073 SD_BEGIN_IODONE(index, un, bp); 11074 return; 11075 } 11076 11077 /* 11078 * We do not need a shadow buf if the device is using 11079 * un->un_sys_blocksize as its block size or if bcount == 0. 
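* (Typical example, for illustration only: a fixed disk whose 512-byte target blocks already match un->un_sys_blocksize takes this fast path, while a 2048-byte-per-sector CD-ROM requires the mapping below.)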
11080 * In this case there is no layer-private data block allocated. 11081 */ 11082 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11083 (bp->b_bcount == 0)) { 11084 goto done; 11085 } 11086 11087 #if defined(__i386) || defined(__amd64) 11088 /* We do not support non-block-aligned transfers for ROD devices */ 11089 ASSERT(!ISROD(un)); 11090 #endif 11091 11092 xp = SD_GET_XBUF(bp); 11093 ASSERT(xp != NULL); 11094 11095 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11096 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 11097 un->un_tgt_blocksize, un->un_sys_blocksize); 11098 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11099 "request start block:0x%x\n", xp->xb_blkno); 11100 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11101 "request len:0x%x\n", bp->b_bcount); 11102 11103 /* 11104 * Allocate the layer-private data area for the mapblocksize layer. 11105 * Layers are allowed to use the xb_private member of the sd_xbuf 11106 * struct to store the pointer to their layer-private data block, but 11107 * each layer also has the responsibility of restoring the prior 11108 * contents of xb_private before returning the buf/xbuf to the 11109 * higher layer that sent it. 11110 * 11111 * Here we save the prior contents of xp->xb_private into the 11112 * bsp->mbs_oprivate field of our layer-private data area. This value 11113 * is restored by sd_mapblocksize_iodone() just prior to freeing up 11114 * the layer-private area and returning the buf/xbuf to the layer 11115 * that sent it. 11116 * 11117 * Note that here we use kmem_zalloc for the allocation as there are 11118 * parts of the mapblocksize code that expect certain fields to be 11119 * zero unless explicitly set to a required value. 11120 */ 11121 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11122 bsp->mbs_oprivate = xp->xb_private; 11123 xp->xb_private = bsp; 11124 11125 /* 11126 * This treats the data on the disk (target) as an array of bytes. 11127 * first_byte is the byte offset, from the beginning of the device, 11128 * to the location of the request. This is converted from a 11129 * un->un_sys_blocksize block address to a byte offset, and then back 11130 * to a block address based upon a un->un_tgt_blocksize block size. 11131 * 11132 * xp->xb_blkno should be absolute upon entry into this function, 11133 * but it is based upon partitions that use the "system" 11134 * block size. It must be adjusted to reflect the block size of 11135 * the target. 11136 * 11137 * Note that end_block is actually the block that follows the last 11138 * block of the request, but that's what is needed for the computation. 11139 */ 11140 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11141 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 11142 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 11143 un->un_tgt_blocksize; 11144 11145 /* request_bytes is rounded up to a multiple of the target block size */ 11146 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 11147 11148 /* 11149 * See if the starting address of the request and the request 11150 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 11151 * then we do not need to allocate a shadow buf to handle the request. 11152 */ 11153 if (((first_byte % un->un_tgt_blocksize) == 0) && 11154 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 11155 is_aligned = TRUE; 11156 } 11157 11158 if ((bp->b_flags & B_READ) == 0) { 11159 /* 11160 * Lock the range for a write operation.
An aligned request is 11161 considered a simple write; otherwise the request must be a 11162 read-modify-write. 11163 */ 11164 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 11165 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 11166 } 11167 11168 /* 11169 * Alloc a shadow buf if the request is not aligned. Also, this is 11170 * where the READ command is generated for a read-modify-write. (The 11171 * write phase is deferred until after the read completes.) 11172 */ 11173 if (is_aligned == FALSE) { 11174 11175 struct sd_mapblocksize_info *shadow_bsp; 11176 struct sd_xbuf *shadow_xp; 11177 struct buf *shadow_bp; 11178 11179 /* 11180 * Allocate the shadow buf and its associated xbuf. Note that 11181 * after this call the xb_blkno value in both the original 11182 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 11183 * same: absolute (ie, relative to the start of the device), and 11184 * adjusted for the target block size. The b_blkno in the 11185 * shadow buf will also be set to this value. We should never 11186 * change b_blkno in the original bp however. 11187 * 11188 * Note also that the shadow buf will always need to be a 11189 * READ command, regardless of whether the incoming command 11190 * is a READ or a WRITE. 11191 */ 11192 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 11193 xp->xb_blkno, 11194 (int (*)(struct buf *)) sd_mapblocksize_iodone); 11195 11196 shadow_xp = SD_GET_XBUF(shadow_bp); 11197 11198 /* 11199 * Allocate the layer-private data for the shadow buf. 11200 * (No need to preserve xb_private in the shadow xbuf.) 11201 */ 11202 shadow_xp->xb_private = shadow_bsp = 11203 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11204 11205 /* 11206 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 11207 * to figure out where the start of the user data is (based upon 11208 * the system block size) in the data returned by the READ 11209 * command (which will be based upon the target blocksize). Note 11210 * that this is only really used if the request is unaligned. 11211 */ 11212 bsp->mbs_copy_offset = (ssize_t)(first_byte - 11213 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 11214 ASSERT((bsp->mbs_copy_offset >= 0) && 11215 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 11216 11217 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 11218 11219 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 11220 11221 /* Transfer the wmap (if any) to the shadow buf */ 11222 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 11223 bsp->mbs_wmp = NULL; 11224 11225 /* 11226 * The shadow buf goes on from here in place of the 11227 * original buf. 11228 */ 11229 shadow_bsp->mbs_orig_bp = bp; 11230 bp = shadow_bp; 11231 } 11232 11233 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11234 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 11235 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11236 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 11237 request_bytes); 11238 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11239 "sd_mapblocksize_iostart: shadow buf:0x%p\n", bp); 11240 11241 done: 11242 SD_NEXT_IOSTART(index, un, bp); 11243 11244 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11245 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 11246 } 11247 11248 11249 /* 11250 * Function: sd_mapblocksize_iodone 11251 * 11252 * Description: Completion side processing for block-size mapping.
11253 * 11254 * Context: May be called under interrupt context 11255 */ 11256 11257 static void 11258 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11259 { 11260 struct sd_mapblocksize_info *bsp; 11261 struct sd_xbuf *xp; 11262 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11263 struct buf *orig_bp; /* ptr to the original buf */ 11264 offset_t shadow_end; 11265 offset_t request_end; 11266 offset_t shadow_start; 11267 ssize_t copy_offset; 11268 size_t copy_length; 11269 size_t shortfall; 11270 uint_t is_write; /* TRUE if this bp is a WRITE */ 11271 uint_t has_wmap; /* TRUE if this bp has a wmap */ 11272 11273 ASSERT(un != NULL); 11274 ASSERT(bp != NULL); 11275 11276 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11277 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11278 11279 /* 11280 * There is no shadow buf or layer-private data if the target is 11281 * using un->un_sys_blocksize as its block size or if bcount == 0. 11282 */ 11283 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11284 (bp->b_bcount == 0)) { 11285 goto exit; 11286 } 11287 11288 xp = SD_GET_XBUF(bp); 11289 ASSERT(xp != NULL); 11290 11291 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11292 bsp = xp->xb_private; 11293 11294 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11295 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11296 11297 if (is_write) { 11298 /* 11299 * For a WRITE request we must free up the block range that 11300 * we have locked up. This holds regardless of whether this is 11301 * an aligned write request or a read-modify-write request. 11302 */ 11303 sd_range_unlock(un, bsp->mbs_wmp); 11304 bsp->mbs_wmp = NULL; 11305 } 11306 11307 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11308 /* 11309 * An aligned read or write command will have no shadow buf; 11310 * there is not much else to do with it. 11311 */ 11312 goto done; 11313 } 11314 11315 orig_bp = bsp->mbs_orig_bp; 11316 ASSERT(orig_bp != NULL); 11317 orig_xp = SD_GET_XBUF(orig_bp); 11318 ASSERT(orig_xp != NULL); 11319 ASSERT(!mutex_owned(SD_MUTEX(un))); 11320 11321 if (!is_write && has_wmap) { 11322 /* 11323 * A READ with a wmap means this is the READ phase of a 11324 * read-modify-write. If an error occurred on the READ then 11325 * we do not proceed with the WRITE phase or copy any data. 11326 * Just release the write maps and return with an error. 11327 */ 11328 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11329 orig_bp->b_resid = orig_bp->b_bcount; 11330 bioerror(orig_bp, bp->b_error); 11331 sd_range_unlock(un, bsp->mbs_wmp); 11332 goto freebuf_done; 11333 } 11334 } 11335 11336 /* 11337 * Here is where we set up to copy the data from the shadow buf 11338 * into the space associated with the original buf. 11339 * 11340 * To deal with the conversion between block sizes, these 11341 * computations treat the data as an array of bytes, with the 11342 * first byte (byte 0) corresponding to the first byte in the 11343 * first block on the disk. 11344 */ 11345 11346 /* 11347 * shadow_start and shadow_end indicate the location and extent of 11348 * the data returned with the shadow IO request. 11349 */ 11350 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11351 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11352 11353 /* 11354 * copy_offset gives the offset (in bytes) from the start of the first 11355 * block of the READ request to the beginning of the data.
We retrieve 11356 * this value from the mbs_copy_offset field of the layer-private data 11357 * area, as it was saved there by sd_mapblocksize_iostart(). copy_length gives the amount of 11358 * data to be copied (in bytes). 11359 */ 11360 copy_offset = bsp->mbs_copy_offset; 11361 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11362 copy_length = orig_bp->b_bcount; 11363 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11364 11365 /* 11366 * Set up the resid and error fields of orig_bp as appropriate. 11367 */ 11368 if (shadow_end >= request_end) { 11369 /* We got all the requested data; set resid to zero */ 11370 orig_bp->b_resid = 0; 11371 } else { 11372 /* 11373 * We failed to get enough data to fully satisfy the original 11374 * request. Just copy back whatever data we got and set 11375 * up the residual and error code as required. 11376 * 11377 * 'shortfall' is the amount by which the data received with the 11378 * shadow buf has "fallen short" of the requested amount. 11379 */ 11380 shortfall = (size_t)(request_end - shadow_end); 11381 11382 if (shortfall > orig_bp->b_bcount) { 11383 /* 11384 * We did not get enough data to even partially 11385 * fulfill the original request. The residual is 11386 * equal to the amount requested. 11387 */ 11388 orig_bp->b_resid = orig_bp->b_bcount; 11389 } else { 11390 /* 11391 * We did not get all the data that we requested 11392 * from the device, but we will try to return what 11393 * portion we did get. 11394 */ 11395 orig_bp->b_resid = shortfall; 11396 } 11397 ASSERT(copy_length >= orig_bp->b_resid); 11398 copy_length -= orig_bp->b_resid; 11399 } 11400 11401 /* Propagate the error code from the shadow buf to the original buf */ 11402 bioerror(orig_bp, bp->b_error); 11403 11404 if (is_write) { 11405 goto freebuf_done; /* No data copying for a WRITE */ 11406 } 11407 11408 if (has_wmap) { 11409 /* 11410 * This is a READ command from the READ phase of a 11411 * read-modify-write request. We have to copy the data given 11412 * by the user OVER the data returned by the READ command, 11413 * then convert the command from a READ to a WRITE and send 11414 * it back to the target. 11415 */ 11416 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11417 copy_length); 11418 11419 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11420 11421 /* 11422 * Dispatch the WRITE command to the taskq thread, which 11423 * will in turn send the command to the target. When the 11424 * WRITE command completes, we (sd_mapblocksize_iodone()) 11425 * will get called again as part of the iodone chain 11426 * processing for it. Note that we will still be dealing 11427 * with the shadow buf at that point. 11428 */ 11429 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11430 KM_NOSLEEP) != 0) { 11431 /* 11432 * Dispatch was successful so we are done. Return 11433 * without going any higher up the iodone chain. Do 11434 * not free up any layer-private data until after the 11435 * WRITE completes. 11436 */ 11437 return; 11438 } 11439 11440 /* 11441 * Dispatch of the WRITE command failed; set up the error 11442 * condition and send this IO back up the iodone chain. 11443 */ 11444 bioerror(orig_bp, EIO); 11445 orig_bp->b_resid = orig_bp->b_bcount; 11446 11447 } else { 11448 /* 11449 * This is a regular READ request (ie, not a RMW). Copy the 11450 * data from the shadow buf into the original buf.
The 11451 * copy_offset compensates for any "misalignment" between the 11452 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11453 * original buf (with its un->un_sys_blocksize blocks). 11454 */ 11455 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11456 copy_length); 11457 } 11458 11459 freebuf_done: 11460 11461 /* 11462 * At this point we still have both the shadow buf AND the original 11463 * buf to deal with, as well as the layer-private data area in each. 11464 * Local variables are as follows: 11465 * 11466 * bp -- points to shadow buf 11467 * xp -- points to xbuf of shadow buf 11468 * bsp -- points to layer-private data area of shadow buf 11469 * orig_bp -- points to original buf 11470 * 11471 * First free the shadow buf and its associated xbuf, then free the 11472 * layer-private data area from the shadow buf. There is no need to 11473 * restore xb_private in the shadow xbuf. 11474 */ 11475 sd_shadow_buf_free(bp); 11476 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11477 11478 /* 11479 * Now update the local variables to point to the original buf, xbuf, 11480 * and layer-private area. 11481 */ 11482 bp = orig_bp; 11483 xp = SD_GET_XBUF(bp); 11484 ASSERT(xp != NULL); 11485 ASSERT(xp == orig_xp); 11486 bsp = xp->xb_private; 11487 ASSERT(bsp != NULL); 11488 11489 done: 11490 /* 11491 * Restore xb_private to whatever it was set to by the next higher 11492 * layer in the chain, then free the layer-private data area. 11493 */ 11494 xp->xb_private = bsp->mbs_oprivate; 11495 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11496 11497 exit: 11498 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11499 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11500 11501 SD_NEXT_IODONE(index, un, bp); 11502 } 11503 11504 11505 /* 11506 * Function: sd_checksum_iostart 11507 * 11508 * Description: A stub function for a layer that's currently not used. 11509 * For now just a placeholder. 11510 * 11511 * Context: Kernel thread context 11512 */ 11513 11514 static void 11515 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11516 { 11517 ASSERT(un != NULL); 11518 ASSERT(bp != NULL); 11519 ASSERT(!mutex_owned(SD_MUTEX(un))); 11520 SD_NEXT_IOSTART(index, un, bp); 11521 } 11522 11523 11524 /* 11525 * Function: sd_checksum_iodone 11526 * 11527 * Description: A stub function for a layer that's currently not used. 11528 * For now just a placeholder. 11529 * 11530 * Context: May be called under interrupt context 11531 */ 11532 11533 static void 11534 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11535 { 11536 ASSERT(un != NULL); 11537 ASSERT(bp != NULL); 11538 ASSERT(!mutex_owned(SD_MUTEX(un))); 11539 SD_NEXT_IODONE(index, un, bp); 11540 } 11541 11542 11543 /* 11544 * Function: sd_checksum_uscsi_iostart 11545 * 11546 * Description: A stub function for a layer that's currently not used. 11547 * For now just a placeholder. 11548 * 11549 * Context: Kernel thread context 11550 */ 11551 11552 static void 11553 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11554 { 11555 ASSERT(un != NULL); 11556 ASSERT(bp != NULL); 11557 ASSERT(!mutex_owned(SD_MUTEX(un))); 11558 SD_NEXT_IOSTART(index, un, bp); 11559 } 11560 11561 11562 /* 11563 * Function: sd_checksum_uscsi_iodone 11564 * 11565 * Description: A stub function for a layer that's currently not used. 11566 * For now just a placeholder. 
11567 * 11568 * Context: May be called under interrupt context 11569 */ 11570 11571 static void 11572 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11573 { 11574 ASSERT(un != NULL); 11575 ASSERT(bp != NULL); 11576 ASSERT(!mutex_owned(SD_MUTEX(un))); 11577 SD_NEXT_IODONE(index, un, bp); 11578 } 11579 11580 11581 /* 11582 * Function: sd_pm_iostart 11583 * 11584 * Description: iostart-side routine for power management. 11585 * 11586 * Context: Kernel thread context 11587 */ 11588 11589 static void 11590 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11591 { 11592 ASSERT(un != NULL); 11593 ASSERT(bp != NULL); 11594 ASSERT(!mutex_owned(SD_MUTEX(un))); 11595 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11596 11597 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11598 11599 if (sd_pm_entry(un) != DDI_SUCCESS) { 11600 /* 11601 * Set up to return the failed buf back up the 'iodone' 11602 * side of the calling chain. 11603 */ 11604 bioerror(bp, EIO); 11605 bp->b_resid = bp->b_bcount; 11606 11607 SD_BEGIN_IODONE(index, un, bp); 11608 11609 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11610 return; 11611 } 11612 11613 SD_NEXT_IOSTART(index, un, bp); 11614 11615 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11616 } 11617 11618 11619 /* 11620 * Function: sd_pm_iodone 11621 * 11622 * Description: iodone-side routine for power management. 11623 * 11624 * Context: may be called from interrupt context 11625 */ 11626 11627 static void 11628 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11629 { 11630 ASSERT(un != NULL); 11631 ASSERT(bp != NULL); 11632 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11633 11634 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11635 11636 /* 11637 * After attach the following flag is only read, so don't 11638 * take the penalty of acquiring a mutex for it. 11639 */ 11640 if (un->un_f_pm_is_enabled == TRUE) { 11641 sd_pm_exit(un); 11642 } 11643 11644 SD_NEXT_IODONE(index, un, bp); 11645 11646 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11647 } 11648 11649 11650 /* 11651 * Function: sd_core_iostart 11652 * 11653 * Description: Primary driver function for enqueuing buf(9S) structs from 11654 * the system and initiating IO to the target device 11655 * 11656 * Context: Kernel thread context. Can sleep. 11657 * 11658 * Assumptions: - The given xp->xb_blkno is absolute 11659 * (ie, relative to the start of the device). 11660 * - The IO is to be done using the native blocksize of 11661 * the device, as specified in un->un_tgt_blocksize. 11662 */ 11663 /* ARGSUSED */ 11664 static void 11665 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11666 { 11667 struct sd_xbuf *xp; 11668 11669 ASSERT(un != NULL); 11670 ASSERT(bp != NULL); 11671 ASSERT(!mutex_owned(SD_MUTEX(un))); 11672 ASSERT(bp->b_resid == 0); 11673 11674 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11675 11676 xp = SD_GET_XBUF(bp); 11677 ASSERT(xp != NULL); 11678 11679 mutex_enter(SD_MUTEX(un)); 11680 11681 /* 11682 * If we are currently in the failfast state, fail any new IO 11683 * that has B_FAILFAST set, then return. 11684 */ 11685 if ((bp->b_flags & B_FAILFAST) && 11686 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11687 mutex_exit(SD_MUTEX(un)); 11688 bioerror(bp, EIO); 11689 bp->b_resid = bp->b_bcount; 11690 SD_BEGIN_IODONE(index, un, bp); 11691 return; 11692 } 11693 11694 if (SD_IS_DIRECT_PRIORITY(xp)) { 11695 /* 11696 * Priority command -- transport it immediately.
11697 * 11698 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11699 * because all direct priority commands should be associated 11700 * with error recovery actions which we don't want to retry. 11701 */ 11702 sd_start_cmds(un, bp); 11703 } else { 11704 /* 11705 * Normal command -- add it to the wait queue, then start 11706 * transporting commands from the wait queue. 11707 */ 11708 sd_add_buf_to_waitq(un, bp); 11709 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11710 sd_start_cmds(un, NULL); 11711 } 11712 11713 mutex_exit(SD_MUTEX(un)); 11714 11715 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11716 } 11717 11718 11719 /* 11720 * Function: sd_init_cdb_limits 11721 * 11722 * Description: This is to handle scsi_pkt initialization differences 11723 * between the driver platforms. 11724 * 11725 * Legacy behaviors: 11726 * 11727 * If the block number or the sector count exceeds the 11728 * capabilities of a Group 0 command, shift over to a 11729 * Group 1 command. We don't blindly use Group 1 11730 * commands because a) some drives (CDC Wren IVs) get a 11731 * bit confused, and b) there is probably a fair amount 11732 * of speed difference for a target to receive and decode 11733 * a 10 byte command instead of a 6 byte command. 11734 * 11735 * The xfer time difference of 6 vs 10 byte CDBs is 11736 * still significant so this code is still worthwhile. 11737 * 10 byte CDBs are very inefficient with the fas HBA driver 11738 * and older disks. Each CDB byte took 1 usec with some 11739 * popular disks. 11740 * 11741 * Context: Must be called at attach time 11742 */ 11743 11744 static void 11745 sd_init_cdb_limits(struct sd_lun *un) 11746 { 11747 int hba_cdb_limit; 11748 11749 /* 11750 * Use CDB_GROUP1 commands for most devices except for 11751 * parallel SCSI fixed drives in which case we get better 11752 * performance using CDB_GROUP0 commands (where applicable). 11753 */ 11754 un->un_mincdb = SD_CDB_GROUP1; 11755 #if !defined(__fibre) 11756 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11757 !un->un_f_has_removable_media) { 11758 un->un_mincdb = SD_CDB_GROUP0; 11759 } 11760 #endif 11761 11762 /* 11763 * Try to read the max-cdb-length supported by HBA. 11764 */ 11765 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11766 if (0 >= un->un_max_hba_cdb) { 11767 un->un_max_hba_cdb = CDB_GROUP4; 11768 hba_cdb_limit = SD_CDB_GROUP4; 11769 } else if (0 < un->un_max_hba_cdb && 11770 un->un_max_hba_cdb < CDB_GROUP1) { 11771 hba_cdb_limit = SD_CDB_GROUP0; 11772 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11773 un->un_max_hba_cdb < CDB_GROUP5) { 11774 hba_cdb_limit = SD_CDB_GROUP1; 11775 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11776 un->un_max_hba_cdb < CDB_GROUP4) { 11777 hba_cdb_limit = SD_CDB_GROUP5; 11778 } else { 11779 hba_cdb_limit = SD_CDB_GROUP4; 11780 } 11781 11782 /* 11783 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11784 * commands for fixed disks unless we are building for a 32 bit 11785 * kernel. 11786 */ 11787 #ifdef _LP64 11788 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11789 min(hba_cdb_limit, SD_CDB_GROUP4); 11790 #else 11791 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11792 min(hba_cdb_limit, SD_CDB_GROUP1); 11793 #endif 11794 11795 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11796 ? sizeof (struct scsi_arq_status) : 1); 11797 un->un_cmd_timeout = (ushort_t)sd_io_time; 11798 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout; 11799 } 11800 11801 11802 /* 11803 * Function: sd_initpkt_for_buf 11804 * 11805 * Description: Allocate and initialize for transport a scsi_pkt struct, 11806 * based upon the info specified in the given buf struct. 11807 * 11808 * Assumes the xb_blkno in the request is absolute (ie, 11809 * relative to the start of the device, NOT the partition!). 11810 * Also assumes that the request is using the native block 11811 * size of the device (as returned by the READ CAPACITY 11812 * command). 11813 * 11814 * Return Code: SD_PKT_ALLOC_SUCCESS 11815 * SD_PKT_ALLOC_FAILURE 11816 * SD_PKT_ALLOC_FAILURE_NO_DMA 11817 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11818 * 11819 * Context: Kernel thread and may be called from software interrupt context 11820 * as part of a sdrunout callback. This function may not block or 11821 * call routines that block 11822 */ 11823 11824 static int 11825 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11826 { 11827 struct sd_xbuf *xp; 11828 struct scsi_pkt *pktp = NULL; 11829 struct sd_lun *un; 11830 size_t blockcount; 11831 daddr_t startblock; 11832 int rval; 11833 int cmd_flags; 11834 11835 ASSERT(bp != NULL); 11836 ASSERT(pktpp != NULL); 11837 xp = SD_GET_XBUF(bp); 11838 ASSERT(xp != NULL); 11839 un = SD_GET_UN(bp); 11840 ASSERT(un != NULL); 11841 ASSERT(mutex_owned(SD_MUTEX(un))); 11842 ASSERT(bp->b_resid == 0); 11843 11844 SD_TRACE(SD_LOG_IO_CORE, un, 11845 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11846 11847 mutex_exit(SD_MUTEX(un)); 11848 11849 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11850 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11851 /* 11852 * Already have a scsi_pkt -- just need DMA resources. 11853 * We must recompute the CDB in case the mapping returns 11854 * a nonzero pkt_resid. 11855 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11856 * that is being retried, the unmap/remap of the DMA resources 11857 * will result in the entire transfer starting over again 11858 * from the very first block. 11859 */ 11860 ASSERT(xp->xb_pktp != NULL); 11861 pktp = xp->xb_pktp; 11862 } else { 11863 pktp = NULL; 11864 } 11865 #endif /* __i386 || __amd64 */ 11866 11867 startblock = xp->xb_blkno; /* Absolute block num. */ 11868 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11869 11870 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11871 11872 /* 11873 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11874 * call scsi_init_pkt, and build the CDB. 11875 */ 11876 rval = sd_setup_rw_pkt(un, &pktp, bp, 11877 cmd_flags, sdrunout, (caddr_t)un, 11878 startblock, blockcount); 11879 11880 if (rval == 0) { 11881 /* 11882 * Success. 11883 * 11884 * If partial DMA is being used and required for this transfer, 11885 * set it up here.
11886 */ 11887 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11888 (pktp->pkt_resid != 0)) { 11889 11890 /* 11891 * Save the CDB length and pkt_resid for the 11892 * next xfer 11893 */ 11894 xp->xb_dma_resid = pktp->pkt_resid; 11895 11896 /* rezero resid */ 11897 pktp->pkt_resid = 0; 11898 11899 } else { 11900 xp->xb_dma_resid = 0; 11901 } 11902 11903 pktp->pkt_flags = un->un_tagflags; 11904 pktp->pkt_time = un->un_cmd_timeout; 11905 pktp->pkt_comp = sdintr; 11906 11907 pktp->pkt_private = bp; 11908 *pktpp = pktp; 11909 11910 SD_TRACE(SD_LOG_IO_CORE, un, 11911 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11912 11913 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11914 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11915 #endif 11916 11917 mutex_enter(SD_MUTEX(un)); 11918 return (SD_PKT_ALLOC_SUCCESS); 11919 11920 } 11921 11922 /* 11923 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11924 * from sd_setup_rw_pkt. 11925 */ 11926 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11927 11928 if (rval == SD_PKT_ALLOC_FAILURE) { 11929 *pktpp = NULL; 11930 /* 11931 * Set the driver state to RWAIT to indicate the driver 11932 * is waiting on resource allocations. The driver will not 11933 * suspend, pm_suspend, or detach while the state is RWAIT. 11934 */ 11935 mutex_enter(SD_MUTEX(un)); 11936 New_state(un, SD_STATE_RWAIT); 11937 11938 SD_ERROR(SD_LOG_IO_CORE, un, 11939 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 11940 11941 if ((bp->b_flags & B_ERROR) != 0) { 11942 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11943 } 11944 return (SD_PKT_ALLOC_FAILURE); 11945 } else { 11946 /* 11947 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11948 * 11949 * This should never happen. Maybe someone messed with the 11950 * kernel's minphys? 11951 */ 11952 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11953 "Request rejected: too large for CDB: " 11954 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11955 SD_ERROR(SD_LOG_IO_CORE, un, 11956 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11957 mutex_enter(SD_MUTEX(un)); 11958 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11959 11960 } 11961 } 11962 11963 11964 /* 11965 * Function: sd_destroypkt_for_buf 11966 * 11967 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11968 * 11969 * Context: Kernel thread or interrupt context 11970 */ 11971 11972 static void 11973 sd_destroypkt_for_buf(struct buf *bp) 11974 { 11975 ASSERT(bp != NULL); 11976 ASSERT(SD_GET_UN(bp) != NULL); 11977 11978 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11979 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11980 11981 ASSERT(SD_GET_PKTP(bp) != NULL); 11982 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11983 11984 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11985 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11986 } 11987 11988 /* 11989 * Function: sd_setup_rw_pkt 11990 * 11991 * Description: Determines appropriate CDB group for the requested LBA 11992 * and transfer length, calls scsi_init_pkt, and builds 11993 * the CDB. Do not use for partial DMA transfers except 11994 * for the initial transfer since the CDB size must 11995 * remain constant. 11996 * 11997 * Context: Kernel thread and may be called from software interrupt 11998 * context as part of a sdrunout callback.
This function may not 11999 * block or call routines that block 12000 */ 12001 12002 12003 int 12004 sd_setup_rw_pkt(struct sd_lun *un, 12005 struct scsi_pkt **pktpp, struct buf *bp, int flags, 12006 int (*callback)(caddr_t), caddr_t callback_arg, 12007 diskaddr_t lba, uint32_t blockcount) 12008 { 12009 struct scsi_pkt *return_pktp; 12010 union scsi_cdb *cdbp; 12011 struct sd_cdbinfo *cp = NULL; 12012 int i; 12013 12014 /* 12015 * See which size CDB to use, based upon the request. 12016 */ 12017 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 12018 12019 /* 12020 * Check lba and block count against sd_cdbtab limits. 12021 * In the partial DMA case, we have to use the same size 12022 * CDB for all the transfers. Check lba + blockcount 12023 * against the max LBA so we know that segment of the 12024 * transfer can use the CDB we select. 12025 */ 12026 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 12027 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 12028 12029 /* 12030 * The command will fit into the CDB type 12031 * specified by sd_cdbtab[i]. 12032 */ 12033 cp = sd_cdbtab + i; 12034 12035 /* 12036 * Call scsi_init_pkt so we can fill in the 12037 * CDB. 12038 */ 12039 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 12040 bp, cp->sc_grpcode, un->un_status_len, 0, 12041 flags, callback, callback_arg); 12042 12043 if (return_pktp != NULL) { 12044 12045 /* 12046 * Return new value of pkt 12047 */ 12048 *pktpp = return_pktp; 12049 12050 /* 12051 * To be safe, zero the CDB insuring there is 12052 * no leftover data from a previous command. 12053 */ 12054 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 12055 12056 /* 12057 * Handle partial DMA mapping 12058 */ 12059 if (return_pktp->pkt_resid != 0) { 12060 12061 /* 12062 * Not going to xfer as many blocks as 12063 * originally expected 12064 */ 12065 blockcount -= 12066 SD_BYTES2TGTBLOCKS(un, 12067 return_pktp->pkt_resid); 12068 } 12069 12070 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 12071 12072 /* 12073 * Set command byte based on the CDB 12074 * type we matched. 12075 */ 12076 cdbp->scc_cmd = cp->sc_grpmask | 12077 ((bp->b_flags & B_READ) ? 12078 SCMD_READ : SCMD_WRITE); 12079 12080 SD_FILL_SCSI1_LUN(un, return_pktp); 12081 12082 /* 12083 * Fill in LBA and length 12084 */ 12085 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 12086 (cp->sc_grpcode == CDB_GROUP4) || 12087 (cp->sc_grpcode == CDB_GROUP0) || 12088 (cp->sc_grpcode == CDB_GROUP5)); 12089 12090 if (cp->sc_grpcode == CDB_GROUP1) { 12091 FORMG1ADDR(cdbp, lba); 12092 FORMG1COUNT(cdbp, blockcount); 12093 return (0); 12094 } else if (cp->sc_grpcode == CDB_GROUP4) { 12095 FORMG4LONGADDR(cdbp, lba); 12096 FORMG4COUNT(cdbp, blockcount); 12097 return (0); 12098 } else if (cp->sc_grpcode == CDB_GROUP0) { 12099 FORMG0ADDR(cdbp, lba); 12100 FORMG0COUNT(cdbp, blockcount); 12101 return (0); 12102 } else if (cp->sc_grpcode == CDB_GROUP5) { 12103 FORMG5ADDR(cdbp, lba); 12104 FORMG5COUNT(cdbp, blockcount); 12105 return (0); 12106 } 12107 12108 /* 12109 * It should be impossible to not match one 12110 * of the CDB types above, so we should never 12111 * reach this point. Set the CDB command byte 12112 * to test-unit-ready to avoid writing 12113 * to somewhere we don't intend. 12114 */ 12115 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 12116 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12117 } else { 12118 /* 12119 * Couldn't get scsi_pkt 12120 */ 12121 return (SD_PKT_ALLOC_FAILURE); 12122 } 12123 } 12124 } 12125 12126 /* 12127 * None of the available CDB types were suitable. 
This really 12128 * should never happen: on a 64 bit system we support 12129 * READ16/WRITE16 which will hold an entire 64 bit disk address 12130 * and on a 32 bit system we will refuse to bind to a device 12131 * larger than 2TB so addresses will never be larger than 32 bits. 12132 */ 12133 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12134 } 12135 12136 /* 12137 * Function: sd_setup_next_rw_pkt 12138 * 12139 * Description: Setup packet for partial DMA transfers, except for the 12140 * initial transfer. sd_setup_rw_pkt should be used for 12141 * the initial transfer. 12142 * 12143 * Context: Kernel thread and may be called from interrupt context. 12144 */ 12145 12146 int 12147 sd_setup_next_rw_pkt(struct sd_lun *un, 12148 struct scsi_pkt *pktp, struct buf *bp, 12149 diskaddr_t lba, uint32_t blockcount) 12150 { 12151 uchar_t com; 12152 union scsi_cdb *cdbp; 12153 uchar_t cdb_group_id; 12154 12155 ASSERT(pktp != NULL); 12156 ASSERT(pktp->pkt_cdbp != NULL); 12157 12158 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 12159 com = cdbp->scc_cmd; 12160 cdb_group_id = CDB_GROUPID(com); 12161 12162 ASSERT((cdb_group_id == CDB_GROUPID_0) || 12163 (cdb_group_id == CDB_GROUPID_1) || 12164 (cdb_group_id == CDB_GROUPID_4) || 12165 (cdb_group_id == CDB_GROUPID_5)); 12166 12167 /* 12168 * Move pkt to the next portion of the xfer. 12169 * func is NULL_FUNC so we do not have to release 12170 * the disk mutex here. 12171 */ 12172 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 12173 NULL_FUNC, NULL) == pktp) { 12174 /* Success. Handle partial DMA */ 12175 if (pktp->pkt_resid != 0) { 12176 blockcount -= 12177 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 12178 } 12179 12180 cdbp->scc_cmd = com; 12181 SD_FILL_SCSI1_LUN(un, pktp); 12182 if (cdb_group_id == CDB_GROUPID_1) { 12183 FORMG1ADDR(cdbp, lba); 12184 FORMG1COUNT(cdbp, blockcount); 12185 return (0); 12186 } else if (cdb_group_id == CDB_GROUPID_4) { 12187 FORMG4LONGADDR(cdbp, lba); 12188 FORMG4COUNT(cdbp, blockcount); 12189 return (0); 12190 } else if (cdb_group_id == CDB_GROUPID_0) { 12191 FORMG0ADDR(cdbp, lba); 12192 FORMG0COUNT(cdbp, blockcount); 12193 return (0); 12194 } else if (cdb_group_id == CDB_GROUPID_5) { 12195 FORMG5ADDR(cdbp, lba); 12196 FORMG5COUNT(cdbp, blockcount); 12197 return (0); 12198 } 12199 12200 /* Unreachable */ 12201 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12202 } 12203 12204 /* 12205 * Error setting up next portion of cmd transfer. 12206 * Something is definitely very wrong and this 12207 * should not happen. 12208 */ 12209 return (SD_PKT_ALLOC_FAILURE); 12210 } 12211 12212 /* 12213 * Function: sd_initpkt_for_uscsi 12214 * 12215 * Description: Allocate and initialize for transport a scsi_pkt struct, 12216 * based upon the info specified in the given uscsi_cmd struct. 12217 * 12218 * Return Code: SD_PKT_ALLOC_SUCCESS 12219 * SD_PKT_ALLOC_FAILURE 12220 * SD_PKT_ALLOC_FAILURE_NO_DMA 12221 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12222 * 12223 * Context: Kernel thread and may be called from software interrupt context 12224 * as part of a sdrunout callback. 
This function may not block or 12225 * call routines that block 12226 */ 12227 12228 static int 12229 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 12230 { 12231 struct uscsi_cmd *uscmd; 12232 struct sd_xbuf *xp; 12233 struct scsi_pkt *pktp; 12234 struct sd_lun *un; 12235 uint32_t flags = 0; 12236 12237 ASSERT(bp != NULL); 12238 ASSERT(pktpp != NULL); 12239 xp = SD_GET_XBUF(bp); 12240 ASSERT(xp != NULL); 12241 un = SD_GET_UN(bp); 12242 ASSERT(un != NULL); 12243 ASSERT(mutex_owned(SD_MUTEX(un))); 12244 12245 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12246 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12247 ASSERT(uscmd != NULL); 12248 12249 SD_TRACE(SD_LOG_IO_CORE, un, 12250 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 12251 12252 /* 12253 * Allocate the scsi_pkt for the command. 12254 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 12255 * during scsi_init_pkt time and will continue to use the 12256 * same path as long as the same scsi_pkt is used without 12257 * intervening scsi_dmafree(). Since a uscsi command does 12258 * not call scsi_dmafree() before retrying a failed command, it 12259 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 12260 * set such that scsi_vhci can use another available path for 12261 * retry. Besides, a uscsi command does not allow DMA breakup, 12262 * so there is no need to set PKT_DMA_PARTIAL flag. 12263 */ 12264 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12265 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12266 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12267 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 12268 - sizeof (struct scsi_extended_sense)), 0, 12269 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 12270 sdrunout, (caddr_t)un); 12271 } else { 12272 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12273 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12274 sizeof (struct scsi_arq_status), 0, 12275 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 12276 sdrunout, (caddr_t)un); 12277 } 12278 12279 if (pktp == NULL) { 12280 *pktpp = NULL; 12281 /* 12282 * Set the driver state to RWAIT to indicate the driver 12283 * is waiting on resource allocations. The driver will not 12284 * suspend, pm_suspend, or detach while the state is RWAIT. 12285 */ 12286 New_state(un, SD_STATE_RWAIT); 12287 12288 SD_ERROR(SD_LOG_IO_CORE, un, 12289 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 12290 12291 if ((bp->b_flags & B_ERROR) != 0) { 12292 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12293 } 12294 return (SD_PKT_ALLOC_FAILURE); 12295 } 12296 12297 /* 12298 * We do not do DMA breakup for USCSI commands, so return failure 12299 * here if all the needed DMA resources were not allocated. 12300 */ 12301 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 12302 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 12303 scsi_destroy_pkt(pktp); 12304 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 12305 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 12306 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 12307 } 12308 12309 /* Init the cdb from the given uscsi struct */ 12310 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 12311 uscmd->uscsi_cdb[0], 0, 0, 0); 12312 12313 SD_FILL_SCSI1_LUN(un, pktp); 12314 12315 /* 12316 * Set up the optional USCSI flags. See the uscsi (7I) man page 12317 * for a listing of the supported flags.
12318 */ 12319 12320 if (uscmd->uscsi_flags & USCSI_SILENT) { 12321 flags |= FLAG_SILENT; 12322 } 12323 12324 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12325 flags |= FLAG_DIAGNOSE; 12326 } 12327 12328 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12329 flags |= FLAG_ISOLATE; 12330 } 12331 12332 if (un->un_f_is_fibre == FALSE) { 12333 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12334 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12335 } 12336 } 12337 12338 /* 12339 * Set the pkt flags here so we save time later. 12340 * Note: These flags are NOT in the uscsi man page!!! 12341 */ 12342 if (uscmd->uscsi_flags & USCSI_HEAD) { 12343 flags |= FLAG_HEAD; 12344 } 12345 12346 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12347 flags |= FLAG_NOINTR; 12348 } 12349 12350 /* 12351 * For tagged queueing, things get a bit complicated. 12352 * Check first for head of queue and last for ordered queue. 12353 * If neither head nor order, use the default driver tag flags. 12354 */ 12355 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12356 if (uscmd->uscsi_flags & USCSI_HTAG) { 12357 flags |= FLAG_HTAG; 12358 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12359 flags |= FLAG_OTAG; 12360 } else { 12361 flags |= un->un_tagflags & FLAG_TAGMASK; 12362 } 12363 } 12364 12365 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12366 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12367 } 12368 12369 pktp->pkt_flags = flags; 12370 12371 /* Transfer uscsi information to scsi_pkt */ 12372 (void) scsi_uscsi_pktinit(uscmd, pktp); 12373 12374 /* Copy the caller's CDB into the pkt... */ 12375 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12376 12377 if (uscmd->uscsi_timeout == 0) { 12378 pktp->pkt_time = un->un_uscsi_timeout; 12379 } else { 12380 pktp->pkt_time = uscmd->uscsi_timeout; 12381 } 12382 12383 /* need it later to identify USCSI request in sdintr */ 12384 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12385 12386 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12387 12388 pktp->pkt_private = bp; 12389 pktp->pkt_comp = sdintr; 12390 *pktpp = pktp; 12391 12392 SD_TRACE(SD_LOG_IO_CORE, un, 12393 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12394 12395 return (SD_PKT_ALLOC_SUCCESS); 12396 } 12397 12398 12399 /* 12400 * Function: sd_destroypkt_for_uscsi 12401 * 12402 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12403 * IOs.. Also saves relevant info into the associated uscsi_cmd 12404 * struct. 
12405 * 12406 * Context: May be called under interrupt context 12407 */ 12408 12409 static void 12410 sd_destroypkt_for_uscsi(struct buf *bp) 12411 { 12412 struct uscsi_cmd *uscmd; 12413 struct sd_xbuf *xp; 12414 struct scsi_pkt *pktp; 12415 struct sd_lun *un; 12416 12417 ASSERT(bp != NULL); 12418 xp = SD_GET_XBUF(bp); 12419 ASSERT(xp != NULL); 12420 un = SD_GET_UN(bp); 12421 ASSERT(un != NULL); 12422 ASSERT(!mutex_owned(SD_MUTEX(un))); 12423 pktp = SD_GET_PKTP(bp); 12424 ASSERT(pktp != NULL); 12425 12426 SD_TRACE(SD_LOG_IO_CORE, un, 12427 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12428 12429 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12430 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12431 ASSERT(uscmd != NULL); 12432 12433 /* Save the status and the residual into the uscsi_cmd struct */ 12434 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12435 uscmd->uscsi_resid = bp->b_resid; 12436 12437 /* Transfer scsi_pkt information to uscsi */ 12438 (void) scsi_uscsi_pktfini(pktp, uscmd); 12439 12440 /* 12441 * If enabled, copy any saved sense data into the area specified 12442 * by the uscsi command. 12443 */ 12444 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12445 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12446 /* 12447 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12448 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12449 */ 12450 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12451 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12452 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12453 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12454 MAX_SENSE_LENGTH); 12455 } else { 12456 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12457 SENSE_LENGTH); 12458 } 12459 } 12460 12461 /* We are done with the scsi_pkt; free it now */ 12462 ASSERT(SD_GET_PKTP(bp) != NULL); 12463 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12464 12465 SD_TRACE(SD_LOG_IO_CORE, un, 12466 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12467 } 12468 12469 12470 /* 12471 * Function: sd_bioclone_alloc 12472 * 12473 * Description: Allocate a buf(9S) and init it as per the given buf 12474 * and the various arguments. The associated sd_xbuf 12475 * struct is (nearly) duplicated. The struct buf *bp 12476 * argument is saved in new_xp->xb_private. 12477 * 12478 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12479 * datalen - size of data area for the shadow bp 12480 * blkno - starting LBA 12481 * func - function pointer for b_iodone in the shadow buf. (May 12482 * be NULL if none.) 12483 * 12484 * Return Code: Pointer to the allocated buf(9S) struct 12485 * 12486 * Context: Can sleep. 12487 */ 12488 12489 static struct buf * 12490 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12491 daddr_t blkno, int (*func)(struct buf *)) 12492 { 12493 struct sd_lun *un; 12494 struct sd_xbuf *xp; 12495 struct sd_xbuf *new_xp; 12496 struct buf *new_bp; 12497 12498 ASSERT(bp != NULL); 12499 xp = SD_GET_XBUF(bp); 12500 ASSERT(xp != NULL); 12501 un = SD_GET_UN(bp); 12502 ASSERT(un != NULL); 12503 ASSERT(!mutex_owned(SD_MUTEX(un))); 12504 12505 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12506 NULL, KM_SLEEP); 12507 12508 new_bp->b_lblkno = blkno; 12509 12510 /* 12511 * Allocate an xbuf for the shadow bp and copy the contents of the 12512 * original xbuf into it.
12513 */ 12514 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12515 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12516 12517 /* 12518 * The given bp is automatically saved in the xb_private member 12519 * of the new xbuf. Callers are allowed to depend on this. 12520 */ 12521 new_xp->xb_private = bp; 12522 12523 new_bp->b_private = new_xp; 12524 12525 return (new_bp); 12526 } 12527 12528 /* 12529 * Function: sd_shadow_buf_alloc 12530 * 12531 * Description: Allocate a buf(9S) and init it as per the given buf 12532 * and the various arguments. The associated sd_xbuf 12533 * struct is (nearly) duplicated. The struct buf *bp 12534 * argument is saved in new_xp->xb_private. 12535 * 12536 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12537 * datalen - size of data area for the shadow bp 12538 * bflags - B_READ or B_WRITE (pseudo flag) 12539 * blkno - starting LBA 12540 * func - function pointer for b_iodone in the shadow buf. (May 12541 * be NULL if none.) 12542 * 12543 * Return Code: Pointer to the allocated buf(9S) struct 12544 * 12545 * Context: Can sleep. 12546 */ 12547 12548 static struct buf * 12549 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12550 daddr_t blkno, int (*func)(struct buf *)) 12551 { 12552 struct sd_lun *un; 12553 struct sd_xbuf *xp; 12554 struct sd_xbuf *new_xp; 12555 struct buf *new_bp; 12556 12557 ASSERT(bp != NULL); 12558 xp = SD_GET_XBUF(bp); 12559 ASSERT(xp != NULL); 12560 un = SD_GET_UN(bp); 12561 ASSERT(un != NULL); 12562 ASSERT(!mutex_owned(SD_MUTEX(un))); 12563 12564 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12565 bp_mapin(bp); 12566 } 12567 12568 bflags &= (B_READ | B_WRITE); 12569 #if defined(__i386) || defined(__amd64) 12570 new_bp = getrbuf(KM_SLEEP); 12571 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12572 new_bp->b_bcount = datalen; 12573 new_bp->b_flags = bflags | 12574 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12575 #else 12576 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12577 datalen, bflags, SLEEP_FUNC, NULL); 12578 #endif 12579 new_bp->av_forw = NULL; 12580 new_bp->av_back = NULL; 12581 new_bp->b_dev = bp->b_dev; 12582 new_bp->b_blkno = blkno; 12583 new_bp->b_iodone = func; 12584 new_bp->b_edev = bp->b_edev; 12585 new_bp->b_resid = 0; 12586 12587 /* We need to preserve the B_FAILFAST flag */ 12588 if (bp->b_flags & B_FAILFAST) { 12589 new_bp->b_flags |= B_FAILFAST; 12590 } 12591 12592 /* 12593 * Allocate an xbuf for the shadow bp and copy the contents of the 12594 * original xbuf into it. 12595 */ 12596 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12597 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12598 12599 /* Need later to copy data between the shadow buf & original buf! */ 12600 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12601 12602 /* 12603 * The given bp is automatically saved in the xb_private member 12604 * of the new xbuf. Callers are allowed to depend on this. 12605 */ 12606 new_xp->xb_private = bp; 12607 12608 new_bp->b_private = new_xp; 12609 12610 return (new_bp); 12611 } 12612 12613 /* 12614 * Function: sd_bioclone_free 12615 * 12616 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12617 * when the request was larger than the partition (see sd_bioclone_alloc()).

/*
 * Function: sd_bioclone_free
 *
 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
 *		in the larger-than-partition operation.
 *
 * Context: May be called under interrupt context
 */

static void
sd_bioclone_free(struct buf *bp)
{
    struct sd_xbuf *xp;

    ASSERT(bp != NULL);
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);

    /*
     * Call bp_mapout() before freeing the buf, in case a lower
     * layer or HBA had done a bp_mapin().  We must do this here
     * as we are the "originator" of the shadow buf.
     */
    bp_mapout(bp);

    /*
     * Null out b_iodone before freeing the bp, to ensure that the driver
     * never gets confused by a stale value in this field. (Just a little
     * extra defensiveness here.)
     */
    bp->b_iodone = NULL;

    freerbuf(bp);

    kmem_free(xp, sizeof (struct sd_xbuf));
}

/*
 * Function: sd_shadow_buf_free
 *
 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
 *
 * Context: May be called under interrupt context
 */

static void
sd_shadow_buf_free(struct buf *bp)
{
    struct sd_xbuf *xp;

    ASSERT(bp != NULL);
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);

#if defined(__sparc)
    /*
     * Call bp_mapout() before freeing the buf, in case a lower
     * layer or HBA had done a bp_mapin().  We must do this here
     * as we are the "originator" of the shadow buf.
     */
    bp_mapout(bp);
#endif

    /*
     * Null out b_iodone before freeing the bp, to ensure that the driver
     * never gets confused by a stale value in this field. (Just a little
     * extra defensiveness here.)
     */
    bp->b_iodone = NULL;

#if defined(__i386) || defined(__amd64)
    kmem_free(bp->b_un.b_addr, bp->b_bcount);
    freerbuf(bp);
#else
    scsi_free_consistent_buf(bp);
#endif

    kmem_free(xp, sizeof (struct sd_xbuf));
}
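
/*
 * Illustrative sketch (not compiled into the driver): the alloc/free
 * pairs above implement a classic bounce-buffer lifecycle -- allocate a
 * shadow with its own data area, run the transfer against the shadow,
 * copy the result back to the caller's buffer, then unwind in reverse
 * order.  All names below (bounce_read, device_read) are invented for
 * the example.
 */
#if 0	/* example only */
#include <stdlib.h>
#include <string.h>

extern int device_read(void *buf, size_t len);	/* assumed transport hook */

static int
bounce_read(void *callers_buf, size_t len)
{
    void *shadow = malloc(len);		/* cf. sd_shadow_buf_alloc() */
    int rv = -1;

    if (shadow == NULL)
        return (-1);
    if (device_read(shadow, len) == 0) {
        (void) memcpy(callers_buf, shadow, len);
        rv = 0;
    }
    free(shadow);			/* cf. sd_shadow_buf_free() */
    return (rv);
}
#endif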

/*
 * Function: sd_print_transport_rejected_message
 *
 * Description: This implements the ludicrously complex rules for printing
 *		a "transport rejected" message.  This is to address the
 *		specific problem of having a flood of this error message
 *		produced when a failover occurs.
 *
 * Context: Any.
 */

static void
sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
    int code)
{
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(xp != NULL);

    /*
     * Print the "transport rejected" message under the following
     * conditions:
     *
     * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
     * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
     * - If the error code IS a TRAN_FATAL_ERROR, then the message is
     *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
     *   scsi_transport(9F) (which indicates that the target might have
     *   gone off-line).  This uses un->un_tran_fatal_count, which is
     *   incremented whenever a TRAN_FATAL_ERROR is received, and reset
     *   to zero whenever a TRAN_ACCEPT is returned from scsi_transport().
     *
     * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
     * the preceding cases in order for the message to be printed.
     */
    if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) {
        if ((sd_level_mask & SD_LOGMASK_DIAG) ||
            (code != TRAN_FATAL_ERROR) ||
            (un->un_tran_fatal_count == 1)) {
            switch (code) {
            case TRAN_BADPKT:
                scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                    "transport rejected bad packet\n");
                break;
            case TRAN_FATAL_ERROR:
                scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                    "transport rejected fatal error\n");
                break;
            default:
                scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                    "transport rejected (%d)\n", code);
                break;
            }
        }
    }
}


/*
 * Function: sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance.  If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)).  The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 * Context: Does not sleep/block, therefore technically can be called
 *	    from any context.  However if sorting is enabled then the
 *	    execution time is indeterminate, and may take a long time if
 *	    the wait queue grows large.
 */

static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
    struct buf *ap;

    ASSERT(bp != NULL);
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));

    /* If the queue is empty, add the buf as the only entry & return. */
    if (un->un_waitq_headp == NULL) {
        ASSERT(un->un_waitq_tailp == NULL);
        un->un_waitq_headp = un->un_waitq_tailp = bp;
        bp->av_forw = NULL;
        return;
    }

    ASSERT(un->un_waitq_tailp != NULL);

    /*
     * If sorting is disabled, just add the buf to the tail end of
     * the wait queue and return.
     */
    if (un->un_f_disksort_disabled) {
        un->un_waitq_tailp->av_forw = bp;
        un->un_waitq_tailp = bp;
        bp->av_forw = NULL;
        return;
    }

    /*
     * Sort through the list of requests currently on the wait queue
     * and add the new buf request at the appropriate position.
     *
     * The un->un_waitq_headp is an activity chain pointer on which
     * we keep two queues, sorted in ascending SD_GET_BLKNO() order.  The
     * first queue holds those requests which are positioned after
     * the current SD_GET_BLKNO() (in the first request); the second holds
     * requests which came in after their SD_GET_BLKNO() number was passed.
     * Thus we implement a one-way scan, retracting after reaching
     * the end of the drive to the first request on the second
     * queue, at which time it becomes the first queue.
     * A one-way scan is natural because of the way UNIX read-ahead
     * blocks are allocated.
     *
     * If we lie after the first request, then we must locate the
     * second request list and add ourselves to it.
     * (A worked sketch of this insertion follows the function.)
     */
    ap = un->un_waitq_headp;
    if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
        while (ap->av_forw != NULL) {
            /*
             * Look for an "inversion" in the (normally
             * ascending) block numbers.  This indicates
             * the start of the second request list.
             */
            if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
                /*
                 * Search the second request list for the
                 * first request at a larger block number.
                 * We go before that; however if there is
                 * no such request, we go at the end.
                 */
                do {
                    if (SD_GET_BLKNO(bp) <
                        SD_GET_BLKNO(ap->av_forw)) {
                        goto insert;
                    }
                    ap = ap->av_forw;
                } while (ap->av_forw != NULL);
                goto insert;		/* after last */
            }
            ap = ap->av_forw;
        }

        /*
         * No inversions... we will go after the last, and
         * be the first request in the second request list.
         */
        goto insert;
    }

    /*
     * Request is at/after the current request...
     * sort in the first request list.
     */
    while (ap->av_forw != NULL) {
        /*
         * We want to go after the current request (1) if
         * there is an inversion after it (i.e. it is the end
         * of the first request list), or (2) if the next
         * request is a larger block no. than our request.
         */
        if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
            (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
            goto insert;
        }
        ap = ap->av_forw;
    }

    /*
     * Neither a second list nor a larger request, therefore
     * we go at the end of the first list (which is the same
     * as the end of the whole shebang).
     */
insert:
    bp->av_forw = ap->av_forw;
    ap->av_forw = bp;

    /*
     * If we inserted onto the tail end of the waitq, make sure the
     * tail pointer is updated.
     */
    if (ap == un->un_waitq_tailp) {
        un->un_waitq_tailp = bp;
    }
}
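
/*
 * Illustrative sketch (not compiled into the driver): a minimal userland
 * rendition of the one-way elevator insertion above.  The queue holds at
 * most two runs, each ascending by block number; an "inversion" (next
 * key < current key) marks the start of the second run.  The names
 * struct req and waitq_insert are invented, and the tail-pointer
 * bookkeeping the driver performs is omitted.
 */
#if 0	/* example only */
struct req {
    long	blkno;
    struct req	*next;
};

static void
waitq_insert(struct req **headp, struct req *rp)
{
    struct req *ap = *headp;

    if (ap == NULL) {			/* empty queue */
        rp->next = NULL;
        *headp = rp;
        return;
    }
    if (rp->blkno < ap->blkno) {
        /* Belongs in the second run; find the inversion first. */
        while (ap->next != NULL) {
            if (ap->next->blkno < ap->blkno) {	/* inversion */
                while (ap->next != NULL &&
                    ap->next->blkno <= rp->blkno)
                    ap = ap->next;
                break;
            }
            ap = ap->next;
        }
    } else {
        /* First run: stop at an inversion or a larger key. */
        while (ap->next != NULL &&
            ap->next->blkno >= ap->blkno &&
            ap->next->blkno <= rp->blkno)
            ap = ap->next;
    }
    rp->next = ap->next;
    ap->next = rp;
}
#endif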


/*
 * Function: sd_start_cmds
 *
 * Description: Remove and transport cmds from the driver queues.
 *
 * Arguments: un - pointer to the unit (soft state) struct for the target.
 *
 *	immed_bp - ptr to a buf to be transported immediately.  Only
 *	the immed_bp is transported; bufs on the waitq are not
 *	processed and the un_retry_bp is not checked.  If immed_bp is
 *	NULL, then normal queue processing is performed.
 *
 * Context: May be called from kernel thread context, interrupt context,
 *	    or runout callback context.  This function may not block or
 *	    call routines that block.
 */

static void
sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
{
    struct sd_xbuf *xp;
    struct buf *bp;
    void (*statp)(kstat_io_t *);
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
    void (*saved_statp)(kstat_io_t *);
#endif
    int rval;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(un->un_ncmds_in_transport >= 0);
    ASSERT(un->un_throttle >= 0);

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");

    do {
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
        saved_statp = NULL;
#endif

        /*
         * If we are syncing or dumping, fail the command to
         * avoid recursively calling back into scsi_transport().
         * The dump I/O itself uses a separate code path so this
         * only prevents non-dump I/O from being sent while dumping.
         * File system sync takes place before dumping begins.
         * During panic, filesystem I/O is allowed provided
         * un_in_callback is <= 1.  This is to prevent recursion
         * such as sd_start_cmds -> scsi_transport -> sdintr ->
         * sd_start_cmds and so on.  See panic.c for more information
         * about the states the system can be in during panic.
         */
        if ((un->un_state == SD_STATE_DUMPING) ||
            (ddi_in_panic() && (un->un_in_callback > 1))) {
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_start_cmds: panicking\n");
            goto exit;
        }

        if ((bp = immed_bp) != NULL) {
            /*
             * We have a bp that must be transported immediately.
             * It's OK to transport the immed_bp here without doing
             * the throttle limit check because the immed_bp is
             * always used in a retry/recovery case.  This means
             * that we know we are not at the throttle limit by
             * virtue of the fact that to get here we must have
             * already gotten a command back via sdintr().  This also
             * relies on (1) the command on un_retry_bp preventing
             * further commands from the waitq from being issued;
             * and (2) the code in sd_retry_command checking the
             * throttle limit before issuing a delayed or immediate
             * retry.  This holds even if the throttle limit is
             * currently ratcheted down from its maximum value.
             */
            statp = kstat_runq_enter;
            if (bp == un->un_retry_bp) {
                ASSERT((un->un_retry_statp == NULL) ||
                    (un->un_retry_statp == kstat_waitq_enter) ||
                    (un->un_retry_statp ==
                    kstat_runq_back_to_waitq));
                /*
                 * If the waitq kstat was incremented when
                 * sd_set_retry_bp() queued this bp for a retry,
                 * then we must set up statp so that the waitq
                 * count will get decremented correctly below.
                 * Also we must clear un->un_retry_statp to
                 * ensure that we do not act on a stale value
                 * in this field.
                 */
                if ((un->un_retry_statp == kstat_waitq_enter) ||
                    (un->un_retry_statp ==
                    kstat_runq_back_to_waitq)) {
                    statp = kstat_waitq_to_runq;
                }
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
                saved_statp = un->un_retry_statp;
#endif
                un->un_retry_statp = NULL;

                SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
                    "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
                    "un_throttle:%d un_ncmds_in_transport:%d\n",
                    un, un->un_retry_bp, un->un_throttle,
                    un->un_ncmds_in_transport);
            } else {
                SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
                    "processing priority bp:0x%p\n", bp);
            }

        } else if ((bp = un->un_waitq_headp) != NULL) {
            /*
             * A command on the waitq is ready to go, but do not
             * send it if:
             *
             * (1) the throttle limit has been reached, or
             * (2) a retry is pending, or
             * (3) a START_STOP_UNIT callback is pending, or
             * (4) a callback for a SD_PATH_DIRECT_PRIORITY
             *     command is pending.
             *
             * For all of these conditions, IO processing will
             * restart after the condition is cleared.
             */
            if (un->un_ncmds_in_transport >= un->un_throttle) {
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_start_cmds: exiting, "
                    "throttle limit reached!\n");
                goto exit;
            }
            if (un->un_retry_bp != NULL) {
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_start_cmds: exiting, retry pending!\n");
                goto exit;
            }
            if (un->un_startstop_timeid != NULL) {
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_start_cmds: exiting, "
                    "START_STOP pending!\n");
                goto exit;
            }
            if (un->un_direct_priority_timeid != NULL) {
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_start_cmds: exiting, "
                    "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
                goto exit;
            }

            /* Dequeue the command */
            un->un_waitq_headp = bp->av_forw;
            if (un->un_waitq_headp == NULL) {
                un->un_waitq_tailp = NULL;
            }
            bp->av_forw = NULL;
            statp = kstat_waitq_to_runq;
            SD_TRACE(SD_LOG_IO_CORE, un,
                "sd_start_cmds: processing waitq bp:0x%p\n", bp);

        } else {
            /* No work to do so bail out now */
            SD_TRACE(SD_LOG_IO_CORE, un,
                "sd_start_cmds: no more work, exiting!\n");
            goto exit;
        }

        /*
         * Reset the state to normal.  This is the mechanism by which
         * the state transitions from either SD_STATE_RWAIT or
         * SD_STATE_OFFLINE to SD_STATE_NORMAL.
         * If the state is SD_STATE_PM_CHANGING then this command is
         * part of the device power control and the state must
         * not be put back to normal.  Doing so would allow
         * new commands to proceed when they shouldn't, as
         * the device may be going off.
         */
        if ((un->un_state != SD_STATE_SUSPENDED) &&
            (un->un_state != SD_STATE_PM_CHANGING)) {
            New_state(un, SD_STATE_NORMAL);
        }

        xp = SD_GET_XBUF(bp);
        ASSERT(xp != NULL);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
        /*
         * Allocate the scsi_pkt if we need one, or attach DMA
         * resources if we have a scsi_pkt that needs them.  The
         * latter should only occur for commands that are being
         * retried.
         */
        if ((xp->xb_pktp == NULL) ||
            ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
#else
        if (xp->xb_pktp == NULL) {
#endif
            /*
             * There is no scsi_pkt allocated for this buf.  Call
             * the initpkt function to allocate & init one.
             *
             * The scsi_init_pkt runout callback functionality is
             * implemented as follows:
             *
             * 1) The initpkt function always calls
             *    scsi_init_pkt(9F) with sdrunout specified as the
             *    callback routine.
             * 2) A successful packet allocation is initialized and
             *    the I/O is transported.
             * 3) The I/O associated with an allocation resource
             *    failure is left on its queue to be retried via
             *    runout or the next I/O.
             * 4) The I/O associated with a DMA error is removed
             *    from the queue and failed with EIO.  Processing of
             *    the transport queues is also halted to be
             *    restarted via runout or the next I/O.
             * 5) The I/O associated with a CDB size or packet
             *    size error is removed from the queue and failed
             *    with EIO.  Processing of the transport queues is
             *    continued.
             *
             * Note: there is no interface for canceling a runout
             * callback.  To prevent the driver from detaching or
             * suspending while a runout is pending, the driver
             * state is set to SD_STATE_RWAIT.
             *
             * Note: using the scsi_init_pkt callback facility can
             * result in an I/O request persisting at the head of
             * the list which cannot be satisfied even after
             * multiple retries.  In the future the driver may
             * implement some kind of maximum runout count before
             * failing an I/O.
             *
             * Note: the use of funcp below may seem superfluous,
             * but it helps warlock figure out the correct
             * initpkt function calls (see [s]sd.wlcmd).
             */
            struct scsi_pkt *pktp;
            int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);

            ASSERT(bp != un->un_rqs_bp);

            funcp = sd_initpkt_map[xp->xb_chain_iostart];
            switch ((*funcp)(bp, &pktp)) {
            case SD_PKT_ALLOC_SUCCESS:
                xp->xb_pktp = pktp;
                SD_TRACE(SD_LOG_IO_CORE, un,
                    "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
                    pktp);
                goto got_pkt;

            case SD_PKT_ALLOC_FAILURE:
                /*
                 * Temporary (hopefully) resource depletion.
                 * Since retries and RQS commands always have a
                 * scsi_pkt allocated, these cases should never
                 * get here.  So the only cases this needs to
                 * handle are a bp from the waitq (which we put
                 * back onto the waitq for sdrunout), or a bp
                 * sent as an immed_bp (which we just fail).
                 */
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */

                if (bp == immed_bp) {
                    /*
                     * If SD_XB_DMA_FREED is clear, then
                     * this is a failure to allocate a
                     * scsi_pkt, and we must fail the
                     * command.
                     */
                    if ((xp->xb_pkt_flags &
                        SD_XB_DMA_FREED) == 0) {
                        break;
                    }

                    /*
                     * If this immediate command is NOT our
                     * un_retry_bp, then we must fail it.
                     */
                    if (bp != un->un_retry_bp) {
                        break;
                    }

                    /*
                     * We get here if this cmd is our
                     * un_retry_bp that was DMAFREED, but
                     * scsi_init_pkt() failed to reallocate
                     * DMA resources when we attempted to
                     * retry it.  This can happen when an
                     * mpxio failover is in progress, but
                     * we don't want to just fail the
                     * command in this case.
                     *
                     * Use timeout(9F) to restart it after
                     * a 100ms delay.  We don't want to
                     * let sdrunout() restart it, because
                     * sdrunout() is just supposed to start
                     * commands that are sitting on the
                     * wait queue.  The un_retry_bp stays
                     * set until the command completes, but
                     * sdrunout can be called many times
                     * before that happens.  Since sdrunout
                     * cannot tell if the un_retry_bp is
                     * already in the transport, it could
                     * end up calling scsi_transport() for
                     * the un_retry_bp multiple times.
                     *
                     * Also: don't schedule the callback
                     * if some other callback is already
                     * pending.
                     */
                    if (un->un_retry_statp == NULL) {
                        /*
                         * restore the kstat pointer to
                         * keep kstat counts coherent
                         * when we do retry the command.
                         */
                        un->un_retry_statp =
                            saved_statp;
                    }

                    if ((un->un_startstop_timeid == NULL) &&
                        (un->un_retry_timeid == NULL) &&
                        (un->un_direct_priority_timeid ==
                        NULL)) {

                        un->un_retry_timeid =
                            timeout(
                            sd_start_retry_command,
                            un, SD_RESTART_TIMEOUT);
                    }
                    goto exit;
                }

#else
                if (bp == immed_bp) {
                    break;	/* Just fail the command */
                }
#endif

                /* Add the buf back to the head of the waitq */
                bp->av_forw = un->un_waitq_headp;
                un->un_waitq_headp = bp;
                if (un->un_waitq_tailp == NULL) {
                    un->un_waitq_tailp = bp;
                }
                goto exit;

            case SD_PKT_ALLOC_FAILURE_NO_DMA:
                /*
                 * HBA DMA resource failure.  Fail the command
                 * and continue processing of the queues.
                 */
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_start_cmds: "
                    "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
                break;

            case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
                /*
                 * Note:x86: Partial DMA mapping not supported
                 * for USCSI commands, and all the needed DMA
                 * resources were not allocated.
                 */
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_start_cmds: "
                    "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
                break;

            case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
                /*
                 * Note:x86: Request cannot fit into CDB based
                 * on lba and len.
                 */
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_start_cmds: "
                    "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
                break;

            default:
                /* Should NEVER get here! */
                panic("scsi_initpkt error");
                /*NOTREACHED*/
            }

            /*
             * Fatal error in allocating a scsi_pkt for this buf.
             * Update kstats & return the buf with an error code.
             * We must use sd_return_failed_command_no_restart() to
             * avoid a recursive call back into sd_start_cmds().
             * However this also means that we must keep processing
             * the waitq here in order to avoid stalling.
             */
            if (statp == kstat_waitq_to_runq) {
                SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
            }
            sd_return_failed_command_no_restart(un, bp, EIO);
            if (bp == immed_bp) {
                /* immed_bp is gone by now, so clear this */
                immed_bp = NULL;
            }
            continue;
        }
got_pkt:
        if (bp == immed_bp) {
            /* goto the head of the class.... */
            xp->xb_pktp->pkt_flags |= FLAG_HEAD;
        }

        un->un_ncmds_in_transport++;
        SD_UPDATE_KSTATS(un, statp, bp);

        /*
         * Call scsi_transport() to send the command to the target.
         * According to SCSA architecture, we must drop the mutex here
         * before calling scsi_transport() in order to avoid deadlock.
         * Note that the scsi_pkt's completion routine can be executed
         * (from interrupt context) even before the call to
         * scsi_transport() returns.
         */
        SD_TRACE(SD_LOG_IO_CORE, un,
            "sd_start_cmds: calling scsi_transport()\n");
        DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);

        mutex_exit(SD_MUTEX(un));
        rval = scsi_transport(xp->xb_pktp);
        mutex_enter(SD_MUTEX(un));

        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_start_cmds: scsi_transport() returned %d\n", rval);

        switch (rval) {
        case TRAN_ACCEPT:
            /* Clear this with every pkt accepted by the HBA */
            un->un_tran_fatal_count = 0;
            break;	/* Success; try the next cmd (if any) */

        case TRAN_BUSY:
            un->un_ncmds_in_transport--;
            ASSERT(un->un_ncmds_in_transport >= 0);

            /*
             * Don't retry request sense, the sense data
             * is lost when another request is sent.
             * Free up the rqs buf and retry
             * the original failed cmd.  Update kstat.
             */
            if (bp == un->un_rqs_bp) {
                SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
                bp = sd_mark_rqs_idle(un, xp);
                sd_retry_command(un, bp, SD_RETRIES_STANDARD,
                    NULL, NULL, EIO, SD_BSY_TIMEOUT / 500,
                    kstat_waitq_enter);
                goto exit;
            }

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
            /*
             * Free the DMA resources for the scsi_pkt.  This will
             * allow mpxio to select another path the next time
             * we call scsi_transport() with this scsi_pkt.
             * See sdintr() for the rationale behind this.
             */
            if ((un->un_f_is_fibre == TRUE) &&
                ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
                ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
                scsi_dmafree(xp->xb_pktp);
                xp->xb_pkt_flags |= SD_XB_DMA_FREED;
            }
#endif

            if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
                /*
                 * Commands that are SD_PATH_DIRECT_PRIORITY
                 * are for error recovery situations.  These do
                 * not use the normal command waitq, so if they
                 * get a TRAN_BUSY we cannot put them back onto
                 * the waitq for later retry.  One possible
                 * problem is that there could already be some
                 * other command on un_retry_bp that is waiting
                 * for this one to complete, so we would be
                 * deadlocked if we put this command back onto
                 * the waitq for later retry (since un_retry_bp
                 * must complete before the driver gets back to
                 * commands on the waitq).
                 *
                 * To avoid deadlock we must schedule a callback
                 * that will restart this command after a set
                 * interval.  This should keep retrying for as
                 * long as the underlying transport keeps
                 * returning TRAN_BUSY (just like for other
                 * commands).  Use the same timeout interval as
                 * for the ordinary TRAN_BUSY retry.
                 */
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_start_cmds: scsi_transport() returned "
                    "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");

                SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
                un->un_direct_priority_timeid =
                    timeout(sd_start_direct_priority_command,
                    bp, SD_BSY_TIMEOUT / 500);

                goto exit;
            }

            /*
             * For TRAN_BUSY, we want to reduce the throttle value,
             * unless we are retrying a command.
             */
            if (bp != un->un_retry_bp) {
                sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
            }

            /*
             * Set up the bp to be tried again 10 ms later.
             * Note:x86: Is there a timeout value in the sd_lun
             * for this condition?
             */
            sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500,
                kstat_runq_back_to_waitq);
            goto exit;

        case TRAN_FATAL_ERROR:
            un->un_tran_fatal_count++;
            /* FALLTHRU */

        case TRAN_BADPKT:
        default:
            un->un_ncmds_in_transport--;
            ASSERT(un->un_ncmds_in_transport >= 0);

            /*
             * If this is our REQUEST SENSE command with a
             * transport error, we must get back the pointers
             * to the original buf, and mark the REQUEST
             * SENSE command as "available".
             */
            if (bp == un->un_rqs_bp) {
                bp = sd_mark_rqs_idle(un, xp);
                xp = SD_GET_XBUF(bp);
            } else {
                /*
                 * Legacy behavior: do not update transport
                 * error count for request sense commands.
                 */
                SD_UPDATE_ERRSTATS(un, sd_transerrs);
            }

            SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
            sd_print_transport_rejected_message(un, xp, rval);

            /*
             * We must use sd_return_failed_command_no_restart() to
             * avoid a recursive call back into sd_start_cmds().
             * However this also means that we must keep processing
             * the waitq here in order to avoid stalling.
             */
            sd_return_failed_command_no_restart(un, bp, EIO);

            /*
             * Notify any threads waiting in sd_ddi_suspend() that
             * a command completion has occurred.
             */
            if (un->un_state == SD_STATE_SUSPENDED) {
                cv_broadcast(&un->un_disk_busy_cv);
            }

            if (bp == immed_bp) {
                /* immed_bp is gone by now, so clear this */
                immed_bp = NULL;
            }
            break;
        }

    } while (immed_bp == NULL);

exit:
    ASSERT(mutex_owned(SD_MUTEX(un)));
    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
}
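
/*
 * Illustrative sketch (not compiled into the driver): at its core the
 * dispatch loop above is "pull from the head of the wait queue while the
 * in-flight count is below the throttle, and stop on any pending retry."
 * The stripped-down rendition below uses invented names (struct req,
 * struct q, dispatch, transport) and omits the packet-allocation and
 * error-recovery legs.
 */
#if 0	/* example only */
struct req {
    struct req	*next;
};

struct q {
    struct req	*headp;		/* wait queue head */
    struct req	*retry;		/* pending retry, if any */
    int		inflight;	/* cf. un_ncmds_in_transport */
    int		throttle;	/* cf. un_throttle */
};

extern int transport(struct req *);	/* assumed: 0 = accepted */

static void
dispatch(struct q *qp)
{
    struct req *rp;

    while (qp->retry == NULL && qp->inflight < qp->throttle &&
        (rp = qp->headp) != NULL) {
        qp->headp = rp->next;		/* dequeue */
        qp->inflight++;
        if (transport(rp) != 0) {	/* busy: requeue and stop */
            qp->inflight--;
            rp->next = qp->headp;
            qp->headp = rp;
            break;
        }
    }
}
#endif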


/*
 * Function: sd_return_command
 *
 * Description: Returns a command to its originator (with or without an
 *		error).  Also starts commands waiting to be transported
 *		to the target.
 *
 * Context: May be called from interrupt, kernel, or timeout context
 */

static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
    struct sd_xbuf *xp;
    struct scsi_pkt *pktp;

    ASSERT(bp != NULL);
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != un->un_rqs_bp);
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);

    pktp = SD_GET_PKTP(bp);

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

    /*
     * Note: check for the "sdrestart failed" case.
     */
    if ((un->un_partial_dma_supported == 1) &&
        ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
        (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
        (xp->xb_pktp->pkt_resid == 0)) {

        if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
            /*
             * Successfully set up next portion of cmd
             * transfer, try sending it
             */
            sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
                NULL, NULL, 0, (clock_t)0, NULL);
            sd_start_cmds(un, NULL);
            return;	/* Note:x86: need a return here? */
        }
    }

    /*
     * If this is the failfast bp, clear it from un_failfast_bp.  This
     * can happen if, upon being retried, the failfast bp either
     * succeeded or encountered another error (possibly even a different
     * error than the one that precipitated the failfast state, but in
     * that case it would have had to exhaust retries as well).
     * Regardless, this should not occur while the instance is in the
     * active failfast state.
     */
    if (bp == un->un_failfast_bp) {
        ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
        un->un_failfast_bp = NULL;
    }

    /*
     * Clear the failfast state upon successful completion of ANY cmd.
     */
    if (bp->b_error == 0) {
        un->un_failfast_state = SD_FAILFAST_INACTIVE;
    }

    /*
     * This is used if the command was retried one or more times.  Show that
     * we are done with it, and allow processing of the waitq to resume.
     */
    if (bp == un->un_retry_bp) {
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_return_command: un:0x%p: "
            "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
        un->un_retry_bp = NULL;
        un->un_retry_statp = NULL;
    }

    SD_UPDATE_RDWR_STATS(un, bp);
    SD_UPDATE_PARTITION_STATS(un, bp);

    switch (un->un_state) {
    case SD_STATE_SUSPENDED:
        /*
         * Notify any threads waiting in sd_ddi_suspend() that
         * a command completion has occurred.
         */
        cv_broadcast(&un->un_disk_busy_cv);
        break;
    default:
        sd_start_cmds(un, NULL);
        break;
    }

    /* Return this command up the iodone chain to its originator. */
    mutex_exit(SD_MUTEX(un));

    (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
    xp->xb_pktp = NULL;

    SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

    ASSERT(!mutex_owned(SD_MUTEX(un)));
    mutex_enter(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 * Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
    ASSERT(bp != NULL);
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_return_failed_command: entry\n");

    /*
     * b_resid could already be nonzero due to a partial data
     * transfer, so do not change it here.
     */
    SD_BIOERROR(bp, errcode);

    sd_return_command(un, bp);
    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_return_failed_command: exit\n");
}


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
    int errcode)
{
    struct sd_xbuf *xp;

    ASSERT(bp != NULL);
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);
    ASSERT(errcode != 0);

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_return_failed_command_no_restart: entry\n");

    /*
     * b_resid could already be nonzero due to a partial data
     * transfer, so do not change it here.
     */
    SD_BIOERROR(bp, errcode);

    /*
     * If this is the failfast bp, clear it.  This can happen if the
     * failfast bp encountered a fatal error when we attempted to
     * re-try it (such as a scsi_transport(9F) failure).  However
     * we should NOT be in an active failfast state if the failfast
     * bp is not NULL.
     */
    if (bp == un->un_failfast_bp) {
        ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
        un->un_failfast_bp = NULL;
    }

    if (bp == un->un_retry_bp) {
        /*
         * This command was retried one or more times.  Show that we are
         * done with it, and allow processing of the waitq to resume.
         */
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_return_failed_command_no_restart: "
            " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
        un->un_retry_bp = NULL;
        un->un_retry_statp = NULL;
    }

    SD_UPDATE_RDWR_STATS(un, bp);
    SD_UPDATE_PARTITION_STATS(un, bp);

    mutex_exit(SD_MUTEX(un));

    if (xp->xb_pktp != NULL) {
        (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
        xp->xb_pktp = NULL;
    }

    SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

    mutex_enter(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 * Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *	bp - Pointer to the buf for the command to be retried.
 *
 *	retry_check_flag - Flag to see which (if any) of the retry
 *	   counts should be decremented/checked.  If the indicated
 *	   retry count is exhausted, then the command will not be
 *	   retried; it will be failed instead.  This should use a
 *	   value equal to one of the following:
 *
 *	   SD_RETRIES_NOCHECK
 *	   SD_RETRIES_STANDARD
 *	   SD_RETRIES_VICTIM
 *
 *	   Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *	   if the check should be made to see if FLAG_ISOLATE is set
 *	   in the pkt.  If FLAG_ISOLATE is set, then the command is
 *	   not retried, it is simply failed.
 *
 *	user_funcp - Ptr to function to call before dispatching the
 *	   command.  May be NULL if no action needs to be performed.
 *	   (Primarily intended for printing messages.)
 *
 *	user_arg - Optional argument to be passed along to
 *	   the user_funcp call.
 *
 *	failure_code - errno return code to set in the bp if the
 *	   command is going to be failed.
 *
 *	retry_delay - Retry delay interval in (clock_t) units.  May
 *	   be zero, which indicates that the command should be retried
 *	   immediately (ie, without an intervening delay).
 *
 *	statp - Ptr to kstat function to be updated if the command
 *	   is queued for a delayed retry.  May be NULL if no kstat
 *	   update is desired.
 *
 * Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
    int code), void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
    struct sd_xbuf *xp;
    struct scsi_pkt *pktp;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);
    pktp = SD_GET_PKTP(bp);
    ASSERT(pktp != NULL);

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
        "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

    /*
     * If we are syncing or dumping, fail the command to avoid
     * recursively calling back into scsi_transport().
     */
    if (ddi_in_panic()) {
        goto fail_command_no_log;
    }

    /*
     * We should never be retrying a command with FLAG_DIAGNOSE set, so
     * log an error and fail the command.
     */
    if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
        scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
            "ERROR, retrying FLAG_DIAGNOSE command.\n");
        sd_dump_memory(un, SD_LOG_IO, "CDB",
            (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
        sd_dump_memory(un, SD_LOG_IO, "Sense Data",
            (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
        goto fail_command;
    }

    /*
     * If we are suspended, then put the command onto the head of the
     * wait queue since we don't want to start more commands, and
     * clear un_retry_bp.  The next time we are resumed, we will
     * handle the commands on the wait queue.
     */
    switch (un->un_state) {
    case SD_STATE_SUSPENDED:
    case SD_STATE_DUMPING:
        bp->av_forw = un->un_waitq_headp;
        un->un_waitq_headp = bp;
        if (un->un_waitq_tailp == NULL) {
            un->un_waitq_tailp = bp;
        }
        if (bp == un->un_retry_bp) {
            un->un_retry_bp = NULL;
            un->un_retry_statp = NULL;
        }
        SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
            "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
        return;
    default:
        break;
    }

    /*
     * If the caller wants us to check FLAG_ISOLATE, then see if that
     * is set; if it is then we do not want to retry the command.
     * Normally, FLAG_ISOLATE is only used with USCSI cmds.
     */
    if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
        if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
            goto fail_command;
        }
    }


    /*
     * If SD_RETRIES_FAILFAST is set, it indicates that either a
     * command timeout or a selection timeout has occurred.  This means
     * that we were unable to establish any kind of communication with
     * the target, and subsequent retries and/or commands are likely
     * to encounter similar results and take a long time to complete.
     *
     * If this is a failfast error condition, we need to update the
     * failfast state, even if this bp does not have B_FAILFAST set.
     * (A worked sketch of this state machine follows the function.)
     */
    if (retry_check_flag & SD_RETRIES_FAILFAST) {
        if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
            ASSERT(un->un_failfast_bp == NULL);
            /*
             * If we are already in the active failfast state, and
             * another failfast error condition has been detected,
             * then fail this command if it has B_FAILFAST set.
             * If B_FAILFAST is clear, then maintain the legacy
             * behavior of retrying heroically, even though this
             * will take a lot more time to fail the command.
             */
            if (bp->b_flags & B_FAILFAST) {
                goto fail_command;
            }
        } else {
            /*
             * We're not in the active failfast state, but we
             * have a failfast error condition, so we must begin
             * transition to the next state.  We do this regardless
             * of whether or not this bp has B_FAILFAST set.
             */
            if (un->un_failfast_bp == NULL) {
                /*
                 * This is the first bp to meet a failfast
                 * condition so save it on un_failfast_bp &
                 * do normal retry processing.  Do not enter
                 * active failfast state yet.  This marks
                 * entry into the "failfast pending" state.
                 */
                un->un_failfast_bp = bp;

            } else if (un->un_failfast_bp == bp) {
                /*
                 * This is the second time *this* bp has
                 * encountered a failfast error condition,
                 * so enter active failfast state & flush
                 * queues as appropriate.
                 */
                un->un_failfast_state = SD_FAILFAST_ACTIVE;
                un->un_failfast_bp = NULL;
                sd_failfast_flushq(un);

                /*
                 * Fail this bp now if B_FAILFAST set;
                 * otherwise continue with retries. (It would
                 * be pretty ironic if this bp succeeded on a
                 * subsequent retry after we just flushed all
                 * the queues).
                 */
                if (bp->b_flags & B_FAILFAST) {
                    goto fail_command;
                }

#if !defined(lint) && !defined(__lint)
            } else {
                /*
                 * If neither of the preceding conditionals
                 * was true, it means that there is some
                 * *other* bp that has met an initial failfast
                 * condition and is currently either being
                 * retried or is waiting to be retried.  In
                 * that case we should perform normal retry
                 * processing on *this* bp, since there is a
                 * chance that the current failfast condition
                 * is transient and recoverable.  If that does
                 * not turn out to be the case, then retries
                 * will be cleared when the wait queue is
                 * flushed anyway.
                 */
#endif
            }
        }
    } else {
        /*
         * SD_RETRIES_FAILFAST is clear, which indicates that we
         * likely were able to at least establish some level of
         * communication with the target and subsequent commands
         * and/or retries are likely to get through to the target.
         * In this case we want to be aggressive about clearing
         * the failfast state.  Note that this does not affect
         * the "failfast pending" condition.
         */
        un->un_failfast_state = SD_FAILFAST_INACTIVE;
    }


    /*
     * Check the specified retry count to see if we can still do
     * any retries with this pkt before we should fail it.
     */
    switch (retry_check_flag & SD_RETRIES_MASK) {
    case SD_RETRIES_VICTIM:
        /*
         * Check the victim retry count.  If exhausted, then fall
         * thru & check against the standard retry count.
         */
        if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
            /* Increment count & proceed with the retry */
            xp->xb_victim_retry_count++;
            break;
        }
        /* Victim retries exhausted, fall back to std. retries... */
        /* FALLTHRU */

    case SD_RETRIES_STANDARD:
        if (xp->xb_retry_count >= un->un_retry_count) {
            /* Retries exhausted, fail the command */
            SD_TRACE(SD_LOG_IO_CORE, un,
                "sd_retry_command: retries exhausted!\n");
            /*
             * update b_resid for failed SCMD_READ & SCMD_WRITE
             * commands with nonzero pkt_resid.
             */
            if ((pktp->pkt_reason == CMD_CMPLT) &&
                (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
                (pktp->pkt_resid != 0)) {
                uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
                if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
                    SD_UPDATE_B_RESID(bp, pktp);
                }
            }
            goto fail_command;
        }
        xp->xb_retry_count++;
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
        break;

    case SD_RETRIES_UA:
        if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
            /* Retries exhausted, fail the command */
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                "Unit Attention retries exhausted. "
                "Check the target.\n");
            goto fail_command;
        }
        xp->xb_ua_retry_count++;
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_retry_command: retry count:%d\n",
            xp->xb_ua_retry_count);
        break;

    case SD_RETRIES_BUSY:
        if (xp->xb_retry_count >= un->un_busy_retry_count) {
            /* Retries exhausted, fail the command */
            SD_TRACE(SD_LOG_IO_CORE, un,
                "sd_retry_command: retries exhausted!\n");
            goto fail_command;
        }
        xp->xb_retry_count++;
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
        break;

    case SD_RETRIES_NOCHECK:
    default:
        /* No retry count to check.  Just proceed with the retry */
        break;
    }

    xp->xb_pktp->pkt_flags |= FLAG_HEAD;

    /*
     * If we were given a zero timeout, we must attempt to retry the
     * command immediately (ie, without a delay).
     */
    if (retry_delay == 0) {
        /*
         * Check some limiting conditions to see if we can actually
         * do the immediate retry.  If we cannot, then we must
         * fall back to queueing up a delayed retry.
         */
        if (un->un_ncmds_in_transport >= un->un_throttle) {
            /*
             * We are at the throttle limit for the target,
             * fall back to delayed retry.
             */
            retry_delay = SD_BSY_TIMEOUT;
            statp = kstat_waitq_enter;
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_retry_command: immed. retry hit "
                "throttle!\n");
        } else {
            /*
             * We're clear to proceed with the immediate retry.
             * First call the user-provided function (if any)
             */
            if (user_funcp != NULL) {
                (*user_funcp)(un, bp, user_arg,
                    SD_IMMEDIATE_RETRY_ISSUED);
#ifdef __lock_lint
                sd_print_incomplete_msg(un, bp, user_arg,
                    SD_IMMEDIATE_RETRY_ISSUED);
                sd_print_cmd_incomplete_msg(un, bp, user_arg,
                    SD_IMMEDIATE_RETRY_ISSUED);
                sd_print_sense_failed_msg(un, bp, user_arg,
                    SD_IMMEDIATE_RETRY_ISSUED);
#endif
            }

            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_retry_command: issuing immediate retry\n");

            /*
             * Call sd_start_cmds() to transport the command to
             * the target.
             */
            sd_start_cmds(un, bp);

            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_retry_command exit\n");
            return;
        }
    }

    /*
     * Set up to retry the command after a delay.
     * First call the user-provided function (if any)
     */
    if (user_funcp != NULL) {
        (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
    }

    sd_set_retry_bp(un, bp, retry_delay, statp);

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
    return;

fail_command:

    if (user_funcp != NULL) {
        (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
    }

fail_command_no_log:

    SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_retry_command: returning failed command\n");

    sd_return_failed_command(un, bp, failure_code);

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}
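
/*
 * Illustrative sketch (not compiled into the driver): the failfast logic
 * above is a two-strike state machine -- the first qualifying error on a
 * bp marks "failfast pending"; a second qualifying error on the *same*
 * bp promotes the instance to the active failfast state and flushes the
 * queues.  The names below (enum ff_state, struct ff, ff_note_error)
 * are invented for the example.
 */
#if 0	/* example only */
enum ff_state { FF_INACTIVE, FF_ACTIVE };

struct ff {
    enum ff_state	state;
    void		*pending_bp;	/* cf. un_failfast_bp */
};

/* Returns 1 if the caller should fail a B_FAILFAST bp immediately. */
static int
ff_note_error(struct ff *fp, void *bp)
{
    if (fp->state == FF_ACTIVE)
        return (1);
    if (fp->pending_bp == NULL) {
        fp->pending_bp = bp;		/* first strike: pending */
    } else if (fp->pending_bp == bp) {
        fp->state = FF_ACTIVE;		/* second strike: active */
        fp->pending_bp = NULL;
        /* the caller would flush the wait queue here */
        return (1);
    }
    return (0);				/* retry normally */
}
#endif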


/*
 * Function: sd_set_retry_bp
 *
 * Description: Set up the given bp for retry.
 *
 * Arguments: un - ptr to associated softstate
 *	      bp - ptr to buf(9S) for the command
 *	      retry_delay - time interval before issuing retry (may be 0)
 *	      statp - optional pointer to kstat function
 *
 * Context: May be called under interrupt context
 */

static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
        "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);

    /*
     * Indicate that the command is being retried.  This will not allow any
     * other commands on the wait queue to be transported to the target
     * until this command has been completed (success or failure).  The
     * "retry command" is not transported to the target until the given
     * time delay expires, unless the user specified a 0 retry_delay.
     *
     * Note: the timeout(9F) callback routine is what actually calls
     * sd_start_cmds() to transport the command, with the exception of a
     * zero retry_delay.  The only current implementor of a zero retry delay
     * is the case where a START_STOP_UNIT is sent to spin-up a device.
     */
    if (un->un_retry_bp == NULL) {
        ASSERT(un->un_retry_statp == NULL);
        un->un_retry_bp = bp;

        /*
         * If the user has not specified a delay, the command should
         * be queued and no timeout should be scheduled.
         */
        if (retry_delay == 0) {
            /*
             * Save the kstat pointer that will be used in the
             * call to SD_UPDATE_KSTATS() below, so that
             * sd_start_cmds() can correctly decrement the waitq
             * count when it is time to transport this command.
             */
            un->un_retry_statp = statp;
            goto done;
        }
    }

    if (un->un_retry_bp == bp) {
        /*
         * Save the kstat pointer that will be used in the call to
         * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
         * correctly decrement the waitq count when it is time to
         * transport this command.
         */
        un->un_retry_statp = statp;

        /*
         * Schedule a timeout if:
         *   1) The user has specified a delay.
         *   2) There is not a START_STOP_UNIT callback pending.
         *
         * If no delay has been specified, then it is up to the caller
         * to ensure that IO processing continues without stalling.
         * Effectively, this means that the caller will issue the
         * required call to sd_start_cmds().  The START_STOP_UNIT
         * callback does this after the START STOP UNIT command has
         * completed.  In either of these cases we should not schedule
         * a timeout callback here.  Also don't schedule the timeout if
         * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
         */
        if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
            (un->un_direct_priority_timeid == NULL)) {
            un->un_retry_timeid =
                timeout(sd_start_retry_command, un, retry_delay);
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_set_retry_bp: setting timeout: un: 0x%p"
                " bp:0x%p un_retry_timeid:0x%p\n",
                un, bp, un->un_retry_timeid);
        }
    } else {
        /*
         * We only get in here if there is already another command
         * waiting to be retried.  In this case, we just put the
         * given command onto the wait queue, so it can be transported
         * after the current retry command has completed.
         *
         * Also we have to make sure that if the command at the head
         * of the wait queue is the un_failfast_bp, that we do not
         * put ahead of it any other commands that are to be retried.
         */
        if ((un->un_failfast_bp != NULL) &&
            (un->un_failfast_bp == un->un_waitq_headp)) {
            /*
             * Enqueue this command AFTER the first command on
             * the wait queue (which is also un_failfast_bp).
             */
            bp->av_forw = un->un_waitq_headp->av_forw;
            un->un_waitq_headp->av_forw = bp;
            if (un->un_waitq_headp == un->un_waitq_tailp) {
                un->un_waitq_tailp = bp;
            }
        } else {
            /* Enqueue this command at the head of the waitq. */
            bp->av_forw = un->un_waitq_headp;
            un->un_waitq_headp = bp;
            if (un->un_waitq_tailp == NULL) {
                un->un_waitq_tailp = bp;
            }
        }

        if (statp == NULL) {
            statp = kstat_waitq_enter;
        }
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
    }

done:
    if (statp != NULL) {
        SD_UPDATE_KSTATS(un, statp, bp);
    }

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_set_retry_bp: exit un:0x%p\n", un);
}


/*
 * Function: sd_start_retry_command
 *
 * Description: Start the command that has been waiting on the target's
 *		retry queue.  Called from timeout(9F) context after the
 *		retry delay interval has expired.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: timeout(9F) thread context.  May not sleep.
 */

static void
sd_start_retry_command(void *arg)
{
    struct sd_lun *un = arg;

    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_start_retry_command: entry\n");

    mutex_enter(SD_MUTEX(un));

    un->un_retry_timeid = NULL;

    if (un->un_retry_bp != NULL) {
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
            un, un->un_retry_bp);
        sd_start_cmds(un, un->un_retry_bp);
    }

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_start_retry_command: exit\n");
}


/*
 * Function: sd_start_direct_priority_command
 *
 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
 *		received TRAN_BUSY when we called scsi_transport() to send it
 *		to the underlying HBA.  This function is called from timeout(9F)
 *		context after the delay interval has expired.
 *
 * Arguments: arg - pointer to associated buf(9S) to be restarted.
 *
 * Context: timeout(9F) thread context.  May not sleep.
 */

static void
sd_start_direct_priority_command(void *arg)
{
    struct buf *priority_bp = arg;
    struct sd_lun *un;

    ASSERT(priority_bp != NULL);
    un = SD_GET_UN(priority_bp);
    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_start_direct_priority_command: entry\n");

    mutex_enter(SD_MUTEX(un));
    un->un_direct_priority_timeid = NULL;
    sd_start_cmds(un, priority_bp);
    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_start_direct_priority_command: exit\n");
}
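
/*
 * Illustrative sketch (not compiled into the driver): the two callbacks
 * above follow the standard timeout(9F) pattern -- record the timeout id
 * under the softstate mutex, and have the callback re-acquire the mutex,
 * clear the id, and restart the work.  The names my_state, my_retry_cb
 * and my_schedule_retry are invented; timeout(9F) itself is DDI/DKI.
 */
#if 0	/* example only */
struct my_state {
    kmutex_t		mu;
    timeout_id_t	tid;		/* cf. un_retry_timeid */
};

static void
my_retry_cb(void *arg)
{
    struct my_state *sp = arg;

    mutex_enter(&sp->mu);
    sp->tid = NULL;			/* the timeout has fired */
    /* restart queued work here, cf. sd_start_cmds() */
    mutex_exit(&sp->mu);
}

static void
my_schedule_retry(struct my_state *sp, clock_t delay)
{
    ASSERT(mutex_owned(&sp->mu));
    if (sp->tid == NULL)		/* don't double-schedule */
        sp->tid = timeout(my_retry_cb, sp, delay);
}
#endif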


/*
 * Function: sd_send_request_sense_command
 *
 * Description: Sends a REQUEST SENSE command to the target
 *
 * Context: May be called from interrupt context.
 */

static void
sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp)
{
    ASSERT(bp != NULL);
    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
        "entry: buf:0x%p\n", bp);

    /*
     * If we are syncing or dumping, then fail the command to avoid a
     * recursive callback into scsi_transport().  Also fail the command
     * if we are suspended (legacy behavior).
     */
    if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
        (un->un_state == SD_STATE_DUMPING)) {
        sd_return_failed_command(un, bp, EIO);
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_send_request_sense_command: syncing/dumping, exit\n");
        return;
    }

    /*
     * Retry the failed command and don't issue the request sense if:
     *    1) the sense buf is busy
     *    2) we have 1 or more outstanding commands on the target
     *       (the sense data will be cleared or invalidated anyway)
     *
     * Note: There could be an issue with not checking a retry limit here;
     * the problem is determining which retry limit to check.
     */
    if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
        /* Don't retry if the command is flagged as non-retryable */
        if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
            sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
                NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter);
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_send_request_sense_command: "
                "at full throttle, retrying exit\n");
        } else {
            sd_return_failed_command(un, bp, EIO);
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sd_send_request_sense_command: "
                "at full throttle, non-retryable exit\n");
        }
        return;
    }

    sd_mark_rqs_busy(un, bp);
    sd_start_cmds(un, un->un_rqs_bp);

    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
        "sd_send_request_sense_command: exit\n");
}


/*
 * Function: sd_mark_rqs_busy
 *
 * Description: Indicate that the request sense bp for this instance is
 *		in use.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
{
    struct sd_xbuf *sense_xp;

    ASSERT(un != NULL);
    ASSERT(bp != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(un->un_sense_isbusy == 0);

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
        "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);

    sense_xp = SD_GET_XBUF(un->un_rqs_bp);
    ASSERT(sense_xp != NULL);

    SD_INFO(SD_LOG_IO, un,
        "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);

    ASSERT(sense_xp->xb_pktp != NULL);
    ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
        == (FLAG_SENSING | FLAG_HEAD));

    un->un_sense_isbusy = 1;
    un->un_rqs_bp->b_resid = 0;
    sense_xp->xb_pktp->pkt_resid = 0;
    sense_xp->xb_pktp->pkt_reason = 0;

    /* So we can get back the bp at interrupt time! */
    sense_xp->xb_sense_bp = bp;

    bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);

    /*
     * Mark this buf as awaiting sense data. (This is already set in
     * the pkt_flags for the RQS packet.)
     */
    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;

    sense_xp->xb_retry_count = 0;
    sense_xp->xb_victim_retry_count = 0;
    sense_xp->xb_ua_retry_count = 0;
    sense_xp->xb_nr_retry_count = 0;
    sense_xp->xb_dma_resid = 0;

    /* Clean up the fields for auto-request sense */
    sense_xp->xb_sense_status = 0;
    sense_xp->xb_sense_state = 0;
    sense_xp->xb_sense_resid = 0;
    bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
}
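
/*
 * Illustrative sketch (not compiled into the driver): sd_mark_rqs_busy()
 * and sd_mark_rqs_idle() guard a single preallocated resource with a
 * busy flag under the softstate mutex -- claim it, stash the owner so it
 * can be recovered at completion time, and release it in one place.  The
 * names below (struct slot, slot_claim, slot_release) are invented.
 */
#if 0	/* example only */
struct slot {
    kmutex_t	mu;
    int		busy;		/* cf. un_sense_isbusy */
    void	*owner;		/* cf. xb_sense_bp */
};

static int
slot_claim(struct slot *sp, void *owner)
{
    ASSERT(mutex_owned(&sp->mu));
    if (sp->busy)
        return (-1);		/* caller must retry later */
    sp->busy = 1;
    sp->owner = owner;		/* recovered at completion time */
    return (0);
}

static void *
slot_release(struct slot *sp)
{
    void *owner;

    ASSERT(mutex_owned(&sp->mu));
    ASSERT(sp->busy);
    sp->busy = 0;
    owner = sp->owner;
    sp->owner = NULL;
    return (owner);
}
#endif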
14420 * 14421 * Return Code: Pointer to the RQS buf 14422 * 14423 * Context: May be called under interrupt context 14424 */ 14425 14426 static struct buf * 14427 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14428 { 14429 struct buf *bp; 14430 ASSERT(un != NULL); 14431 ASSERT(sense_xp != NULL); 14432 ASSERT(mutex_owned(SD_MUTEX(un))); 14433 ASSERT(un->un_sense_isbusy != 0); 14434 14435 un->un_sense_isbusy = 0; 14436 bp = sense_xp->xb_sense_bp; 14437 sense_xp->xb_sense_bp = NULL; 14438 14439 /* This pkt is no longer interested in getting sense data */ 14440 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14441 14442 return (bp); 14443 } 14444 14445 14446 14447 /* 14448 * Function: sd_alloc_rqs 14449 * 14450 * Description: Set up the unit to receive auto request sense data 14451 * 14452 * Return Code: DDI_SUCCESS or DDI_FAILURE 14453 * 14454 * Context: Called under attach(9E) context 14455 */ 14456 14457 static int 14458 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14459 { 14460 struct sd_xbuf *xp; 14461 14462 ASSERT(un != NULL); 14463 ASSERT(!mutex_owned(SD_MUTEX(un))); 14464 ASSERT(un->un_rqs_bp == NULL); 14465 ASSERT(un->un_rqs_pktp == NULL); 14466 14467 /* 14468 * First allocate the required buf and scsi_pkt structs, then set up 14469 * the CDB in the scsi_pkt for a REQUEST SENSE command. 14470 */ 14471 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14472 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14473 if (un->un_rqs_bp == NULL) { 14474 return (DDI_FAILURE); 14475 } 14476 14477 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14478 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14479 14480 if (un->un_rqs_pktp == NULL) { 14481 sd_free_rqs(un); 14482 return (DDI_FAILURE); 14483 } 14484 14485 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14486 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14487 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 14488 14489 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14490 14491 /* Set up the other needed members in the ARQ scsi_pkt. */ 14492 un->un_rqs_pktp->pkt_comp = sdintr; 14493 un->un_rqs_pktp->pkt_time = sd_io_time; 14494 un->un_rqs_pktp->pkt_flags |= 14495 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14496 14497 /* 14498 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14499 * provide any intpkt, destroypkt routines as we take care of 14500 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14501 */ 14502 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14503 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14504 xp->xb_pktp = un->un_rqs_pktp; 14505 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14506 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14507 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14508 14509 /* 14510 * Save the pointer to the request sense private bp so it can 14511 * be retrieved in sdintr. 14512 */ 14513 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14514 ASSERT(un->un_rqs_bp->b_private == xp); 14515 14516 /* 14517 * See if the HBA supports auto-request sense for the specified 14518 * target/lun. If it does, then try to enable it (if not already 14519 * enabled). 14520 * 14521 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14522 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14523 * return success. However, in both of these cases ARQ is always 14524 * enabled and scsi_ifgetcap will always return true. 
The best approach
14525 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
14526 *
14527 * The 3rd case is an HBA (adp) that always returns enabled on
14528 * scsi_ifgetcap even when ARQ is not enabled; there the best approach
14529 * is to issue a scsi_ifsetcap followed by a scsi_ifgetcap.
14530 * Note: this case is to circumvent the Adaptec bug. (x86 only)
14531 */
14532
14533 if (un->un_f_is_fibre == TRUE) {
14534 un->un_f_arq_enabled = TRUE;
14535 } else {
14536 #if defined(__i386) || defined(__amd64)
14537 /*
14538 * Circumvent the Adaptec bug; remove this code when
14539 * the bug is fixed
14540 */
14541 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
14542 #endif
14543 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
14544 case 0:
14545 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14546 "sd_alloc_rqs: HBA supports ARQ\n");
14547 /*
14548 * ARQ is supported by this HBA but currently is not
14549 * enabled. Attempt to enable it and if successful then
14550 * mark this instance as ARQ enabled.
14551 */
14552 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
14553 == 1) {
14554 /* Successfully enabled ARQ in the HBA */
14555 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14556 "sd_alloc_rqs: ARQ enabled\n");
14557 un->un_f_arq_enabled = TRUE;
14558 } else {
14559 /* Could not enable ARQ in the HBA */
14560 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14561 "sd_alloc_rqs: failed ARQ enable\n");
14562 un->un_f_arq_enabled = FALSE;
14563 }
14564 break;
14565 case 1:
14566 /*
14567 * ARQ is supported by this HBA and is already enabled.
14568 * Just mark ARQ as enabled for this instance.
14569 */
14570 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14571 "sd_alloc_rqs: ARQ already enabled\n");
14572 un->un_f_arq_enabled = TRUE;
14573 break;
14574 default:
14575 /*
14576 * ARQ is not supported by this HBA; disable it for this
14577 * instance.
14578 */
14579 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14580 "sd_alloc_rqs: HBA does not support ARQ\n");
14581 un->un_f_arq_enabled = FALSE;
14582 break;
14583 }
14584 }
14585
14586 return (DDI_SUCCESS);
14587 }
14588
14589
14590 /*
14591 * Function: sd_free_rqs
14592 *
14593 * Description: Cleanup for the per-instance RQS command.
14594 *
14595 * Context: Kernel thread context
14596 */
14597
14598 static void
14599 sd_free_rqs(struct sd_lun *un)
14600 {
14601 ASSERT(un != NULL);
14602
14603 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
14604
14605 /*
14606 * If consistent memory is bound to a scsi_pkt, the pkt
14607 * has to be destroyed *before* freeing the consistent memory.
14608 * Don't change the sequence of these operations.
14609 * scsi_destroy_pkt() might access memory, which isn't allowed,
14610 * after it was freed in scsi_free_consistent_buf().
14611 */
14612 if (un->un_rqs_pktp != NULL) {
14613 scsi_destroy_pkt(un->un_rqs_pktp);
14614 un->un_rqs_pktp = NULL;
14615 }
14616
14617 if (un->un_rqs_bp != NULL) {
14618 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
14619 if (xp != NULL) {
14620 kmem_free(xp, sizeof (struct sd_xbuf));
14621 }
14622 scsi_free_consistent_buf(un->un_rqs_bp);
14623 un->un_rqs_bp = NULL;
14624 }
14625 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
14626 }
14627
14628
14629
14630 /*
14631 * Function: sd_reduce_throttle
14632 *
14633 * Description: Reduces the maximum # of outstanding commands on a
14634 * target to the current number of outstanding commands.
14635 * Queues a timeout(9F) callback to restore the limit
14636 * after a specified interval has elapsed.
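 * (For example, if un_throttle is 16 and three commands are still in
 * transport when a TRAN_BUSY is returned, the throttle is reduced to 3
 * until the timeout(9F) callback restores it.)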
14637 * Typically used when we get a TRAN_BUSY return code
14638 * back from scsi_transport().
14639 *
14640 * Arguments: un - ptr to the sd_lun softstate struct
14641 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
14642 *
14643 * Context: May be called from interrupt context
14644 */
14645
14646 static void
14647 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
14648 {
14649 ASSERT(un != NULL);
14650 ASSERT(mutex_owned(SD_MUTEX(un)));
14651 ASSERT(un->un_ncmds_in_transport >= 0);
14652
14653 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
14654 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
14655 un, un->un_throttle, un->un_ncmds_in_transport);
14656
14657 if (un->un_throttle > 1) {
14658 if (un->un_f_use_adaptive_throttle == TRUE) {
14659 switch (throttle_type) {
14660 case SD_THROTTLE_TRAN_BUSY:
14661 if (un->un_busy_throttle == 0) {
14662 un->un_busy_throttle = un->un_throttle;
14663 }
14664 break;
14665 case SD_THROTTLE_QFULL:
14666 un->un_busy_throttle = 0;
14667 break;
14668 default:
14669 ASSERT(FALSE);
14670 }
14671
14672 if (un->un_ncmds_in_transport > 0) {
14673 un->un_throttle = un->un_ncmds_in_transport;
14674 }
14675
14676 } else {
14677 if (un->un_ncmds_in_transport == 0) {
14678 un->un_throttle = 1;
14679 } else {
14680 un->un_throttle = un->un_ncmds_in_transport;
14681 }
14682 }
14683 }
14684
14685 /* Reschedule the timeout if none is currently active */
14686 if (un->un_reset_throttle_timeid == NULL) {
14687 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
14688 un, SD_THROTTLE_RESET_INTERVAL);
14689 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14690 "sd_reduce_throttle: timeout scheduled!\n");
14691 }
14692
14693 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
14694 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
14695 }
14696
14697
14698
14699 /*
14700 * Function: sd_restore_throttle
14701 *
14702 * Description: Callback function for timeout(9F). Resets the current
14703 * value of un->un_throttle to its default.
14704 *
14705 * Arguments: arg - pointer to associated softstate for the device.
14706 *
14707 * Context: May be called from interrupt context
14708 */
14709
14710 static void
14711 sd_restore_throttle(void *arg)
14712 {
14713 struct sd_lun *un = arg;
14714
14715 ASSERT(un != NULL);
14716 ASSERT(!mutex_owned(SD_MUTEX(un)));
14717
14718 mutex_enter(SD_MUTEX(un));
14719
14720 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
14721 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
14722
14723 un->un_reset_throttle_timeid = NULL;
14724
14725 if (un->un_f_use_adaptive_throttle == TRUE) {
14726 /*
14727 * If un_busy_throttle is nonzero, then it contains the
14728 * value that un_throttle had when we got a TRAN_BUSY back
14729 * from scsi_transport(). We want to revert to this
14730 * value.
14731 *
14732 * In the QFULL case, the throttle limit will incrementally
14733 * increase until it reaches max throttle.
14734 */
14735 if (un->un_busy_throttle > 0) {
14736 un->un_throttle = un->un_busy_throttle;
14737 un->un_busy_throttle = 0;
14738 } else {
14739 /*
14740 * Increase the throttle by 10% to open the gate
14741 * slowly; schedule another restore if the saved
14742 * throttle has not yet been reached.
14743 */
14744 short throttle;
14745 if (sd_qfull_throttle_enable) {
14746 throttle = un->un_throttle +
14747 max((un->un_throttle / 10), 1);
14748 un->un_throttle =
14749 (throttle < un->un_saved_throttle) ?
throttle : un->un_saved_throttle;
14751 if (un->un_throttle < un->un_saved_throttle) {
14752 un->un_reset_throttle_timeid =
14753 timeout(sd_restore_throttle,
14754 un,
14755 SD_QFULL_THROTTLE_RESET_INTERVAL);
14756 }
14757 }
14758 }
14759
14760 /*
14761 * If un_throttle has fallen below the low-water mark, we
14762 * restore the maximum value here (and allow it to ratchet
14763 * down again if necessary).
14764 */
14765 if (un->un_throttle < un->un_min_throttle) {
14766 un->un_throttle = un->un_saved_throttle;
14767 }
14768 } else {
14769 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
14770 "restoring limit from 0x%x to 0x%x\n",
14771 un->un_throttle, un->un_saved_throttle);
14772 un->un_throttle = un->un_saved_throttle;
14773 }
14774
14775 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14776 "sd_restore_throttle: calling sd_start_cmds!\n");
14777
14778 sd_start_cmds(un, NULL);
14779
14780 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14781 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
14782 un, un->un_throttle);
14783
14784 mutex_exit(SD_MUTEX(un));
14785
14786 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
14787 }
14788
14789 /*
14790 * Function: sdrunout
14791 *
14792 * Description: Callback routine for scsi_init_pkt when a resource allocation
14793 * fails.
14794 *
14795 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
14796 * soft state instance.
14797 *
14798 * Return Code: The scsi_init_pkt routine allows for the callback function to
14799 * return a 0 indicating the callback should be rescheduled or a 1
14800 * indicating not to reschedule. This routine always returns 1
14801 * because the driver always provides a callback function to
14802 * scsi_init_pkt. This results in a callback always being scheduled
14803 * (via the scsi_init_pkt callback implementation) if a resource
14804 * failure occurs.
14805 *
14806 * Context: This callback function may not block or call routines that block
14807 *
14808 * Note: Using the scsi_init_pkt callback facility can result in an I/O
14809 * request persisting at the head of the list which cannot be
14810 * satisfied even after multiple retries. In the future the driver
14811 * may implement some type of maximum runout count before failing
14812 * an I/O.
14813 */
14814
14815 static int
14816 sdrunout(caddr_t arg)
14817 {
14818 struct sd_lun *un = (struct sd_lun *)arg;
14819
14820 ASSERT(un != NULL);
14821 ASSERT(!mutex_owned(SD_MUTEX(un)));
14822
14823 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
14824
14825 mutex_enter(SD_MUTEX(un));
14826 sd_start_cmds(un, NULL);
14827 mutex_exit(SD_MUTEX(un));
14828 /*
14829 * This callback routine always returns 1 (i.e. do not reschedule)
14830 * because we always specify sdrunout as the callback handler for
14831 * scsi_init_pkt inside the call to sd_start_cmds.
14832 */
14833 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
14834 return (1);
14835 }
14836
14837
14838 /*
14839 * Function: sdintr
14840 *
14841 * Description: Completion callback routine for scsi_pkt(9S) structs
14842 * sent to the HBA driver via scsi_transport(9F).
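 * On completion the routine first handles CMD_DEV_GONE, then any
 * auto-request sense data (STATE_ARQ_DONE), then the driver's own
 * REQUEST SENSE packet (FLAG_SENSING), and then the common case of a
 * successful CMD_CMPLT completion; all other pkt_reason/pkt_status
 * combinations are decoded last.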
14843 * 14844 * Context: Interrupt context 14845 */ 14846 14847 static void 14848 sdintr(struct scsi_pkt *pktp) 14849 { 14850 struct buf *bp; 14851 struct sd_xbuf *xp; 14852 struct sd_lun *un; 14853 size_t actual_len; 14854 14855 ASSERT(pktp != NULL); 14856 bp = (struct buf *)pktp->pkt_private; 14857 ASSERT(bp != NULL); 14858 xp = SD_GET_XBUF(bp); 14859 ASSERT(xp != NULL); 14860 ASSERT(xp->xb_pktp != NULL); 14861 un = SD_GET_UN(bp); 14862 ASSERT(un != NULL); 14863 ASSERT(!mutex_owned(SD_MUTEX(un))); 14864 14865 #ifdef SD_FAULT_INJECTION 14866 14867 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14868 /* SD FaultInjection */ 14869 sd_faultinjection(pktp); 14870 14871 #endif /* SD_FAULT_INJECTION */ 14872 14873 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14874 " xp:0x%p, un:0x%p\n", bp, xp, un); 14875 14876 mutex_enter(SD_MUTEX(un)); 14877 14878 /* Reduce the count of the #commands currently in transport */ 14879 un->un_ncmds_in_transport--; 14880 ASSERT(un->un_ncmds_in_transport >= 0); 14881 14882 /* Increment counter to indicate that the callback routine is active */ 14883 un->un_in_callback++; 14884 14885 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14886 14887 #ifdef SDDEBUG 14888 if (bp == un->un_retry_bp) { 14889 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14890 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14891 un, un->un_retry_bp, un->un_ncmds_in_transport); 14892 } 14893 #endif 14894 14895 /* 14896 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14897 * state if needed. 14898 */ 14899 if (pktp->pkt_reason == CMD_DEV_GONE) { 14900 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14901 "Command failed to complete...Device is gone\n"); 14902 if (un->un_mediastate != DKIO_DEV_GONE) { 14903 un->un_mediastate = DKIO_DEV_GONE; 14904 cv_broadcast(&un->un_state_cv); 14905 } 14906 sd_return_failed_command(un, bp, EIO); 14907 goto exit; 14908 } 14909 14910 if (pktp->pkt_state & STATE_XARQ_DONE) { 14911 SD_TRACE(SD_LOG_COMMON, un, 14912 "sdintr: extra sense data received. pkt=%p\n", pktp); 14913 } 14914 14915 /* 14916 * First see if the pkt has auto-request sense data with it.... 14917 * Look at the packet state first so we don't take a performance 14918 * hit looking at the arq enabled flag unless absolutely necessary. 14919 */ 14920 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14921 (un->un_f_arq_enabled == TRUE)) { 14922 /* 14923 * The HBA did an auto request sense for this command so check 14924 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14925 * driver command that should not be retried. 14926 */ 14927 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14928 /* 14929 * Save the relevant sense info into the xp for the 14930 * original cmd. 
14931 */ 14932 struct scsi_arq_status *asp; 14933 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14934 xp->xb_sense_status = 14935 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14936 xp->xb_sense_state = asp->sts_rqpkt_state; 14937 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14938 if (pktp->pkt_state & STATE_XARQ_DONE) { 14939 actual_len = MAX_SENSE_LENGTH - 14940 xp->xb_sense_resid; 14941 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14942 MAX_SENSE_LENGTH); 14943 } else { 14944 if (xp->xb_sense_resid > SENSE_LENGTH) { 14945 actual_len = MAX_SENSE_LENGTH - 14946 xp->xb_sense_resid; 14947 } else { 14948 actual_len = SENSE_LENGTH - 14949 xp->xb_sense_resid; 14950 } 14951 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14952 if ((((struct uscsi_cmd *) 14953 (xp->xb_pktinfo))->uscsi_rqlen) > 14954 actual_len) { 14955 xp->xb_sense_resid = 14956 (((struct uscsi_cmd *) 14957 (xp->xb_pktinfo))-> 14958 uscsi_rqlen) - actual_len; 14959 } else { 14960 xp->xb_sense_resid = 0; 14961 } 14962 } 14963 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14964 SENSE_LENGTH); 14965 } 14966 14967 /* fail the command */ 14968 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14969 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14970 sd_return_failed_command(un, bp, EIO); 14971 goto exit; 14972 } 14973 14974 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14975 /* 14976 * We want to either retry or fail this command, so free 14977 * the DMA resources here. If we retry the command then 14978 * the DMA resources will be reallocated in sd_start_cmds(). 14979 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14980 * causes the *entire* transfer to start over again from the 14981 * beginning of the request, even for PARTIAL chunks that 14982 * have already transferred successfully. 14983 */ 14984 if ((un->un_f_is_fibre == TRUE) && 14985 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14986 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14987 scsi_dmafree(pktp); 14988 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14989 } 14990 #endif 14991 14992 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14993 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14994 14995 sd_handle_auto_request_sense(un, bp, xp, pktp); 14996 goto exit; 14997 } 14998 14999 /* Next see if this is the REQUEST SENSE pkt for the instance */ 15000 if (pktp->pkt_flags & FLAG_SENSING) { 15001 /* This pktp is from the unit's REQUEST_SENSE command */ 15002 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15003 "sdintr: sd_handle_request_sense\n"); 15004 sd_handle_request_sense(un, bp, xp, pktp); 15005 goto exit; 15006 } 15007 15008 /* 15009 * Check to see if the command successfully completed as requested; 15010 * this is the most common case (and also the hot performance path). 15011 * 15012 * Requirements for successful completion are: 15013 * pkt_reason is CMD_CMPLT and packet status is status good. 15014 * In addition: 15015 * - A residual of zero indicates successful completion no matter what 15016 * the command is. 15017 * - If the residual is not zero and the command is not a read or 15018 * write, then it's still defined as successful completion. In other 15019 * words, if the command is a read or write the residual must be 15020 * zero for successful completion. 15021 * - If the residual is not zero and the command is a read or 15022 * write, and it's a USCSICMD, then it's still defined as 15023 * successful completion. 
15024 */ 15025 if ((pktp->pkt_reason == CMD_CMPLT) && 15026 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 15027 15028 /* 15029 * Since this command is returned with a good status, we 15030 * can reset the count for Sonoma failover. 15031 */ 15032 un->un_sonoma_failure_count = 0; 15033 15034 /* 15035 * Return all USCSI commands on good status 15036 */ 15037 if (pktp->pkt_resid == 0) { 15038 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15039 "sdintr: returning command for resid == 0\n"); 15040 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 15041 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 15042 SD_UPDATE_B_RESID(bp, pktp); 15043 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15044 "sdintr: returning command for resid != 0\n"); 15045 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15046 SD_UPDATE_B_RESID(bp, pktp); 15047 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15048 "sdintr: returning uscsi command\n"); 15049 } else { 15050 goto not_successful; 15051 } 15052 sd_return_command(un, bp); 15053 15054 /* 15055 * Decrement counter to indicate that the callback routine 15056 * is done. 15057 */ 15058 un->un_in_callback--; 15059 ASSERT(un->un_in_callback >= 0); 15060 mutex_exit(SD_MUTEX(un)); 15061 15062 return; 15063 } 15064 15065 not_successful: 15066 15067 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15068 /* 15069 * The following is based upon knowledge of the underlying transport 15070 * and its use of DMA resources. This code should be removed when 15071 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15072 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15073 * and sd_start_cmds(). 15074 * 15075 * Free any DMA resources associated with this command if there 15076 * is a chance it could be retried or enqueued for later retry. 15077 * If we keep the DMA binding then mpxio cannot reissue the 15078 * command on another path whenever a path failure occurs. 15079 * 15080 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15081 * causes the *entire* transfer to start over again from the 15082 * beginning of the request, even for PARTIAL chunks that 15083 * have already transferred successfully. 15084 * 15085 * This is only done for non-uscsi commands (and also skipped for the 15086 * driver's internal RQS command). Also just do this for Fibre Channel 15087 * devices as these are the only ones that support mpxio. 15088 */ 15089 if ((un->un_f_is_fibre == TRUE) && 15090 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15091 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15092 scsi_dmafree(pktp); 15093 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15094 } 15095 #endif 15096 15097 /* 15098 * The command did not successfully complete as requested so check 15099 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15100 * driver command that should not be retried so just return. If 15101 * FLAG_DIAGNOSE is not set the error will be processed below. 15102 */ 15103 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15104 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15105 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15106 /* 15107 * Issue a request sense if a check condition caused the error 15108 * (we handle the auto request sense case above), otherwise 15109 * just fail the command. 
15110 */ 15111 if ((pktp->pkt_reason == CMD_CMPLT) && 15112 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15113 sd_send_request_sense_command(un, bp, pktp); 15114 } else { 15115 sd_return_failed_command(un, bp, EIO); 15116 } 15117 goto exit; 15118 } 15119 15120 /* 15121 * The command did not successfully complete as requested so process 15122 * the error, retry, and/or attempt recovery. 15123 */ 15124 switch (pktp->pkt_reason) { 15125 case CMD_CMPLT: 15126 switch (SD_GET_PKT_STATUS(pktp)) { 15127 case STATUS_GOOD: 15128 /* 15129 * The command completed successfully with a non-zero 15130 * residual 15131 */ 15132 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15133 "sdintr: STATUS_GOOD \n"); 15134 sd_pkt_status_good(un, bp, xp, pktp); 15135 break; 15136 15137 case STATUS_CHECK: 15138 case STATUS_TERMINATED: 15139 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15140 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15141 sd_pkt_status_check_condition(un, bp, xp, pktp); 15142 break; 15143 15144 case STATUS_BUSY: 15145 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15146 "sdintr: STATUS_BUSY\n"); 15147 sd_pkt_status_busy(un, bp, xp, pktp); 15148 break; 15149 15150 case STATUS_RESERVATION_CONFLICT: 15151 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15152 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15153 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15154 break; 15155 15156 case STATUS_QFULL: 15157 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15158 "sdintr: STATUS_QFULL\n"); 15159 sd_pkt_status_qfull(un, bp, xp, pktp); 15160 break; 15161 15162 case STATUS_MET: 15163 case STATUS_INTERMEDIATE: 15164 case STATUS_SCSI2: 15165 case STATUS_INTERMEDIATE_MET: 15166 case STATUS_ACA_ACTIVE: 15167 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15168 "Unexpected SCSI status received: 0x%x\n", 15169 SD_GET_PKT_STATUS(pktp)); 15170 sd_return_failed_command(un, bp, EIO); 15171 break; 15172 15173 default: 15174 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15175 "Invalid SCSI status received: 0x%x\n", 15176 SD_GET_PKT_STATUS(pktp)); 15177 sd_return_failed_command(un, bp, EIO); 15178 break; 15179 15180 } 15181 break; 15182 15183 case CMD_INCOMPLETE: 15184 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15185 "sdintr: CMD_INCOMPLETE\n"); 15186 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15187 break; 15188 case CMD_TRAN_ERR: 15189 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15190 "sdintr: CMD_TRAN_ERR\n"); 15191 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15192 break; 15193 case CMD_RESET: 15194 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15195 "sdintr: CMD_RESET \n"); 15196 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15197 break; 15198 case CMD_ABORTED: 15199 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15200 "sdintr: CMD_ABORTED \n"); 15201 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15202 break; 15203 case CMD_TIMEOUT: 15204 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15205 "sdintr: CMD_TIMEOUT\n"); 15206 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15207 break; 15208 case CMD_UNX_BUS_FREE: 15209 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15210 "sdintr: CMD_UNX_BUS_FREE \n"); 15211 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15212 break; 15213 case CMD_TAG_REJECT: 15214 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15215 "sdintr: CMD_TAG_REJECT\n"); 15216 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15217 break; 15218 default: 15219 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15220 "sdintr: default\n"); 15221 sd_pkt_reason_default(un, bp, xp, pktp); 15222 break; 15223 } 15224 15225 exit: 15226 
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
15227
15228 /* Decrement counter to indicate that the callback routine is done. */
15229 un->un_in_callback--;
15230 ASSERT(un->un_in_callback >= 0);
15231
15232 /*
15233 * At this point, the pkt has been dispatched, ie, it is either
15234 * being re-tried or has been returned to its caller and should
15235 * not be referenced.
15236 */
15237
15238 mutex_exit(SD_MUTEX(un));
15239 }
15240
15241
15242 /*
15243 * Function: sd_print_incomplete_msg
15244 *
15245 * Description: Prints the error message for a CMD_INCOMPLETE error.
15246 *
15247 * Arguments: un - ptr to associated softstate for the device.
15248 * bp - ptr to the buf(9S) for the command.
15249 * arg - message string ptr
15250 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
15251 * or SD_NO_RETRY_ISSUED.
15252 *
15253 * Context: May be called under interrupt context
15254 */
15255
15256 static void
15257 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
15258 {
15259 struct scsi_pkt *pktp;
15260 char *msgp;
15261 char *cmdp = arg;
15262
15263 ASSERT(un != NULL);
15264 ASSERT(mutex_owned(SD_MUTEX(un)));
15265 ASSERT(bp != NULL);
15266 ASSERT(arg != NULL);
15267 pktp = SD_GET_PKTP(bp);
15268 ASSERT(pktp != NULL);
15269
15270 switch (code) {
15271 case SD_DELAYED_RETRY_ISSUED:
15272 case SD_IMMEDIATE_RETRY_ISSUED:
15273 msgp = "retrying";
15274 break;
15275 case SD_NO_RETRY_ISSUED:
15276 default:
15277 msgp = "giving up";
15278 break;
15279 }
15280
15281 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
15282 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15283 "incomplete %s- %s\n", cmdp, msgp);
15284 }
15285 }
15286
15287
15288
15289 /*
15290 * Function: sd_pkt_status_good
15291 *
15292 * Description: Processing for a STATUS_GOOD code in pkt_status.
15293 *
15294 * Context: May be called under interrupt context
15295 */
15296
15297 static void
15298 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
15299 struct sd_xbuf *xp, struct scsi_pkt *pktp)
15300 {
15301 char *cmdp;
15302
15303 ASSERT(un != NULL);
15304 ASSERT(mutex_owned(SD_MUTEX(un)));
15305 ASSERT(bp != NULL);
15306 ASSERT(xp != NULL);
15307 ASSERT(pktp != NULL);
15308 ASSERT(pktp->pkt_reason == CMD_CMPLT);
15309 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
15310 ASSERT(pktp->pkt_resid != 0);
15311
15312 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
15313
15314 SD_UPDATE_ERRSTATS(un, sd_harderrs);
15315 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
15316 case SCMD_READ:
15317 cmdp = "read";
15318 break;
15319 case SCMD_WRITE:
15320 cmdp = "write";
15321 break;
15322 default:
15323 SD_UPDATE_B_RESID(bp, pktp);
15324 sd_return_command(un, bp);
15325 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
15326 return;
15327 }
15328
15329 /*
15330 * See if we can retry the read/write, preferably immediately.
15331 * If retries are exhausted, then sd_retry_command() will update
15332 * the b_resid count.
15333 */
15334 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
15335 cmdp, EIO, (clock_t)0, NULL);
15336
15337 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
15338 }
15339
15340
15341
15342
15343
15344 /*
15345 * Function: sd_handle_request_sense
15346 *
15347 * Description: Processing for non-auto Request Sense command.
15348 * 15349 * Arguments: un - ptr to associated softstate 15350 * sense_bp - ptr to buf(9S) for the RQS command 15351 * sense_xp - ptr to the sd_xbuf for the RQS command 15352 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15353 * 15354 * Context: May be called under interrupt context 15355 */ 15356 15357 static void 15358 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15359 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15360 { 15361 struct buf *cmd_bp; /* buf for the original command */ 15362 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15363 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15364 size_t actual_len; /* actual sense data length */ 15365 15366 ASSERT(un != NULL); 15367 ASSERT(mutex_owned(SD_MUTEX(un))); 15368 ASSERT(sense_bp != NULL); 15369 ASSERT(sense_xp != NULL); 15370 ASSERT(sense_pktp != NULL); 15371 15372 /* 15373 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15374 * RQS command and not the original command. 15375 */ 15376 ASSERT(sense_pktp == un->un_rqs_pktp); 15377 ASSERT(sense_bp == un->un_rqs_bp); 15378 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15379 (FLAG_SENSING | FLAG_HEAD)); 15380 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15381 FLAG_SENSING) == FLAG_SENSING); 15382 15383 /* These are the bp, xp, and pktp for the original command */ 15384 cmd_bp = sense_xp->xb_sense_bp; 15385 cmd_xp = SD_GET_XBUF(cmd_bp); 15386 cmd_pktp = SD_GET_PKTP(cmd_bp); 15387 15388 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15389 /* 15390 * The REQUEST SENSE command failed. Release the REQUEST 15391 * SENSE command for re-use, get back the bp for the original 15392 * command, and attempt to re-try the original command if 15393 * FLAG_DIAGNOSE is not set in the original packet. 15394 */ 15395 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15396 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15397 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15398 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15399 NULL, NULL, EIO, (clock_t)0, NULL); 15400 return; 15401 } 15402 } 15403 15404 /* 15405 * Save the relevant sense info into the xp for the original cmd. 15406 * 15407 * Note: if the request sense failed the state info will be zero 15408 * as set in sd_mark_rqs_busy() 15409 */ 15410 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15411 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15412 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 15413 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 15414 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 15415 SENSE_LENGTH)) { 15416 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15417 MAX_SENSE_LENGTH); 15418 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15419 } else { 15420 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15421 SENSE_LENGTH); 15422 if (actual_len < SENSE_LENGTH) { 15423 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 15424 } else { 15425 cmd_xp->xb_sense_resid = 0; 15426 } 15427 } 15428 15429 /* 15430 * Free up the RQS command.... 15431 * NOTE: 15432 * Must do this BEFORE calling sd_validate_sense_data! 15433 * sd_validate_sense_data may return the original command in 15434 * which case the pkt will be freed and the flags can no 15435 * longer be touched. 15436 * SD_MUTEX is held through this process until the command 15437 * is dispatched based upon the sense data, so there are 15438 * no race conditions. 
15439 */ 15440 (void) sd_mark_rqs_idle(un, sense_xp); 15441 15442 /* 15443 * For a retryable command see if we have valid sense data, if so then 15444 * turn it over to sd_decode_sense() to figure out the right course of 15445 * action. Just fail a non-retryable command. 15446 */ 15447 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15448 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 15449 SD_SENSE_DATA_IS_VALID) { 15450 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15451 } 15452 } else { 15453 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15454 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15455 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15456 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15457 sd_return_failed_command(un, cmd_bp, EIO); 15458 } 15459 } 15460 15461 15462 15463 15464 /* 15465 * Function: sd_handle_auto_request_sense 15466 * 15467 * Description: Processing for auto-request sense information. 15468 * 15469 * Arguments: un - ptr to associated softstate 15470 * bp - ptr to buf(9S) for the command 15471 * xp - ptr to the sd_xbuf for the command 15472 * pktp - ptr to the scsi_pkt(9S) for the command 15473 * 15474 * Context: May be called under interrupt context 15475 */ 15476 15477 static void 15478 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15479 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15480 { 15481 struct scsi_arq_status *asp; 15482 size_t actual_len; 15483 15484 ASSERT(un != NULL); 15485 ASSERT(mutex_owned(SD_MUTEX(un))); 15486 ASSERT(bp != NULL); 15487 ASSERT(xp != NULL); 15488 ASSERT(pktp != NULL); 15489 ASSERT(pktp != un->un_rqs_pktp); 15490 ASSERT(bp != un->un_rqs_bp); 15491 15492 /* 15493 * For auto-request sense, we get a scsi_arq_status back from 15494 * the HBA, with the sense data in the sts_sensedata member. 15495 * The pkt_scbp of the packet points to this scsi_arq_status. 15496 */ 15497 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15498 15499 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15500 /* 15501 * The auto REQUEST SENSE failed; see if we can re-try 15502 * the original command. 15503 */ 15504 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15505 "auto request sense failed (reason=%s)\n", 15506 scsi_rname(asp->sts_rqpkt_reason)); 15507 15508 sd_reset_target(un, pktp); 15509 15510 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15511 NULL, NULL, EIO, (clock_t)0, NULL); 15512 return; 15513 } 15514 15515 /* Save the relevant sense info into the xp for the original cmd. 
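 * The scsi_arq_status returned by the HBA carries the status, state,
 * and residual of its internal REQUEST SENSE packet (the sts_rqpkt_*
 * fields) along with the sense bytes themselves (sts_sensedata).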
*/ 15516 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15517 xp->xb_sense_state = asp->sts_rqpkt_state; 15518 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15519 if (xp->xb_sense_state & STATE_XARQ_DONE) { 15520 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15521 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15522 MAX_SENSE_LENGTH); 15523 } else { 15524 if (xp->xb_sense_resid > SENSE_LENGTH) { 15525 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15526 } else { 15527 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 15528 } 15529 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15530 if ((((struct uscsi_cmd *) 15531 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 15532 xp->xb_sense_resid = (((struct uscsi_cmd *) 15533 (xp->xb_pktinfo))->uscsi_rqlen) - 15534 actual_len; 15535 } else { 15536 xp->xb_sense_resid = 0; 15537 } 15538 } 15539 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 15540 } 15541 15542 /* 15543 * See if we have valid sense data, if so then turn it over to 15544 * sd_decode_sense() to figure out the right course of action. 15545 */ 15546 if (sd_validate_sense_data(un, bp, xp, actual_len) == 15547 SD_SENSE_DATA_IS_VALID) { 15548 sd_decode_sense(un, bp, xp, pktp); 15549 } 15550 } 15551 15552 15553 /* 15554 * Function: sd_print_sense_failed_msg 15555 * 15556 * Description: Print log message when RQS has failed. 15557 * 15558 * Arguments: un - ptr to associated softstate 15559 * bp - ptr to buf(9S) for the command 15560 * arg - generic message string ptr 15561 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15562 * or SD_NO_RETRY_ISSUED 15563 * 15564 * Context: May be called from interrupt context 15565 */ 15566 15567 static void 15568 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15569 int code) 15570 { 15571 char *msgp = arg; 15572 15573 ASSERT(un != NULL); 15574 ASSERT(mutex_owned(SD_MUTEX(un))); 15575 ASSERT(bp != NULL); 15576 15577 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15578 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15579 } 15580 } 15581 15582 15583 /* 15584 * Function: sd_validate_sense_data 15585 * 15586 * Description: Check the given sense data for validity. 15587 * If the sense data is not valid, the command will 15588 * be either failed or retried! 15589 * 15590 * Return Code: SD_SENSE_DATA_IS_INVALID 15591 * SD_SENSE_DATA_IS_VALID 15592 * 15593 * Context: May be called from interrupt context 15594 */ 15595 15596 static int 15597 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15598 size_t actual_len) 15599 { 15600 struct scsi_extended_sense *esp; 15601 struct scsi_pkt *pktp; 15602 char *msgp = NULL; 15603 15604 ASSERT(un != NULL); 15605 ASSERT(mutex_owned(SD_MUTEX(un))); 15606 ASSERT(bp != NULL); 15607 ASSERT(bp != un->un_rqs_bp); 15608 ASSERT(xp != NULL); 15609 15610 pktp = SD_GET_PKTP(bp); 15611 ASSERT(pktp != NULL); 15612 15613 /* 15614 * Check the status of the RQS command (auto or manual). 
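 * Anything other than STATUS_GOOD on the sense command itself means
 * the sense bytes cannot be trusted, so the original command is
 * retried or failed rather than decoded.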
15615 */
15616 switch (xp->xb_sense_status & STATUS_MASK) {
15617 case STATUS_GOOD:
15618 break;
15619
15620 case STATUS_RESERVATION_CONFLICT:
15621 sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
15622 return (SD_SENSE_DATA_IS_INVALID);
15623
15624 case STATUS_BUSY:
15625 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15626 "Busy Status on REQUEST SENSE\n");
15627 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
15628 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter);
15629 return (SD_SENSE_DATA_IS_INVALID);
15630
15631 case STATUS_QFULL:
15632 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15633 "QFULL Status on REQUEST SENSE\n");
15634 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
15635 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter);
15636 return (SD_SENSE_DATA_IS_INVALID);
15637
15638 case STATUS_CHECK:
15639 case STATUS_TERMINATED:
15640 msgp = "Check Condition on REQUEST SENSE\n";
15641 goto sense_failed;
15642
15643 default:
15644 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
15645 goto sense_failed;
15646 }
15647
15648 /*
15649 * See if we got the minimum required amount of sense data.
15650 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
15651 * or less.
15652 */
15653 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
15654 (actual_len == 0)) {
15655 msgp = "Request Sense couldn't get sense data\n";
15656 goto sense_failed;
15657 }
15658
15659 if (actual_len < SUN_MIN_SENSE_LENGTH) {
15660 msgp = "Not enough sense information\n";
15661 goto sense_failed;
15662 }
15663
15664 /*
15665 * We require the extended sense data
15666 */
15667 esp = (struct scsi_extended_sense *)xp->xb_sense_data;
15668 if (esp->es_class != CLASS_EXTENDED_SENSE) {
15669 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
15670 static char tmp[8];
15671 static char buf[148];
15672 char *p = (char *)(xp->xb_sense_data);
15673 int i;
15674
15675 mutex_enter(&sd_sense_mutex);
15676 (void) strcpy(buf, "undecodable sense information:");
15677 for (i = 0; i < actual_len; i++) {
15678 (void) sprintf(tmp, " 0x%x", *(p++)&0xff);
15679 (void) strcpy(&buf[strlen(buf)], tmp);
15680 }
15681 i = strlen(buf);
15682 (void) strcpy(&buf[i], "-(assumed fatal)\n");
15683 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf);
15684 mutex_exit(&sd_sense_mutex);
15685 }
15686 /* Note: Legacy behavior, fail the command with no retry */
15687 sd_return_failed_command(un, bp, EIO);
15688 return (SD_SENSE_DATA_IS_INVALID);
15689 }
15690
15691 /*
15692 * Check that es_code is valid (es_class concatenated with es_code
15693 * make up the "response code" field). es_class will always be 7, so
15694 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the
15695 * format.
15696 */
15697 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
15698 (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
15699 (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
15700 (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
15701 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
15702 goto sense_failed;
15703 }
15704
15705 return (SD_SENSE_DATA_IS_VALID);
15706
15707 sense_failed:
15708 /*
15709 * If the request sense failed (for whatever reason), attempt
15710 * to retry the original command.
15711 */
15712 #if defined(__i386) || defined(__amd64)
15713 /*
15714 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
15715 * sddef.h for the Sparc platform, and x86 uses 1 binary
15716 * for both SCSI/FC.
The SD_RETRY_DELAY value needs to be adjusted here
15718 * when SD_RETRY_DELAY changes in sddef.h
15719 */
15720 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15721 sd_print_sense_failed_msg, msgp, EIO,
15722 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL);
15723 #else
15724 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15725 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
15726 #endif
15727
15728 return (SD_SENSE_DATA_IS_INVALID);
15729 }
15730
15731
15732
15733 /*
15734 * Function: sd_decode_sense
15735 *
15736 * Description: Take recovery action(s) when SCSI Sense Data is received.
15737 *
15738 * Context: Interrupt context.
15739 */
15740
15741 static void
15742 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
15743 struct scsi_pkt *pktp)
15744 {
15745 uint8_t sense_key;
15746
15747 ASSERT(un != NULL);
15748 ASSERT(mutex_owned(SD_MUTEX(un)));
15749 ASSERT(bp != NULL);
15750 ASSERT(bp != un->un_rqs_bp);
15751 ASSERT(xp != NULL);
15752 ASSERT(pktp != NULL);
15753
15754 sense_key = scsi_sense_key(xp->xb_sense_data);
15755
15756 switch (sense_key) {
15757 case KEY_NO_SENSE:
15758 sd_sense_key_no_sense(un, bp, xp, pktp);
15759 break;
15760 case KEY_RECOVERABLE_ERROR:
15761 sd_sense_key_recoverable_error(un, xp->xb_sense_data,
15762 bp, xp, pktp);
15763 break;
15764 case KEY_NOT_READY:
15765 sd_sense_key_not_ready(un, xp->xb_sense_data,
15766 bp, xp, pktp);
15767 break;
15768 case KEY_MEDIUM_ERROR:
15769 case KEY_HARDWARE_ERROR:
15770 sd_sense_key_medium_or_hardware_error(un,
15771 xp->xb_sense_data, bp, xp, pktp);
15772 break;
15773 case KEY_ILLEGAL_REQUEST:
15774 sd_sense_key_illegal_request(un, bp, xp, pktp);
15775 break;
15776 case KEY_UNIT_ATTENTION:
15777 sd_sense_key_unit_attention(un, xp->xb_sense_data,
15778 bp, xp, pktp);
15779 break;
15780 case KEY_WRITE_PROTECT:
15781 case KEY_VOLUME_OVERFLOW:
15782 case KEY_MISCOMPARE:
15783 sd_sense_key_fail_command(un, bp, xp, pktp);
15784 break;
15785 case KEY_BLANK_CHECK:
15786 sd_sense_key_blank_check(un, bp, xp, pktp);
15787 break;
15788 case KEY_ABORTED_COMMAND:
15789 sd_sense_key_aborted_command(un, bp, xp, pktp);
15790 break;
15791 case KEY_VENDOR_UNIQUE:
15792 case KEY_COPY_ABORTED:
15793 case KEY_EQUAL:
15794 case KEY_RESERVED:
15795 default:
15796 sd_sense_key_default(un, xp->xb_sense_data,
15797 bp, xp, pktp);
15798 break;
15799 }
15800 }
15801
15802
15803 /*
15804 * Function: sd_dump_memory
15805 *
15806 * Description: Debug logging routine to print the contents of a user provided
15807 * buffer. The output of the buffer is broken up into 256 byte
15808 * segments due to a size constraint of the scsi_log
15809 * implementation.
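 *
 * A typical call, as made from sd_print_sense_msg():
 *
 * sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
 *     (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);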
15810 *
15811 * Arguments: un - ptr to softstate
15812 * comp - component mask
15813 * title - "title" string to precede data when printed
15814 * data - ptr to data block to be printed
15815 * len - size of data block to be printed
15816 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
15817 *
15818 * Context: May be called from interrupt context
15819 */
15820
15821 #define SD_DUMP_MEMORY_BUF_SIZE 256
15822
15823 static char *sd_dump_format_string[] = {
15824 " 0x%02x",
15825 " %c"
15826 };
15827
15828 static void
15829 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
15830 int len, int fmt)
15831 {
15832 int i, j;
15833 int avail_count;
15834 int start_offset;
15835 int end_offset;
15836 size_t entry_len;
15837 char *bufp;
15838 char *local_buf;
15839 char *format_string;
15840
15841 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
15842
15843 /*
15844 * In the debug version of the driver, this function is called from a
15845 * number of places which are NOPs in the release driver.
15846 * The debug driver therefore has additional methods of filtering
15847 * debug output.
15848 */
15849 #ifdef SDDEBUG
15850 /*
15851 * In the debug version of the driver we can reduce the amount of debug
15852 * messages by setting sd_error_level to something other than
15853 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
15854 * sd_component_mask.
15855 */
15856 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
15857 (sd_error_level != SCSI_ERR_ALL)) {
15858 return;
15859 }
15860 if (((sd_component_mask & comp) == 0) ||
15861 (sd_error_level != SCSI_ERR_ALL)) {
15862 return;
15863 }
15864 #else
15865 if (sd_error_level != SCSI_ERR_ALL) {
15866 return;
15867 }
15868 #endif
15869
15870 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
15871 bufp = local_buf;
15872 /*
15873 * Available length is the length of local_buf[], minus the
15874 * length of the title string, minus one for the ":", minus
15875 * one for the newline, minus one for the NULL terminator.
15876 * This gives the #bytes available for holding the printed
15877 * values from the given data buffer.
15878 */
15879 if (fmt == SD_LOG_HEX) {
15880 format_string = sd_dump_format_string[0];
15881 } else /* SD_LOG_CHAR */ {
15882 format_string = sd_dump_format_string[1];
15883 }
15884 /*
15885 * Available count is the number of elements from the given
15886 * data buffer that we can fit into the available length.
15887 * This is based upon the size of the format string used.
15888 * Make one entry and find its size.
15889 */
15890 (void) sprintf(bufp, format_string, data[0]);
15891 entry_len = strlen(bufp);
15892 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
15893
15894 j = 0;
15895 while (j < len) {
15896 bufp = local_buf;
15897 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
15898 start_offset = j;
15899
15900 end_offset = start_offset + avail_count;
15901
15902 (void) sprintf(bufp, "%s:", title);
15903 bufp += strlen(bufp);
15904 for (i = start_offset; ((i < end_offset) && (j < len));
15905 i++, j++) {
15906 (void) sprintf(bufp, format_string, data[i]);
15907 bufp += entry_len;
15908 }
15909 (void) sprintf(bufp, "\n");
15910
15911 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
15912 }
15913 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
15914 }
15915
15916 /*
15917 * Function: sd_print_sense_msg
15918 *
15919 * Description: Log a message based upon the given sense data.
15920 *
15921 * Arguments: un - ptr to associated softstate
15922 * bp - ptr to buf(9S) for the command
15923 * arg - ptr to associated sd_sense_info struct
15924 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
15925 * or SD_NO_RETRY_ISSUED
15926 *
15927 * Context: May be called from interrupt context
15928 */
15929
15930 static void
15931 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
15932 {
15933 struct sd_xbuf *xp;
15934 struct scsi_pkt *pktp;
15935 uint8_t *sensep;
15936 daddr_t request_blkno;
15937 diskaddr_t err_blkno;
15938 int severity;
15939 int pfa_flag;
15940 extern struct scsi_key_strings scsi_cmds[];
15941
15942 ASSERT(un != NULL);
15943 ASSERT(mutex_owned(SD_MUTEX(un)));
15944 ASSERT(bp != NULL);
15945 xp = SD_GET_XBUF(bp);
15946 ASSERT(xp != NULL);
15947 pktp = SD_GET_PKTP(bp);
15948 ASSERT(pktp != NULL);
15949 ASSERT(arg != NULL);
15950
15951 severity = ((struct sd_sense_info *)(arg))->ssi_severity;
15952 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
15953
15954 if ((code == SD_DELAYED_RETRY_ISSUED) ||
15955 (code == SD_IMMEDIATE_RETRY_ISSUED)) {
15956 severity = SCSI_ERR_RETRYABLE;
15957 }
15958
15959 /* Use absolute block number for the request block number */
15960 request_blkno = xp->xb_blkno;
15961
15962 /*
15963 * Now try to get the error block number from the sense data
15964 */
15965 sensep = xp->xb_sense_data;
15966
15967 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
15968 (uint64_t *)&err_blkno)) {
15969 /*
15970 * We retrieved the error block number from the information
15971 * portion of the sense data.
15972 *
15973 * For USCSI commands we are better off using the error
15974 * block no. as the requested block no. (This is the best
15975 * we can estimate.)
15976 */
15977 if ((SD_IS_BUFIO(xp) == FALSE) &&
15978 ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
15979 request_blkno = err_blkno;
15980 }
15981 } else {
15982 /*
15983 * Without the es_valid bit set (for fixed format) or an
15984 * information descriptor (for descriptor format) we cannot
15985 * be certain of the error blkno, so just use the
15986 * request_blkno.
15987 */
15988 err_blkno = (diskaddr_t)request_blkno;
15989 }
15990
15991 /*
15992 * The following will log the buffer contents for the release driver
15993 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
15994 * level is set to verbose.
15995 */ 15996 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15997 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15998 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15999 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 16000 16001 if (pfa_flag == FALSE) { 16002 /* This is normally only set for USCSI */ 16003 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 16004 return; 16005 } 16006 16007 if ((SD_IS_BUFIO(xp) == TRUE) && 16008 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 16009 (severity < sd_error_level))) { 16010 return; 16011 } 16012 } 16013 16014 /* 16015 * Check for Sonoma Failover and keep a count of how many failed I/O's 16016 */ 16017 if ((SD_IS_LSI(un)) && 16018 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 16019 (scsi_sense_asc(sensep) == 0x94) && 16020 (scsi_sense_ascq(sensep) == 0x01)) { 16021 un->un_sonoma_failure_count++; 16022 if (un->un_sonoma_failure_count > 1) { 16023 return; 16024 } 16025 } 16026 16027 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 16028 request_blkno, err_blkno, scsi_cmds, 16029 (struct scsi_extended_sense *)sensep, 16030 un->un_additional_codes, NULL); 16031 } 16032 16033 /* 16034 * Function: sd_sense_key_no_sense 16035 * 16036 * Description: Recovery action when sense data was not received. 16037 * 16038 * Context: May be called from interrupt context 16039 */ 16040 16041 static void 16042 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 16043 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16044 { 16045 struct sd_sense_info si; 16046 16047 ASSERT(un != NULL); 16048 ASSERT(mutex_owned(SD_MUTEX(un))); 16049 ASSERT(bp != NULL); 16050 ASSERT(xp != NULL); 16051 ASSERT(pktp != NULL); 16052 16053 si.ssi_severity = SCSI_ERR_FATAL; 16054 si.ssi_pfa_flag = FALSE; 16055 16056 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16057 16058 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16059 &si, EIO, (clock_t)0, NULL); 16060 } 16061 16062 16063 /* 16064 * Function: sd_sense_key_recoverable_error 16065 * 16066 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 16067 * 16068 * Context: May be called from interrupt context 16069 */ 16070 16071 static void 16072 sd_sense_key_recoverable_error(struct sd_lun *un, 16073 uint8_t *sense_datap, 16074 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16075 { 16076 struct sd_sense_info si; 16077 uint8_t asc = scsi_sense_asc(sense_datap); 16078 16079 ASSERT(un != NULL); 16080 ASSERT(mutex_owned(SD_MUTEX(un))); 16081 ASSERT(bp != NULL); 16082 ASSERT(xp != NULL); 16083 ASSERT(pktp != NULL); 16084 16085 /* 16086 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 16087 */ 16088 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 16089 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16090 si.ssi_severity = SCSI_ERR_INFO; 16091 si.ssi_pfa_flag = TRUE; 16092 } else { 16093 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16094 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 16095 si.ssi_severity = SCSI_ERR_RECOVERED; 16096 si.ssi_pfa_flag = FALSE; 16097 } 16098 16099 if (pktp->pkt_resid == 0) { 16100 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16101 sd_return_command(un, bp); 16102 return; 16103 } 16104 16105 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16106 &si, EIO, (clock_t)0, NULL); 16107 } 16108 16109 16110 16111 16112 /* 16113 * Function: sd_sense_key_not_ready 16114 * 16115 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
16116 *
16117 * Context: May be called from interrupt context
16118 */
16119
16120 static void
16121 sd_sense_key_not_ready(struct sd_lun *un,
16122 uint8_t *sense_datap,
16123 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
16124 {
16125 struct sd_sense_info si;
16126 uint8_t asc = scsi_sense_asc(sense_datap);
16127 uint8_t ascq = scsi_sense_ascq(sense_datap);
16128
16129 ASSERT(un != NULL);
16130 ASSERT(mutex_owned(SD_MUTEX(un)));
16131 ASSERT(bp != NULL);
16132 ASSERT(xp != NULL);
16133 ASSERT(pktp != NULL);
16134
16135 si.ssi_severity = SCSI_ERR_FATAL;
16136 si.ssi_pfa_flag = FALSE;
16137
16138 /*
16139 * Update error stats after first NOT READY error. Disks may have
16140 * been powered down and may need to be restarted. For CDROMs,
16141 * report NOT READY errors only if media is present.
16142 */
16143 if ((ISCD(un) && (asc == 0x3A)) ||
16144 (xp->xb_nr_retry_count > 0)) {
16145 SD_UPDATE_ERRSTATS(un, sd_harderrs);
16146 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
16147 }
16148
16149 /*
16150 * Just fail if the "not ready" retry limit has been reached.
16151 */
16152 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
16153 /* Special check for error message printing for removables. */
16154 if (un->un_f_has_removable_media && (asc == 0x04) &&
16155 (ascq >= 0x04)) {
16156 si.ssi_severity = SCSI_ERR_ALL;
16157 }
16158 goto fail_command;
16159 }
16160
16161 /*
16162 * Check the ASC and ASCQ in the sense data as needed, to determine
16163 * what to do.
16164 */
16165 switch (asc) {
16166 case 0x04: /* LOGICAL UNIT NOT READY */
16167 /*
16168 * disk drives that don't spin up result in a very long delay
16169 * in format without warning messages. We will log a message
16170 * if the error level is set to verbose.
16171 */
16172 if (sd_error_level < SCSI_ERR_RETRYABLE) {
16173 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16174 "logical unit not ready, resetting disk\n");
16175 }
16176
16177 /*
16178 * There are different requirements for CDROMs and disks for
16179 * the number of retries. If a CD-ROM is giving this, it is
16180 * probably reading TOC and is in the process of getting
16181 * ready, so we should keep on trying for a long time to make
16182 * sure that all types of media are taken into account (for
16183 * some media the drive takes a long time to read TOC). For
16184 * disks we do not want to retry this too many times as this
16185 * can cause a long hang in format when the drive refuses to
16186 * spin up (a very common failure).
16187 */
16188 switch (ascq) {
16189 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */
16190 /*
16191 * Disk drives frequently refuse to spin up which
16192 * results in a very long hang in format without
16193 * warning messages.
16194 *
16195 * Note: This code preserves the legacy behavior of
16196 * comparing xb_nr_retry_count against zero for fibre
16197 * channel targets instead of comparing against the
16198 * un_reset_retry_count value. The reason for this
16199 * discrepancy has been so utterly lost beneath the
16200 * Sands of Time that even Indiana Jones could not
16201 * find it.
16202 */ 16203 if (un->un_f_is_fibre == TRUE) { 16204 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16205 (xp->xb_nr_retry_count > 0)) && 16206 (un->un_startstop_timeid == NULL)) { 16207 scsi_log(SD_DEVINFO(un), sd_label, 16208 CE_WARN, "logical unit not ready, " 16209 "resetting disk\n"); 16210 sd_reset_target(un, pktp); 16211 } 16212 } else { 16213 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16214 (xp->xb_nr_retry_count > 16215 un->un_reset_retry_count)) && 16216 (un->un_startstop_timeid == NULL)) { 16217 scsi_log(SD_DEVINFO(un), sd_label, 16218 CE_WARN, "logical unit not ready, " 16219 "resetting disk\n"); 16220 sd_reset_target(un, pktp); 16221 } 16222 } 16223 break; 16224 16225 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 16226 /* 16227 * If the target is in the process of becoming 16228 * ready, just proceed with the retry. This can 16229 * happen with CD-ROMs that take a long time to 16230 * read the TOC after a power cycle or reset. 16231 */ 16232 goto do_retry; 16233 16234 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 16235 break; 16236 16237 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 16238 /* 16239 * Retries cannot help here so just fail right away. 16240 */ 16241 goto fail_command; 16242 16243 case 0x88: 16244 /* 16245 * Vendor-unique code for T3/T4: it indicates a 16246 * path problem in a multipathed config, but as far as 16247 * the target driver is concerned it equates to a fatal 16248 * error, so we should just fail the command right away 16249 * (without printing anything to the console). If this 16250 * is not a T3/T4, fall thru to the default recovery 16251 * action. 16252 * T3/T4 is FC only, so there is no need to check is_fibre. 16253 */ 16254 if (SD_IS_T3(un) || SD_IS_T4(un)) { 16255 sd_return_failed_command(un, bp, EIO); 16256 return; 16257 } 16258 /* FALLTHRU */ 16259 16260 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 16261 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 16262 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 16263 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 16264 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 16265 default: /* Possible future codes in SCSI spec? */ 16266 /* 16267 * For removable-media devices, do not retry if 16268 * ASCQ > 2 as these result mostly from USCSI commands 16269 * on MMC devices issued to check the status of an 16270 * operation initiated in immediate mode. Also for 16271 * ASCQ >= 4 do not print console messages as these 16272 * mainly represent a user-initiated operation 16273 * instead of a system failure. 16274 */ 16275 if (un->un_f_has_removable_media) { 16276 si.ssi_severity = SCSI_ERR_ALL; 16277 goto fail_command; 16278 } 16279 break; 16280 } 16281 16282 /* 16283 * As part of our recovery attempt for the NOT READY 16284 * condition, we issue a START STOP UNIT command. However, 16285 * we want to wait for a short delay before attempting this 16286 * as there may still be more commands coming back from the 16287 * target with the check condition. To do this we use 16288 * timeout(9F) to call sd_start_stop_unit_callback() after 16289 * the delay interval expires. (sd_start_stop_unit_callback() 16290 * dispatches sd_start_stop_unit_task(), which will issue 16291 * the actual START STOP UNIT command.) The delay interval 16292 * is one-half of the delay that we will use to retry the 16293 * command that generated the NOT READY condition.
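 *
 * A sketch of the resulting hand-off chain (illustrative shape
 * only; the enable checks and bookkeeping in the real code are
 * omitted):
 *
 *	interrupt context:
 *		timeid = timeout(sd_start_stop_unit_callback, un, ticks);
 *	callout context, sd_start_stop_unit_callback():
 *		dispatch sd_start_stop_unit_task() onto a taskq;
 *	taskq context, sd_start_stop_unit_task():
 *		issue the START STOP UNIT command; this may block.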
16294 * 16295 * Note that we could just dispatch sd_start_stop_unit_task() 16296 * from here and allow it to sleep for the delay interval, 16297 * but then we would be tying up the taskq thread 16298 * unnecessarily for the duration of the delay. 16299 * 16300 * Do not issue the START STOP UNIT if the current command 16301 * is already a START STOP UNIT. 16302 */ 16303 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 16304 break; 16305 } 16306 16307 /* 16308 * Do not schedule the timeout if one is already pending. 16309 */ 16310 if (un->un_startstop_timeid != NULL) { 16311 SD_INFO(SD_LOG_ERROR, un, 16312 "sd_sense_key_not_ready: restart already issued to" 16313 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 16314 ddi_get_instance(SD_DEVINFO(un))); 16315 break; 16316 } 16317 16318 /* 16319 * Schedule the START STOP UNIT command, then queue the command 16320 * for a retry. 16321 * 16322 * Note: A timeout is not scheduled for this retry because we 16323 * want the retry to be serial with the START_STOP_UNIT. The 16324 * retry will be started when the START_STOP_UNIT is completed 16325 * in sd_start_stop_unit_task. 16326 */ 16327 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 16328 un, SD_BSY_TIMEOUT / 2); 16329 xp->xb_nr_retry_count++; 16330 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 16331 return; 16332 16333 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 16334 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16335 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16336 "unit does not respond to selection\n"); 16337 } 16338 break; 16339 16340 case 0x3A: /* MEDIUM NOT PRESENT */ 16341 if (sd_error_level >= SCSI_ERR_FATAL) { 16342 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16343 "Caddy not inserted in drive\n"); 16344 } 16345 16346 sr_ejected(un); 16347 un->un_mediastate = DKIO_EJECTED; 16348 /* The state has changed; inform the media watch routines. */ 16349 cv_broadcast(&un->un_state_cv); 16350 /* Just fail if no media is present in the drive. */ 16351 goto fail_command; 16352 16353 default: 16354 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16355 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 16356 "Unit not Ready. Additional sense code 0x%x\n", 16357 asc); 16358 } 16359 break; 16360 } 16361 16362 do_retry: 16363 16364 /* 16365 * Retry the command, as some targets may report NOT READY for 16366 * several seconds after being reset. 16367 */ 16368 xp->xb_nr_retry_count++; 16369 si.ssi_severity = SCSI_ERR_RETRYABLE; 16370 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 16371 &si, EIO, SD_BSY_TIMEOUT, NULL); 16372 16373 return; 16374 16375 fail_command: 16376 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16377 sd_return_failed_command(un, bp, EIO); 16378 } 16379 16380 16381 16382 /* 16383 * Function: sd_sense_key_medium_or_hardware_error 16384 * 16385 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 16386 * sense key.
16387 * 16388 * Context: May be called from interrupt context 16389 */ 16390 16391 static void 16392 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16393 uint8_t *sense_datap, 16394 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16395 { 16396 struct sd_sense_info si; 16397 uint8_t sense_key = scsi_sense_key(sense_datap); 16398 uint8_t asc = scsi_sense_asc(sense_datap); 16399 16400 ASSERT(un != NULL); 16401 ASSERT(mutex_owned(SD_MUTEX(un))); 16402 ASSERT(bp != NULL); 16403 ASSERT(xp != NULL); 16404 ASSERT(pktp != NULL); 16405 16406 si.ssi_severity = SCSI_ERR_FATAL; 16407 si.ssi_pfa_flag = FALSE; 16408 16409 if (sense_key == KEY_MEDIUM_ERROR) { 16410 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16411 } 16412 16413 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16414 16415 if ((un->un_reset_retry_count != 0) && 16416 (xp->xb_retry_count == un->un_reset_retry_count)) { 16417 mutex_exit(SD_MUTEX(un)); 16418 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16419 if (un->un_f_allow_bus_device_reset == TRUE) { 16420 16421 boolean_t try_resetting_target = B_TRUE; 16422 16423 /* 16424 * We need to be able to handle specific ASC values when we 16425 * are handling a KEY_HARDWARE_ERROR. In particular, 16426 * taking the default action of resetting the target may 16427 * not be the appropriate way to attempt recovery. 16428 * Resetting a target because of a single LUN failure 16429 * victimizes all LUNs on that target. 16430 * 16431 * This is true for the LSI arrays: if an LSI 16432 * array controller returns an ASC of 0x84 (LUN Dead), we 16433 * should trust it. 16434 */ 16435 16436 if (sense_key == KEY_HARDWARE_ERROR) { 16437 switch (asc) { 16438 case 0x84: 16439 if (SD_IS_LSI(un)) { 16440 try_resetting_target = B_FALSE; 16441 } 16442 break; 16443 default: 16444 break; 16445 } 16446 } 16447 16448 if (try_resetting_target == B_TRUE) { 16449 int reset_retval = 0; 16450 if (un->un_f_lun_reset_enabled == TRUE) { 16451 SD_TRACE(SD_LOG_IO_CORE, un, 16452 "sd_sense_key_medium_or_hardware_" 16453 "error: issuing RESET_LUN\n"); 16454 reset_retval = 16455 scsi_reset(SD_ADDRESS(un), 16456 RESET_LUN); 16457 } 16458 if (reset_retval == 0) { 16459 SD_TRACE(SD_LOG_IO_CORE, un, 16460 "sd_sense_key_medium_or_hardware_" 16461 "error: issuing RESET_TARGET\n"); 16462 (void) scsi_reset(SD_ADDRESS(un), 16463 RESET_TARGET); 16464 } 16465 } 16466 } 16467 mutex_enter(SD_MUTEX(un)); 16468 } 16469 16470 /* 16471 * This really ought to be a fatal error, but we will retry anyway 16472 * as some drives report this as a spurious error. 16473 */ 16474 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16475 &si, EIO, (clock_t)0, NULL); 16476 } 16477 16478 16479 16480 /* 16481 * Function: sd_sense_key_illegal_request 16482 * 16483 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
16484 * 16485 * Context: May be called from interrupt context 16486 */ 16487 16488 static void 16489 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16490 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16491 { 16492 struct sd_sense_info si; 16493 16494 ASSERT(un != NULL); 16495 ASSERT(mutex_owned(SD_MUTEX(un))); 16496 ASSERT(bp != NULL); 16497 ASSERT(xp != NULL); 16498 ASSERT(pktp != NULL); 16499 16500 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16501 16502 si.ssi_severity = SCSI_ERR_INFO; 16503 si.ssi_pfa_flag = FALSE; 16504 16505 /* Pointless to retry if the target thinks it's an illegal request */ 16506 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16507 sd_return_failed_command(un, bp, EIO); 16508 } 16509 16510 16511 16512 16513 /* 16514 * Function: sd_sense_key_unit_attention 16515 * 16516 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16517 * 16518 * Context: May be called from interrupt context 16519 */ 16520 16521 static void 16522 sd_sense_key_unit_attention(struct sd_lun *un, 16523 uint8_t *sense_datap, 16524 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16525 { 16526 /* 16527 * For UNIT ATTENTION we allow retries for one minute. Devices 16528 * like Sonoma can keep returning UNIT ATTENTION for close to a 16529 * minute under certain conditions. 16530 */ 16531 int retry_check_flag = SD_RETRIES_UA; 16532 boolean_t kstat_updated = B_FALSE; 16533 struct sd_sense_info si; 16534 uint8_t asc = scsi_sense_asc(sense_datap); 16535 uint8_t ascq = scsi_sense_ascq(sense_datap); 16536 16537 ASSERT(un != NULL); 16538 ASSERT(mutex_owned(SD_MUTEX(un))); 16539 ASSERT(bp != NULL); 16540 ASSERT(xp != NULL); 16541 ASSERT(pktp != NULL); 16542 16543 si.ssi_severity = SCSI_ERR_INFO; 16544 si.ssi_pfa_flag = FALSE; 16545 16546 16547 switch (asc) { 16548 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16549 if (sd_report_pfa != 0) { 16550 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16551 si.ssi_pfa_flag = TRUE; 16552 retry_check_flag = SD_RETRIES_STANDARD; 16553 goto do_retry; 16554 } 16555 16556 break; 16557 16558 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16559 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16560 un->un_resvd_status |= 16561 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16562 } 16563 #ifdef _LP64 16564 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16565 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16566 un, KM_NOSLEEP) == 0) { 16567 /* 16568 * If we can't dispatch the task we'll just 16569 * live without descriptor sense. We can 16570 * try again on the next "unit attention". 16571 */ 16572 SD_ERROR(SD_LOG_ERROR, un, 16573 "sd_sense_key_unit_attention: " 16574 "Could not dispatch " 16575 "sd_reenable_dsense_task\n"); 16576 } 16577 } 16578 #endif /* _LP64 */ 16579 /* FALLTHRU */ 16580 16581 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16582 if (!un->un_f_has_removable_media) { 16583 break; 16584 } 16585 16586 /* 16587 * When we get a unit attention from a removable-media device, 16588 * it may be in a state that will take a long time to recover 16589 * (e.g., from a reset). Since we are executing in interrupt 16590 * context here, we cannot wait around for the device to come 16591 * back. So hand this command off to sd_media_change_task() 16592 * for deferred processing under taskq thread context. (Note 16593 * that the command still may be failed if a problem is 16594 * encountered at a later time.)
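 *
 * taskq_dispatch(9F) with KM_NOSLEEP returns a task id on success
 * and 0 when it cannot allocate a dispatch entry, which is why the
 * failure arm just below fails the command outright rather than
 * blocking in interrupt context.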
16595 */ 16596 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16597 KM_NOSLEEP) == 0) { 16598 /* 16599 * Cannot dispatch the request so fail the command. 16600 */ 16601 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16602 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16603 si.ssi_severity = SCSI_ERR_FATAL; 16604 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16605 sd_return_failed_command(un, bp, EIO); 16606 } 16607 16608 /* 16609 * If the dispatch of sd_media_change_task() failed, the kstats 16610 * were already updated above. If the dispatch succeeded, the 16611 * kstats will be updated later if the task encounters an error. 16612 * Either way, set the kstat_updated flag here. 16613 */ 16614 kstat_updated = B_TRUE; 16615 16616 /* 16617 * Either the command has been successfully dispatched to a 16618 * task Q for retrying, or the dispatch failed. In either case 16619 * do NOT retry again by calling sd_retry_command. This sets up 16620 * two retries of the same command and when one completes and 16621 * frees the resources the other will access freed memory, 16622 * a bad thing. 16623 */ 16624 return; 16625 16626 default: 16627 break; 16628 } 16629 16630 /* 16631 * ASC ASCQ 16632 * 2A 09 Capacity data has changed 16633 * 2A 01 Mode parameters changed 16634 * 3F 0E Reported luns data has changed 16635 * Arrays that support logical unit expansion should report 16636 * capacity changes (2Ah/09). "Mode parameters changed" and 16637 * "Reported luns data has changed" are approximations of the same. 16638 */ 16639 if (((asc == 0x2a) && (ascq == 0x09)) || 16640 ((asc == 0x2a) && (ascq == 0x01)) || 16641 ((asc == 0x3f) && (ascq == 0x0e))) { 16642 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 16643 KM_NOSLEEP) == 0) { 16644 SD_ERROR(SD_LOG_ERROR, un, 16645 "sd_sense_key_unit_attention: " 16646 "Could not dispatch sd_target_change_task\n"); 16647 } 16648 } 16649 16650 /* 16651 * Update the kstats if we haven't done so already. 16652 */ 16653 if (!kstat_updated) { 16654 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16655 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16656 } 16657 16658 do_retry: 16659 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16660 EIO, SD_UA_RETRY_DELAY, NULL); 16661 } 16662 16663 16664 16665 /* 16666 * Function: sd_sense_key_fail_command 16667 * 16668 * Description: Used to fail a command when we don't like the sense key that 16669 * was returned. 16670 * 16671 * Context: May be called from interrupt context 16672 */ 16673 16674 static void 16675 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16676 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16677 { 16678 struct sd_sense_info si; 16679 16680 ASSERT(un != NULL); 16681 ASSERT(mutex_owned(SD_MUTEX(un))); 16682 ASSERT(bp != NULL); 16683 ASSERT(xp != NULL); 16684 ASSERT(pktp != NULL); 16685 16686 si.ssi_severity = SCSI_ERR_FATAL; 16687 si.ssi_pfa_flag = FALSE; 16688 16689 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16690 sd_return_failed_command(un, bp, EIO); 16691 } 16692 16693 16694 16695 /* 16696 * Function: sd_sense_key_blank_check 16697 * 16698 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16699 * Has no monetary connotation.
16700 * 16701 * Context: May be called from interrupt context 16702 */ 16703 16704 static void 16705 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16706 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16707 { 16708 struct sd_sense_info si; 16709 16710 ASSERT(un != NULL); 16711 ASSERT(mutex_owned(SD_MUTEX(un))); 16712 ASSERT(bp != NULL); 16713 ASSERT(xp != NULL); 16714 ASSERT(pktp != NULL); 16715 16716 /* 16717 * Blank check is not fatal for removable devices, therefore 16718 * it does not require a console message. 16719 */ 16720 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16721 SCSI_ERR_FATAL; 16722 si.ssi_pfa_flag = FALSE; 16723 16724 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16725 sd_return_failed_command(un, bp, EIO); 16726 } 16727 16728 16729 16730 16731 /* 16732 * Function: sd_sense_key_aborted_command 16733 * 16734 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 16735 * 16736 * Context: May be called from interrupt context 16737 */ 16738 16739 static void 16740 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16741 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16742 { 16743 struct sd_sense_info si; 16744 16745 ASSERT(un != NULL); 16746 ASSERT(mutex_owned(SD_MUTEX(un))); 16747 ASSERT(bp != NULL); 16748 ASSERT(xp != NULL); 16749 ASSERT(pktp != NULL); 16750 16751 si.ssi_severity = SCSI_ERR_FATAL; 16752 si.ssi_pfa_flag = FALSE; 16753 16754 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16755 16756 /* 16757 * This really ought to be a fatal error, but we will retry anyway 16758 * as some drives report this as a spurious error. 16759 */ 16760 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16761 &si, EIO, drv_usectohz(100000), NULL); 16762 } 16763 16764 16765 16766 /* 16767 * Function: sd_sense_key_default 16768 * 16769 * Description: Default recovery action for several SCSI sense keys (basically 16770 * attempts a retry). 16771 * 16772 * Context: May be called from interrupt context 16773 */ 16774 16775 static void 16776 sd_sense_key_default(struct sd_lun *un, 16777 uint8_t *sense_datap, 16778 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16779 { 16780 struct sd_sense_info si; 16781 uint8_t sense_key = scsi_sense_key(sense_datap); 16782 16783 ASSERT(un != NULL); 16784 ASSERT(mutex_owned(SD_MUTEX(un))); 16785 ASSERT(bp != NULL); 16786 ASSERT(xp != NULL); 16787 ASSERT(pktp != NULL); 16788 16789 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16790 16791 /* 16792 * Undecoded sense key. Attempt retries and hope that will fix 16793 * the problem. Otherwise, we're dead. 16794 */ 16795 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16796 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16797 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16798 } 16799 16800 si.ssi_severity = SCSI_ERR_FATAL; 16801 si.ssi_pfa_flag = FALSE; 16802 16803 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16804 &si, EIO, (clock_t)0, NULL); 16805 } 16806 16807 16808 16809 /* 16810 * Function: sd_print_retry_msg 16811 * 16812 * Description: Print a message indicating the retry action being taken. 16813 * 16814 * Arguments: un - ptr to associated softstate 16815 * bp - ptr to buf(9S) for the command 16816 * arg - not used. 
16817 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16818 * or SD_NO_RETRY_ISSUED 16819 * 16820 * Context: May be called from interrupt context 16821 */ 16822 /* ARGSUSED */ 16823 static void 16824 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16825 { 16826 struct sd_xbuf *xp; 16827 struct scsi_pkt *pktp; 16828 char *reasonp; 16829 char *msgp; 16830 16831 ASSERT(un != NULL); 16832 ASSERT(mutex_owned(SD_MUTEX(un))); 16833 ASSERT(bp != NULL); 16834 pktp = SD_GET_PKTP(bp); 16835 ASSERT(pktp != NULL); 16836 xp = SD_GET_XBUF(bp); 16837 ASSERT(xp != NULL); 16838 16839 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16840 mutex_enter(&un->un_pm_mutex); 16841 if ((un->un_state == SD_STATE_SUSPENDED) || 16842 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16843 (pktp->pkt_flags & FLAG_SILENT)) { 16844 mutex_exit(&un->un_pm_mutex); 16845 goto update_pkt_reason; 16846 } 16847 mutex_exit(&un->un_pm_mutex); 16848 16849 /* 16850 * Suppress messages if they are all the same pkt_reason; with 16851 * TQ, many (up to 256) are returned with the same pkt_reason. 16852 * If we are in panic, then suppress the retry messages. 16853 */ 16854 switch (flag) { 16855 case SD_NO_RETRY_ISSUED: 16856 msgp = "giving up"; 16857 break; 16858 case SD_IMMEDIATE_RETRY_ISSUED: 16859 case SD_DELAYED_RETRY_ISSUED: 16860 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16861 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16862 (sd_error_level != SCSI_ERR_ALL))) { 16863 return; 16864 } 16865 msgp = "retrying command"; 16866 break; 16867 default: 16868 goto update_pkt_reason; 16869 } 16870 16871 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16872 scsi_rname(pktp->pkt_reason)); 16873 16874 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16875 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16876 16877 update_pkt_reason: 16878 /* 16879 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16880 * This is to prevent multiple console messages for the same failure 16881 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16882 * when the command is retried successfully because there still may be 16883 * more commands coming back with the same value of pktp->pkt_reason. 16884 */ 16885 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16886 un->un_last_pkt_reason = pktp->pkt_reason; 16887 } 16888 } 16889 16890 16891 /* 16892 * Function: sd_print_cmd_incomplete_msg 16893 * 16894 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16895 * 16896 * Arguments: un - ptr to associated softstate 16897 * bp - ptr to buf(9S) for the command 16898 * arg - passed to sd_print_retry_msg() 16899 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16900 * or SD_NO_RETRY_ISSUED 16901 * 16902 * Context: May be called from interrupt context 16903 */ 16904 16905 static void 16906 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16907 int code) 16908 { 16909 dev_info_t *dip; 16910 16911 ASSERT(un != NULL); 16912 ASSERT(mutex_owned(SD_MUTEX(un))); 16913 ASSERT(bp != NULL); 16914 16915 switch (code) { 16916 case SD_NO_RETRY_ISSUED: 16917 /* Command was failed. Someone turned off this target? 
*/ 16918 if (un->un_state != SD_STATE_OFFLINE) { 16919 /* 16920 * Suppress message if we are detaching and 16921 * device has been disconnected 16922 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16923 * private interface and not part of the DDI 16924 */ 16925 dip = un->un_sd->sd_dev; 16926 if (!(DEVI_IS_DETACHING(dip) && 16927 DEVI_IS_DEVICE_REMOVED(dip))) { 16928 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16929 "disk not responding to selection\n"); 16930 } 16931 New_state(un, SD_STATE_OFFLINE); 16932 } 16933 break; 16934 16935 case SD_DELAYED_RETRY_ISSUED: 16936 case SD_IMMEDIATE_RETRY_ISSUED: 16937 default: 16938 /* Command was successfully queued for retry */ 16939 sd_print_retry_msg(un, bp, arg, code); 16940 break; 16941 } 16942 } 16943 16944 16945 /* 16946 * Function: sd_pkt_reason_cmd_incomplete 16947 * 16948 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 16949 * 16950 * Context: May be called from interrupt context 16951 */ 16952 16953 static void 16954 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16955 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16956 { 16957 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16958 16959 ASSERT(un != NULL); 16960 ASSERT(mutex_owned(SD_MUTEX(un))); 16961 ASSERT(bp != NULL); 16962 ASSERT(xp != NULL); 16963 ASSERT(pktp != NULL); 16964 16965 /* Do not do a reset if selection did not complete */ 16966 /* Note: Should this not just check the bit? */ 16967 if (pktp->pkt_state != STATE_GOT_BUS) { 16968 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16969 sd_reset_target(un, pktp); 16970 } 16971 16972 /* 16973 * If the target was not successfully selected, then set 16974 * SD_RETRIES_FAILFAST to indicate that we lost communication 16975 * with the target, and further retries and/or commands are 16976 * likely to take a long time. 16977 */ 16978 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16979 flag |= SD_RETRIES_FAILFAST; 16980 } 16981 16982 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16983 16984 sd_retry_command(un, bp, flag, 16985 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16986 } 16987 16988 16989 16990 /* 16991 * Function: sd_pkt_reason_cmd_tran_err 16992 * 16993 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16994 * 16995 * Context: May be called from interrupt context 16996 */ 16997 16998 static void 16999 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 17000 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17001 { 17002 ASSERT(un != NULL); 17003 ASSERT(mutex_owned(SD_MUTEX(un))); 17004 ASSERT(bp != NULL); 17005 ASSERT(xp != NULL); 17006 ASSERT(pktp != NULL); 17007 17008 /* 17009 * Do not reset if we got a parity error, or if 17010 * selection did not complete. 17011 */ 17012 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17013 /* Note: Should this not just check the bit for pkt_state? */ 17014 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 17015 (pktp->pkt_state != STATE_GOT_BUS)) { 17016 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17017 sd_reset_target(un, pktp); 17018 } 17019 17020 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17021 17022 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17023 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17024 } 17025 17026 17027 17028 /* 17029 * Function: sd_pkt_reason_cmd_reset 17030 * 17031 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
17032 * 17033 * Context: May be called from interrupt context 17034 */ 17035 17036 static void 17037 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 17038 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17039 { 17040 ASSERT(un != NULL); 17041 ASSERT(mutex_owned(SD_MUTEX(un))); 17042 ASSERT(bp != NULL); 17043 ASSERT(xp != NULL); 17044 ASSERT(pktp != NULL); 17045 17046 /* The target may still be running the command, so try to reset. */ 17047 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17048 sd_reset_target(un, pktp); 17049 17050 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17051 17052 /* 17053 * If pkt_reason is CMD_RESET chances are that this pkt got 17054 * reset because another target on this bus caused it. The target 17055 * that caused it should get CMD_TIMEOUT with pkt_statistics 17056 * of STAT_TIMEOUT/STAT_DEV_RESET. 17057 */ 17058 17059 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17060 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17061 } 17062 17063 17064 17065 17066 /* 17067 * Function: sd_pkt_reason_cmd_aborted 17068 * 17069 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 17070 * 17071 * Context: May be called from interrupt context 17072 */ 17073 17074 static void 17075 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 17076 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17077 { 17078 ASSERT(un != NULL); 17079 ASSERT(mutex_owned(SD_MUTEX(un))); 17080 ASSERT(bp != NULL); 17081 ASSERT(xp != NULL); 17082 ASSERT(pktp != NULL); 17083 17084 /* The target may still be running the command, so try to reset. */ 17085 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17086 sd_reset_target(un, pktp); 17087 17088 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17089 17090 /* 17091 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17092 * aborted because another target on this bus caused it. The target 17093 * that caused it should get CMD_TIMEOUT with pkt_statistics 17094 * of STAT_TIMEOUT/STAT_DEV_RESET. 17095 */ 17096 17097 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17098 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17099 } 17100 17101 17102 17103 /* 17104 * Function: sd_pkt_reason_cmd_timeout 17105 * 17106 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17107 * 17108 * Context: May be called from interrupt context 17109 */ 17110 17111 static void 17112 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17113 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17114 { 17115 ASSERT(un != NULL); 17116 ASSERT(mutex_owned(SD_MUTEX(un))); 17117 ASSERT(bp != NULL); 17118 ASSERT(xp != NULL); 17119 ASSERT(pktp != NULL); 17120 17121 17122 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17123 sd_reset_target(un, pktp); 17124 17125 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17126 17127 /* 17128 * A command timeout indicates that we could not establish 17129 * communication with the target, so set SD_RETRIES_FAILFAST 17130 * as further retries/commands are likely to take a long time. 17131 */ 17132 sd_retry_command(un, bp, 17133 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17134 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17135 } 17136 17137 17138 17139 /* 17140 * Function: sd_pkt_reason_cmd_unx_bus_free 17141 * 17142 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
17143 * 17144 * Context: May be called from interrupt context 17145 */ 17146 17147 static void 17148 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17149 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17150 { 17151 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17152 17153 ASSERT(un != NULL); 17154 ASSERT(mutex_owned(SD_MUTEX(un))); 17155 ASSERT(bp != NULL); 17156 ASSERT(xp != NULL); 17157 ASSERT(pktp != NULL); 17158 17159 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17160 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17161 17162 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17163 sd_print_retry_msg : NULL; 17164 17165 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17166 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17167 } 17168 17169 17170 /* 17171 * Function: sd_pkt_reason_cmd_tag_reject 17172 * 17173 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 17174 * 17175 * Context: May be called from interrupt context 17176 */ 17177 17178 static void 17179 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17180 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17181 { 17182 ASSERT(un != NULL); 17183 ASSERT(mutex_owned(SD_MUTEX(un))); 17184 ASSERT(bp != NULL); 17185 ASSERT(xp != NULL); 17186 ASSERT(pktp != NULL); 17187 17188 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17189 pktp->pkt_flags = 0; 17190 un->un_tagflags = 0; 17191 if (un->un_f_opt_queueing == TRUE) { 17192 un->un_throttle = min(un->un_throttle, 3); 17193 } else { 17194 un->un_throttle = 1; 17195 } 17196 mutex_exit(SD_MUTEX(un)); 17197 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17198 mutex_enter(SD_MUTEX(un)); 17199 17200 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17201 17202 /* Legacy behavior not to check retry counts here. */ 17203 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17204 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17205 } 17206 17207 17208 /* 17209 * Function: sd_pkt_reason_default 17210 * 17211 * Description: Default recovery actions for SCSA pkt_reason values that 17212 * do not have more explicit recovery actions. 17213 * 17214 * Context: May be called from interrupt context 17215 */ 17216 17217 static void 17218 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17219 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17220 { 17221 ASSERT(un != NULL); 17222 ASSERT(mutex_owned(SD_MUTEX(un))); 17223 ASSERT(bp != NULL); 17224 ASSERT(xp != NULL); 17225 ASSERT(pktp != NULL); 17226 17227 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17228 sd_reset_target(un, pktp); 17229 17230 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17231 17232 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17233 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17234 } 17235 17236 17237 17238 /* 17239 * Function: sd_pkt_status_check_condition 17240 * 17241 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
17242 * 17243 * Context: May be called from interrupt context 17244 */ 17245 17246 static void 17247 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17248 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17249 { 17250 ASSERT(un != NULL); 17251 ASSERT(mutex_owned(SD_MUTEX(un))); 17252 ASSERT(bp != NULL); 17253 ASSERT(xp != NULL); 17254 ASSERT(pktp != NULL); 17255 17256 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17257 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17258 17259 /* 17260 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17261 * command will be retried after the request sense). Otherwise, retry 17262 * the command. Note: we are issuing the request sense even though the 17263 * retry limit may have been reached for the failed command. 17264 */ 17265 if (un->un_f_arq_enabled == FALSE) { 17266 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17267 "no ARQ, sending request sense command\n"); 17268 sd_send_request_sense_command(un, bp, pktp); 17269 } else { 17270 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17271 "ARQ, retrying request sense command\n"); 17272 #if defined(__i386) || defined(__amd64) 17273 /* 17274 * The delay value used here needs to be adjusted 17275 * whenever SD_RETRY_DELAY changes in sddef.h 17276 */ 17277 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17278 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 17279 NULL); 17280 #else 17281 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 17282 EIO, SD_RETRY_DELAY, NULL); 17283 #endif 17284 } 17285 17286 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 17287 } 17288 17289 17290 /* 17291 * Function: sd_pkt_status_busy 17292 * 17293 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 17294 * 17295 * Context: May be called from interrupt context 17296 */ 17297 17298 static void 17299 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17300 struct scsi_pkt *pktp) 17301 { 17302 ASSERT(un != NULL); 17303 ASSERT(mutex_owned(SD_MUTEX(un))); 17304 ASSERT(bp != NULL); 17305 ASSERT(xp != NULL); 17306 ASSERT(pktp != NULL); 17307 17308 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17309 "sd_pkt_status_busy: entry\n"); 17310 17311 /* If retries are exhausted, just fail the command. */ 17312 if (xp->xb_retry_count >= un->un_busy_retry_count) { 17313 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17314 "device busy too long\n"); 17315 sd_return_failed_command(un, bp, EIO); 17316 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17317 "sd_pkt_status_busy: exit\n"); 17318 return; 17319 } 17320 xp->xb_retry_count++; 17321 17322 /* 17323 * Try to reset the target. However, we do not want to perform 17324 * more than one reset if the device continues to fail. The reset 17325 * will be performed when the retry count reaches the reset 17326 * threshold. This threshold should be set such that at least 17327 * one retry is issued before the reset is performed. 17328 */ 17329 if (xp->xb_retry_count == 17330 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 17331 int rval = 0; 17332 mutex_exit(SD_MUTEX(un)); 17333 if (un->un_f_allow_bus_device_reset == TRUE) { 17334 /* 17335 * First try to reset the LUN; if we cannot then 17336 * try to reset the target.
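 *
 * scsi_reset(9F) returns 1 on success and 0 on failure, so
 * (ignoring the enable flags checked below) the escalation is a
 * simple chain:
 *
 *	rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
 *	if (rval == 0)
 *		rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
 *	if (rval == 0)
 *		rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);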
17337 */ 17338 if (un->un_f_lun_reset_enabled == TRUE) { 17339 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17340 "sd_pkt_status_busy: RESET_LUN\n"); 17341 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17342 } 17343 if (rval == 0) { 17344 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17345 "sd_pkt_status_busy: RESET_TARGET\n"); 17346 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17347 } 17348 } 17349 if (rval == 0) { 17350 /* 17351 * If the RESET_LUN and/or RESET_TARGET failed, 17352 * try RESET_ALL 17353 */ 17354 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17355 "sd_pkt_status_busy: RESET_ALL\n"); 17356 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 17357 } 17358 mutex_enter(SD_MUTEX(un)); 17359 if (rval == 0) { 17360 /* 17361 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 17362 * At this point we give up & fail the command. 17363 */ 17364 sd_return_failed_command(un, bp, EIO); 17365 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17366 "sd_pkt_status_busy: exit (failed cmd)\n"); 17367 return; 17368 } 17369 } 17370 17371 /* 17372 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17373 * we have already checked the retry counts above. 17374 */ 17375 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17376 EIO, SD_BSY_TIMEOUT, NULL); 17377 17378 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17379 "sd_pkt_status_busy: exit\n"); 17380 } 17381 17382 17383 /* 17384 * Function: sd_pkt_status_reservation_conflict 17385 * 17386 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17387 * command status. 17388 * 17389 * Context: May be called from interrupt context 17390 */ 17391 17392 static void 17393 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17394 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17395 { 17396 ASSERT(un != NULL); 17397 ASSERT(mutex_owned(SD_MUTEX(un))); 17398 ASSERT(bp != NULL); 17399 ASSERT(xp != NULL); 17400 ASSERT(pktp != NULL); 17401 17402 /* 17403 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17404 * conflict could be due to various reasons like incorrect keys, not 17405 * registered or not reserved etc. So, we return EACCES to the caller. 17406 */ 17407 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17408 int cmd = SD_GET_PKT_OPCODE(pktp); 17409 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17410 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17411 sd_return_failed_command(un, bp, EACCES); 17412 return; 17413 } 17414 } 17415 17416 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17417 17418 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17419 if (sd_failfast_enable != 0) { 17420 /* By definition, we must panic here.... */ 17421 sd_panic_for_res_conflict(un); 17422 /*NOTREACHED*/ 17423 } 17424 SD_ERROR(SD_LOG_IO, un, 17425 "sd_handle_resv_conflict: Disk Reserved\n"); 17426 sd_return_failed_command(un, bp, EACCES); 17427 return; 17428 } 17429 17430 /* 17431 * 1147670: retry only if sd_retry_on_reservation_conflict 17432 * property is set (default is 1). Retries will not succeed 17433 * on a disk reserved by another initiator. HA systems 17434 * may reset this via sd.conf to avoid these retries. 17435 * 17436 * Note: The legacy return code for this failure is EIO, however EACCES 17437 * seems more appropriate for a reservation conflict. 17438 */ 17439 if (sd_retry_on_reservation_conflict == 0) { 17440 SD_ERROR(SD_LOG_IO, un, 17441 "sd_handle_resv_conflict: Device Reserved\n"); 17442 sd_return_failed_command(un, bp, EIO); 17443 return; 17444 } 17445 17446 /* 17447 * Retry the command if we can. 
17448 * 17449 * Note: The legacy return code for this failure is EIO, however EACCES 17450 * seems more appropriate for a reservation conflict. 17451 */ 17452 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17453 (clock_t)2, NULL); 17454 } 17455 17456 17457 17458 /* 17459 * Function: sd_pkt_status_qfull 17460 * 17461 * Description: Handle a QUEUE FULL condition from the target. This can 17462 * occur if the HBA does not handle the queue full condition. 17463 * (Basically this means third-party HBAs as Sun HBAs will 17464 * handle the queue full condition.) Note that if there are 17465 * some commands already in the transport, then the queue full 17466 * has occurred because the queue for this nexus is actually 17467 * full. If there are no commands in the transport, then the 17468 * queue full is resulting from some other initiator or lun 17469 * consuming all the resources at the target. 17470 * 17471 * Context: May be called from interrupt context 17472 */ 17473 17474 static void 17475 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17476 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17477 { 17478 ASSERT(un != NULL); 17479 ASSERT(mutex_owned(SD_MUTEX(un))); 17480 ASSERT(bp != NULL); 17481 ASSERT(xp != NULL); 17482 ASSERT(pktp != NULL); 17483 17484 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17485 "sd_pkt_status_qfull: entry\n"); 17486 17487 /* 17488 * Just lower the QFULL throttle and retry the command. Note that 17489 * we do not limit the number of retries here. 17490 */ 17491 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17492 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17493 SD_RESTART_TIMEOUT, NULL); 17494 17495 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17496 "sd_pkt_status_qfull: exit\n"); 17497 } 17498 17499 17500 /* 17501 * Function: sd_reset_target 17502 * 17503 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17504 * RESET_TARGET, or RESET_ALL. 17505 * 17506 * Context: May be called under interrupt context. 17507 */ 17508 17509 static void 17510 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17511 { 17512 int rval = 0; 17513 17514 ASSERT(un != NULL); 17515 ASSERT(mutex_owned(SD_MUTEX(un))); 17516 ASSERT(pktp != NULL); 17517 17518 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17519 17520 /* 17521 * No need to reset if the transport layer has already done so. 
17522 */ 17523 if ((pktp->pkt_statistics & 17524 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17525 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17526 "sd_reset_target: no reset\n"); 17527 return; 17528 } 17529 17530 mutex_exit(SD_MUTEX(un)); 17531 17532 if (un->un_f_allow_bus_device_reset == TRUE) { 17533 if (un->un_f_lun_reset_enabled == TRUE) { 17534 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17535 "sd_reset_target: RESET_LUN\n"); 17536 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17537 } 17538 if (rval == 0) { 17539 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17540 "sd_reset_target: RESET_TARGET\n"); 17541 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17542 } 17543 } 17544 17545 if (rval == 0) { 17546 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17547 "sd_reset_target: RESET_ALL\n"); 17548 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17549 } 17550 17551 mutex_enter(SD_MUTEX(un)); 17552 17553 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17554 } 17555 17556 /* 17557 * Function: sd_target_change_task 17558 * 17559 * Description: Handle dynamic target change 17560 * 17561 * Context: Executes in a taskq() thread context 17562 */ 17563 static void 17564 sd_target_change_task(void *arg) 17565 { 17566 struct sd_lun *un = arg; 17567 uint64_t capacity; 17568 diskaddr_t label_cap; 17569 uint_t lbasize; 17570 17571 ASSERT(un != NULL); 17572 ASSERT(!mutex_owned(SD_MUTEX(un))); 17573 17574 if ((un->un_f_blockcount_is_valid == FALSE) || 17575 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 17576 return; 17577 } 17578 17579 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 17580 &lbasize, SD_PATH_DIRECT) != 0) { 17581 SD_ERROR(SD_LOG_ERROR, un, 17582 "sd_target_change_task: fail to read capacity\n"); 17583 return; 17584 } 17585 17586 mutex_enter(SD_MUTEX(un)); 17587 if (capacity <= un->un_blockcount) { 17588 mutex_exit(SD_MUTEX(un)); 17589 return; 17590 } 17591 17592 sd_update_block_info(un, lbasize, capacity); 17593 mutex_exit(SD_MUTEX(un)); 17594 17595 /* 17596 * If lun is EFI labeled and lun capacity is greater than the 17597 * capacity contained in the label, log a sys event. 17598 */ 17599 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 17600 (void*)SD_PATH_DIRECT) == 0) { 17601 mutex_enter(SD_MUTEX(un)); 17602 if (un->un_f_blockcount_is_valid && 17603 un->un_blockcount > label_cap) { 17604 mutex_exit(SD_MUTEX(un)); 17605 sd_log_lun_expansion_event(un, KM_SLEEP); 17606 } else { 17607 mutex_exit(SD_MUTEX(un)); 17608 } 17609 } 17610 } 17611 17612 /* 17613 * Function: sd_log_lun_expansion_event 17614 * 17615 * Description: Log lun expansion sys event 17616 * 17617 * Context: Never called from interrupt context 17618 */ 17619 static void 17620 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 17621 { 17622 int err; 17623 char *path; 17624 nvlist_t *dle_attr_list; 17625 17626 /* Allocate and build sysevent attribute list */ 17627 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 17628 if (err != 0) { 17629 SD_ERROR(SD_LOG_ERROR, un, 17630 "sd_log_lun_expansion_event: fail to allocate space\n"); 17631 return; 17632 } 17633 17634 path = kmem_alloc(MAXPATHLEN, km_flag); 17635 if (path == NULL) { 17636 nvlist_free(dle_attr_list); 17637 SD_ERROR(SD_LOG_ERROR, un, 17638 "sd_log_lun_expansion_event: fail to allocate space\n"); 17639 return; 17640 } 17641 /* 17642 * Add path attribute to identify the lun. 17643 * We are using minor node 'a' as the sysevent attribute. 
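 *
 * The resulting attribute is the "/devices" prefix, the
 * ddi_pathname(9F) output, and the ":a" minor suffix; a
 * hypothetical instance for illustration:
 *
 *	/devices/pci@0,0/pci1000,3060@3/sd@1,0:a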
17644 */ 17645 (void) snprintf(path, MAXPATHLEN, "/devices"); 17646 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 17647 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 17648 ":a"); 17649 17650 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 17651 if (err != 0) { 17652 nvlist_free(dle_attr_list); 17653 kmem_free(path, MAXPATHLEN); 17654 SD_ERROR(SD_LOG_ERROR, un, 17655 "sd_log_lun_expansion_event: fail to add attribute\n"); 17656 return; 17657 } 17658 17659 /* Log dynamic lun expansion sysevent */ 17660 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 17661 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 17662 if (err != DDI_SUCCESS) { 17663 SD_ERROR(SD_LOG_ERROR, un, 17664 "sd_log_lun_expansion_event: fail to log sysevent\n"); 17665 } 17666 17667 nvlist_free(dle_attr_list); 17668 kmem_free(path, MAXPATHLEN); 17669 } 17670 17671 /* 17672 * Function: sd_media_change_task 17673 * 17674 * Description: Recovery action to wait for a CDROM to become available. 17675 * 17676 * Context: Executes in a taskq() thread context 17677 */ 17678 17679 static void 17680 sd_media_change_task(void *arg) 17681 { 17682 struct scsi_pkt *pktp = arg; 17683 struct sd_lun *un; 17684 struct buf *bp; 17685 struct sd_xbuf *xp; 17686 int err = 0; 17687 int retry_count = 0; 17688 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17689 struct sd_sense_info si; 17690 17691 ASSERT(pktp != NULL); 17692 bp = (struct buf *)pktp->pkt_private; 17693 ASSERT(bp != NULL); 17694 xp = SD_GET_XBUF(bp); 17695 ASSERT(xp != NULL); 17696 un = SD_GET_UN(bp); 17697 ASSERT(un != NULL); 17698 ASSERT(!mutex_owned(SD_MUTEX(un))); 17699 ASSERT(un->un_f_monitor_media_state); 17700 17701 si.ssi_severity = SCSI_ERR_INFO; 17702 si.ssi_pfa_flag = FALSE; 17703 17704 /* 17705 * When a reset is issued on a CDROM, it takes a long time to 17706 * recover. The first few attempts to read the capacity and other 17707 * things related to handling unit attention fail (with an ASC of 17708 * 0x4 and an ASCQ of 0x1). In that case we want to allow enough 17709 * retries, while limiting the retries in other cases of genuine 17710 * failure, such as no media in the drive. 17711 */ 17712 while (retry_count++ < retry_limit) { 17713 if ((err = sd_handle_mchange(un)) == 0) { 17714 break; 17715 } 17716 if (err == EAGAIN) { 17717 retry_limit = SD_UNIT_ATTENTION_RETRY; 17718 } 17719 /* Sleep for 0.5 sec. & try again */ 17720 delay(drv_usectohz(500000)); 17721 } 17722 17723 /* 17724 * Dispatch (retry or fail) the original command here, 17725 * along with appropriate console messages.... 17726 * 17727 * Must grab the mutex before calling sd_retry_command, 17728 * sd_print_sense_msg and sd_return_failed_command. 17729 */ 17730 mutex_enter(SD_MUTEX(un)); 17731 if (err != SD_CMD_SUCCESS) { 17732 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17733 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17734 si.ssi_severity = SCSI_ERR_FATAL; 17735 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17736 sd_return_failed_command(un, bp, EIO); 17737 } else { 17738 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17739 &si, EIO, (clock_t)0, NULL); 17740 } 17741 mutex_exit(SD_MUTEX(un)); 17742 } 17743 17744 17745 17746 /* 17747 * Function: sd_handle_mchange 17748 * 17749 * Description: Perform geometry validation & other recovery when CDROM 17750 * has been removed from drive.
17751 * 17752 * Return Code: 0 for success 17753 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17754 * sd_send_scsi_READ_CAPACITY() 17755 * 17756 * Context: Executes in a taskq() thread context 17757 */ 17758 17759 static int 17760 sd_handle_mchange(struct sd_lun *un) 17761 { 17762 uint64_t capacity; 17763 uint32_t lbasize; 17764 int rval; 17765 17766 ASSERT(!mutex_owned(SD_MUTEX(un))); 17767 ASSERT(un->un_f_monitor_media_state); 17768 17769 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17770 SD_PATH_DIRECT_PRIORITY)) != 0) { 17771 return (rval); 17772 } 17773 17774 mutex_enter(SD_MUTEX(un)); 17775 sd_update_block_info(un, lbasize, capacity); 17776 17777 if (un->un_errstats != NULL) { 17778 struct sd_errstats *stp = 17779 (struct sd_errstats *)un->un_errstats->ks_data; 17780 stp->sd_capacity.value.ui64 = (uint64_t) 17781 ((uint64_t)un->un_blockcount * 17782 (uint64_t)un->un_tgt_blocksize); 17783 } 17784 17785 17786 /* 17787 * Check if the media in the device is writable or not 17788 */ 17789 if (ISCD(un)) 17790 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17791 17792 /* 17793 * Note: Maybe let the strategy/partitioning chain worry about getting 17794 * valid geometry. 17795 */ 17796 mutex_exit(SD_MUTEX(un)); 17797 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17798 17799 17800 if (cmlb_validate(un->un_cmlbhandle, 0, 17801 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17802 return (EIO); 17803 } else { 17804 if (un->un_f_pkstats_enabled) { 17805 sd_set_pstats(un); 17806 SD_TRACE(SD_LOG_IO_PARTITION, un, 17807 "sd_handle_mchange: un:0x%p pstats created and " 17808 "set\n", un); 17809 } 17810 } 17811 17812 17813 /* 17814 * Try to lock the door 17815 */ 17816 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17817 SD_PATH_DIRECT_PRIORITY)); 17818 } 17819 17820 17821 /* 17822 * Function: sd_send_scsi_DOORLOCK 17823 * 17824 * Description: Issue the scsi DOOR LOCK command 17825 * 17826 * Arguments: un - pointer to driver soft state (unit) structure for 17827 * this target. 17828 * flag - SD_REMOVAL_ALLOW 17829 * SD_REMOVAL_PREVENT 17830 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17831 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17832 * to use the USCSI "direct" chain and bypass the normal 17833 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17834 * command is issued as part of an error recovery action. 17835 * 17836 * Return Code: 0 - Success 17837 * errno return code from sd_send_scsi_cmd() 17838 * 17839 * Context: Can sleep. 17840 */ 17841 17842 static int 17843 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17844 { 17845 union scsi_cdb cdb; 17846 struct uscsi_cmd ucmd_buf; 17847 struct scsi_extended_sense sense_buf; 17848 int status; 17849 17850 ASSERT(un != NULL); 17851 ASSERT(!mutex_owned(SD_MUTEX(un))); 17852 17853 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17854 17855 /* already determined doorlock is not supported, fake success */ 17856 if (un->un_f_doorlock_supported == FALSE) { 17857 return (0); 17858 } 17859 17860 /* 17861 * If we are ejecting and see an SD_REMOVAL_PREVENT 17862 * ignore the command so we can complete the eject 17863 * operation. 
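 *
 * PREVENT ALLOW MEDIUM REMOVAL is a Group 0 (six-byte) CDB:
 * opcode 0x1E (SCMD_DOORLOCK) with the Prevent field in byte 4
 * (0 = allow removal, 1 = prevent removal), which is why only
 * cdb_opaque[4] needs to be filled in below.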
17864 */ 17865 if (flag == SD_REMOVAL_PREVENT) { 17866 mutex_enter(SD_MUTEX(un)); 17867 if (un->un_f_ejecting == TRUE) { 17868 mutex_exit(SD_MUTEX(un)); 17869 return (EAGAIN); 17870 } 17871 mutex_exit(SD_MUTEX(un)); 17872 } 17873 17874 bzero(&cdb, sizeof (cdb)); 17875 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17876 17877 cdb.scc_cmd = SCMD_DOORLOCK; 17878 cdb.cdb_opaque[4] = (uchar_t)flag; 17879 17880 ucmd_buf.uscsi_cdb = (char *)&cdb; 17881 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17882 ucmd_buf.uscsi_bufaddr = NULL; 17883 ucmd_buf.uscsi_buflen = 0; 17884 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17885 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17886 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17887 ucmd_buf.uscsi_timeout = 15; 17888 17889 SD_TRACE(SD_LOG_IO, un, 17890 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 17891 17892 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17893 UIO_SYSSPACE, path_flag); 17894 17895 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 17896 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17897 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 17898 /* fake success and skip subsequent doorlock commands */ 17899 un->un_f_doorlock_supported = FALSE; 17900 return (0); 17901 } 17902 17903 return (status); 17904 } 17905 17906 /* 17907 * Function: sd_send_scsi_READ_CAPACITY 17908 * 17909 * Description: This routine uses the scsi READ CAPACITY command to determine 17910 * the device capacity in number of blocks and the device native 17911 * block size. If this function returns a failure, then the 17912 * values in *capp and *lbap are undefined. If the capacity 17913 * returned is 0xffffffff then the lun is too large for a 17914 * normal READ CAPACITY command and the results of a 17915 * READ CAPACITY 16 will be used instead. 17916 * 17917 * Arguments: un - ptr to soft state struct for the target 17918 * capp - ptr to unsigned 64-bit variable to receive the 17919 * capacity value from the command. 17920 * lbap - ptr to unsigned 32-bit variable to receive the 17921 * block size value from the command. 17922 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17923 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17924 * to use the USCSI "direct" chain and bypass the normal 17925 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17926 * command is issued as part of an error recovery action. 17927 * 17928 * Return Code: 0 - Success 17929 * EIO - IO error 17930 * EACCES - Reservation conflict detected 17931 * EAGAIN - Device is becoming ready 17932 * errno return code from sd_send_scsi_cmd() 17933 * 17934 * Context: Can sleep. Blocks until command completes. 17935 */ 17936 17937 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 17938 17939 static int 17940 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 17941 int path_flag) 17942 { 17943 struct scsi_extended_sense sense_buf; 17944 struct uscsi_cmd ucmd_buf; 17945 union scsi_cdb cdb; 17946 uint32_t *capacity_buf; 17947 uint64_t capacity; 17948 uint32_t lbasize; 17949 int status; 17950 17951 ASSERT(un != NULL); 17952 ASSERT(!mutex_owned(SD_MUTEX(un))); 17953 ASSERT(capp != NULL); 17954 ASSERT(lbap != NULL); 17955 17956 SD_TRACE(SD_LOG_IO, un, 17957 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17958 17959 /* 17960 * First send a READ_CAPACITY command to the target. 17961 * (This command is mandatory under SCSI-2.) 17962 * 17963 * Set up the CDB for the READ_CAPACITY command.
The Partial 17964 * Medium Indicator bit is cleared. The address field must be 17965 * zero if the PMI bit is zero. 17966 */ 17967 bzero(&cdb, sizeof (cdb)); 17968 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17969 17970 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17971 17972 cdb.scc_cmd = SCMD_READ_CAPACITY; 17973 17974 ucmd_buf.uscsi_cdb = (char *)&cdb; 17975 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17976 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17977 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17978 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17979 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17980 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17981 ucmd_buf.uscsi_timeout = 60; 17982 17983 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17984 UIO_SYSSPACE, path_flag); 17985 17986 switch (status) { 17987 case 0: 17988 /* Return failure if we did not get valid capacity data. */ 17989 if (ucmd_buf.uscsi_resid != 0) { 17990 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17991 return (EIO); 17992 } 17993 17994 /* 17995 * Read capacity and block size from the READ CAPACITY 10 data. 17996 * This data may be adjusted later due to device specific 17997 * issues. 17998 * 17999 * According to the SCSI spec, the READ CAPACITY 10 18000 * command returns the following: 18001 * 18002 * bytes 0-3: Maximum logical block address available. 18003 * (MSB in byte:0 & LSB in byte:3) 18004 * 18005 * bytes 4-7: Block length in bytes 18006 * (MSB in byte:4 & LSB in byte:7) 18007 * 18008 */ 18009 capacity = BE_32(capacity_buf[0]); 18010 lbasize = BE_32(capacity_buf[1]); 18011 18012 /* 18013 * Done with capacity_buf 18014 */ 18015 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18016 18017 /* 18018 * if the reported capacity is set to all 0xf's, then 18019 * this disk is too large and requires SBC-2 commands. 18020 * Reissue the request using READ CAPACITY 16. 18021 */ 18022 if (capacity == 0xffffffff) { 18023 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 18024 &lbasize, path_flag); 18025 if (status != 0) { 18026 return (status); 18027 } 18028 } 18029 break; /* Success! */ 18030 case EIO: 18031 switch (ucmd_buf.uscsi_status) { 18032 case STATUS_RESERVATION_CONFLICT: 18033 status = EACCES; 18034 break; 18035 case STATUS_CHECK: 18036 /* 18037 * Check condition; look for ASC/ASCQ of 0x04/0x01 18038 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18039 */ 18040 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18041 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 18042 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 18043 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18044 return (EAGAIN); 18045 } 18046 break; 18047 default: 18048 break; 18049 } 18050 /* FALLTHRU */ 18051 default: 18052 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18053 return (status); 18054 } 18055 18056 /* 18057 * Some ATAPI CD-ROM drives report inaccurate LBA size values 18058 * (2352 and 0 are common) so for these devices always force the value 18059 * to 2048 as required by the ATAPI specs. 18060 */ 18061 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 18062 lbasize = 2048; 18063 } 18064 18065 /* 18066 * Get the maximum LBA value from the READ CAPACITY data. 18067 * Here we assume that the Partial Medium Indicator (PMI) bit 18068 * was cleared when issuing the command. This means that the LBA 18069 * returned from the device is the LBA of the last logical block 18070 * on the logical unit. The actual logical block count will be 18071 * this value plus one. 
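 *
 * Worked example (illustrative numbers only): a device reporting
 * a maximum LBA of 0x0007FFFF with a 2048-byte block length,
 * scaled to a 512-byte un_sys_blocksize as described below, yields
 *
 *	capacity = (0x0007FFFF + 1) * (2048 / 512)
 *	         = 0x00200000 512-byte blocks (1 GB).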
18072 *
18073 * Currently the capacity is saved in terms of un->un_sys_blocksize,
18074 * so scale the capacity value to reflect this.
18075 */
18076 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
18077
18078 /*
18079 * Copy the values from the READ CAPACITY command into the space
18080 * provided by the caller.
18081 */
18082 *capp = capacity;
18083 *lbap = lbasize;
18084
18085 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
18086 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18087
18088 /*
18089 * Both the lbasize and capacity from the device must be nonzero,
18090 * otherwise we assume that the values are not valid and return
18091 * failure to the caller. (4203735)
18092 */
18093 if ((capacity == 0) || (lbasize == 0)) {
18094 return (EIO);
18095 }
18096
18097 return (0);
18098 }
18099
18100 /*
18101 * Function: sd_send_scsi_READ_CAPACITY_16
18102 *
18103 * Description: This routine uses the scsi READ CAPACITY 16 command to
18104 * determine the device capacity in number of blocks and the
18105 * device native block size. If this function returns a failure,
18106 * then the values in *capp and *lbap are undefined.
18107 * This routine should always be called by
18108 * sd_send_scsi_READ_CAPACITY which will apply any device
18109 * specific adjustments to capacity and lbasize.
18110 *
18111 * Arguments: un - ptr to soft state struct for the target
18112 * capp - ptr to unsigned 64-bit variable to receive the
18113 * capacity value from the command.
18114 * lbap - ptr to unsigned 32-bit variable to receive the
18115 * block size value from the command
18116 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18117 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18118 * to use the USCSI "direct" chain and bypass the normal
18119 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
18120 * this command is issued as part of an error recovery
18121 * action.
18122 *
18123 * Return Code: 0 - Success
18124 * EIO - IO error
18125 * EACCES - Reservation conflict detected
18126 * EAGAIN - Device is becoming ready
18127 * errno return code from sd_send_scsi_cmd()
18128 *
18129 * Context: Can sleep. Blocks until command completes.
18130 */
18131
18132 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
18133
18134 static int
18135 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
18136 uint32_t *lbap, int path_flag)
18137 {
18138 struct scsi_extended_sense sense_buf;
18139 struct uscsi_cmd ucmd_buf;
18140 union scsi_cdb cdb;
18141 uint64_t *capacity16_buf;
18142 uint64_t capacity;
18143 uint32_t lbasize;
18144 int status;
18145
18146 ASSERT(un != NULL);
18147 ASSERT(!mutex_owned(SD_MUTEX(un)));
18148 ASSERT(capp != NULL);
18149 ASSERT(lbap != NULL);
18150
18151 SD_TRACE(SD_LOG_IO, un,
18152 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
18153
18154 /*
18155 * First send a READ_CAPACITY_16 command to the target.
18156 *
18157 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
18158 * Medium Indicator bit is cleared. The address field must be
18159 * zero if the PMI bit is zero.
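 *
 * Note: READ CAPACITY(10) can report at most a 32-bit block count,
 * i.e. 2 TB at 512 bytes per block, which is why the caller falls
 * back to this 16-byte form when a device reports 0xffffffff.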
18160 */
18161 bzero(&cdb, sizeof (cdb));
18162 bzero(&ucmd_buf, sizeof (ucmd_buf));
18163
18164 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
18165
18166 ucmd_buf.uscsi_cdb = (char *)&cdb;
18167 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
18168 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
18169 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
18170 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18171 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
18172 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18173 ucmd_buf.uscsi_timeout = 60;
18174
18175 /*
18176 * Read Capacity (16) is a Service Action In command. One
18177 * command byte (0x9E) is overloaded for multiple operations,
18178 * with the second CDB byte specifying the desired operation.
18179 */
18180 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
18181 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
18182
18183 /*
18184 * Fill in allocation length field
18185 */
18186 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
18187
18188 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18189 UIO_SYSSPACE, path_flag);
18190
18191 switch (status) {
18192 case 0:
18193 /* Return failure if we did not get valid capacity data. */
18194 if (ucmd_buf.uscsi_resid > 20) {
18195 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18196 return (EIO);
18197 }
18198
18199 /*
18200 * Read capacity and block size from the READ CAPACITY 16 data.
18201 * This data may be adjusted later due to device specific
18202 * issues.
18203 *
18204 * According to the SCSI spec, the READ CAPACITY 16
18205 * command returns the following:
18206 *
18207 * bytes 0-7: Maximum logical block address available.
18208 * (MSB in byte:0 & LSB in byte:7)
18209 *
18210 * bytes 8-11: Block length in bytes
18211 * (MSB in byte:8 & LSB in byte:11)
18212 *
18213 */
18214 capacity = BE_64(capacity16_buf[0]);
18215 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
18216
18217 /*
18218 * Done with capacity16_buf
18219 */
18220 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18221
18222 /*
18223 * if the reported capacity is set to all 0xf's, then
18224 * this disk is too large. This could only happen with
18225 * a device that supports LBAs larger than 64 bits which
18226 * are not defined by any current T10 standards.
18227 */
18228 if (capacity == 0xffffffffffffffff) {
18229 return (EIO);
18230 }
18231 break; /* Success! */
18232 case EIO:
18233 switch (ucmd_buf.uscsi_status) {
18234 case STATUS_RESERVATION_CONFLICT:
18235 status = EACCES;
18236 break;
18237 case STATUS_CHECK:
18238 /*
18239 * Check condition; look for ASC/ASCQ of 0x04/0x01
18240 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
18241 */
18242 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18243 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
18244 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
18245 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18246 return (EAGAIN);
18247 }
18248 break;
18249 default:
18250 break;
18251 }
18252 /* FALLTHRU */
18253 default:
18254 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18255 return (status);
18256 }
18257
18258 *capp = capacity;
18259 *lbap = lbasize;
18260
18261 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
18262 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18263
18264 return (0);
18265 }
18266
18267
18268 /*
18269 * Function: sd_send_scsi_START_STOP_UNIT
18270 *
18271 * Description: Issue a scsi START STOP UNIT command to the target.
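 * The flag value is copied verbatim into byte 4 of the CDB
 * below, i.e. the START and LOEJ bit positions of the START
 * STOP UNIT command, so the SD_TARGET_* values are expected
 * to encode those bits directly.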
18272 * 18273 * Arguments: un - pointer to driver soft state (unit) structure for 18274 * this target. 18275 * flag - SD_TARGET_START 18276 * SD_TARGET_STOP 18277 * SD_TARGET_EJECT 18278 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18279 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18280 * to use the USCSI "direct" chain and bypass the normal 18281 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18282 * command is issued as part of an error recovery action. 18283 * 18284 * Return Code: 0 - Success 18285 * EIO - IO error 18286 * EACCES - Reservation conflict detected 18287 * ENXIO - Not Ready, medium not present 18288 * errno return code from sd_send_scsi_cmd() 18289 * 18290 * Context: Can sleep. 18291 */ 18292 18293 static int 18294 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18295 { 18296 struct scsi_extended_sense sense_buf; 18297 union scsi_cdb cdb; 18298 struct uscsi_cmd ucmd_buf; 18299 int status; 18300 18301 ASSERT(un != NULL); 18302 ASSERT(!mutex_owned(SD_MUTEX(un))); 18303 18304 SD_TRACE(SD_LOG_IO, un, 18305 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18306 18307 if (un->un_f_check_start_stop && 18308 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18309 (un->un_f_start_stop_supported != TRUE)) { 18310 return (0); 18311 } 18312 18313 /* 18314 * If we are performing an eject operation and 18315 * we receive any command other than SD_TARGET_EJECT 18316 * we should immediately return. 18317 */ 18318 if (flag != SD_TARGET_EJECT) { 18319 mutex_enter(SD_MUTEX(un)); 18320 if (un->un_f_ejecting == TRUE) { 18321 mutex_exit(SD_MUTEX(un)); 18322 return (EAGAIN); 18323 } 18324 mutex_exit(SD_MUTEX(un)); 18325 } 18326 18327 bzero(&cdb, sizeof (cdb)); 18328 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18329 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18330 18331 cdb.scc_cmd = SCMD_START_STOP; 18332 cdb.cdb_opaque[4] = (uchar_t)flag; 18333 18334 ucmd_buf.uscsi_cdb = (char *)&cdb; 18335 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18336 ucmd_buf.uscsi_bufaddr = NULL; 18337 ucmd_buf.uscsi_buflen = 0; 18338 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18339 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18340 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18341 ucmd_buf.uscsi_timeout = 200; 18342 18343 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18344 UIO_SYSSPACE, path_flag); 18345 18346 switch (status) { 18347 case 0: 18348 break; /* Success! */ 18349 case EIO: 18350 switch (ucmd_buf.uscsi_status) { 18351 case STATUS_RESERVATION_CONFLICT: 18352 status = EACCES; 18353 break; 18354 case STATUS_CHECK: 18355 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18356 switch (scsi_sense_key( 18357 (uint8_t *)&sense_buf)) { 18358 case KEY_ILLEGAL_REQUEST: 18359 status = ENOTSUP; 18360 break; 18361 case KEY_NOT_READY: 18362 if (scsi_sense_asc( 18363 (uint8_t *)&sense_buf) 18364 == 0x3A) { 18365 status = ENXIO; 18366 } 18367 break; 18368 default: 18369 break; 18370 } 18371 } 18372 break; 18373 default: 18374 break; 18375 } 18376 break; 18377 default: 18378 break; 18379 } 18380 18381 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18382 18383 return (status); 18384 } 18385 18386 18387 /* 18388 * Function: sd_start_stop_unit_callback 18389 * 18390 * Description: timeout(9F) callback to begin recovery process for a 18391 * device that has spun down. 18392 * 18393 * Arguments: arg - pointer to associated softstate struct. 
18394 *
18395 * Context: Executes in a timeout(9F) thread context
18396 */
18397
18398 static void
18399 sd_start_stop_unit_callback(void *arg)
18400 {
18401 struct sd_lun *un = arg;
18402 ASSERT(un != NULL);
18403 ASSERT(!mutex_owned(SD_MUTEX(un)));
18404
18405 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");
18406
18407 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
18408 }
18409
18410
18411 /*
18412 * Function: sd_start_stop_unit_task
18413 *
18414 * Description: Recovery procedure when a drive is spun down.
18415 *
18416 * Arguments: arg - pointer to associated softstate struct.
18417 *
18418 * Context: Executes in a taskq(9F) thread context
18419 */
18420
18421 static void
18422 sd_start_stop_unit_task(void *arg)
18423 {
18424 struct sd_lun *un = arg;
18425
18426 ASSERT(un != NULL);
18427 ASSERT(!mutex_owned(SD_MUTEX(un)));
18428
18429 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");
18430
18431 /*
18432 * Some unformatted drives report a not ready error; there is no
18433 * need to restart if a format has been initiated.
18434 */
18435 mutex_enter(SD_MUTEX(un));
18436 if (un->un_f_format_in_progress == TRUE) {
18437 mutex_exit(SD_MUTEX(un));
18438 return;
18439 }
18440 mutex_exit(SD_MUTEX(un));
18441
18442 /*
18443 * When a START STOP command is issued from here, it is part of a
18444 * failure recovery operation and must be issued before any other
18445 * commands, including any pending retries. Thus it must be sent
18446 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up
18447 * succeeds or not; we will start I/O after the attempt.
18448 */
18449 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
18450 SD_PATH_DIRECT_PRIORITY);
18451
18452 /*
18453 * The above call blocks until the START_STOP_UNIT command completes.
18454 * Now that it has completed, we must re-try the original IO that
18455 * received the NOT READY condition in the first place. There are
18456 * three possible conditions here:
18457 *
18458 * (1) The original IO is on un_retry_bp.
18459 * (2) The original IO is on the regular wait queue, and un_retry_bp
18460 * is NULL.
18461 * (3) The original IO is on the regular wait queue, and un_retry_bp
18462 * points to some other, unrelated bp.
18463 *
18464 * For each case, we must call sd_start_cmds() with un_retry_bp
18465 * as the argument. If un_retry_bp is NULL, this will initiate
18466 * processing of the regular wait queue. If un_retry_bp is not NULL,
18467 * then this will process the bp on un_retry_bp. That may or may not
18468 * be the original IO, but that does not matter: the important thing
18469 * is to keep the IO processing going at this point.
18470 *
18471 * Note: This is a very specific error recovery sequence associated
18472 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
18473 * serialize the I/O with completion of the spin-up.
18474 */
18475 mutex_enter(SD_MUTEX(un));
18476 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18477 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
18478 un, un->un_retry_bp);
18479 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */
18480 sd_start_cmds(un, un->un_retry_bp);
18481 mutex_exit(SD_MUTEX(un));
18482
18483 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
18484 }
18485
18486
18487 /*
18488 * Function: sd_send_scsi_INQUIRY
18489 *
18490 * Description: Issue the scsi INQUIRY command.
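 * When evpd is nonzero, page_code selects the vital product
 * data page to fetch; with evpd zero, standard INQUIRY data is
 * returned and page_code should also be zero (per SPC).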
18491 *
18492 * Arguments: un
18493 * bufaddr
18494 * buflen
18495 * evpd
18496 * page_code
18497 * residp - pointer to receive the command residual (optional)
18498 *
18499 * Return Code: 0 - Success
18500 * errno return code from sd_send_scsi_cmd()
18501 *
18502 * Context: Can sleep. Does not return until command is completed.
18503 */
18504
18505 static int
18506 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
18507 uchar_t evpd, uchar_t page_code, size_t *residp)
18508 {
18509 union scsi_cdb cdb;
18510 struct uscsi_cmd ucmd_buf;
18511 int status;
18512
18513 ASSERT(un != NULL);
18514 ASSERT(!mutex_owned(SD_MUTEX(un)));
18515 ASSERT(bufaddr != NULL);
18516
18517 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
18518
18519 bzero(&cdb, sizeof (cdb));
18520 bzero(&ucmd_buf, sizeof (ucmd_buf));
18521 bzero(bufaddr, buflen);
18522
18523 cdb.scc_cmd = SCMD_INQUIRY;
18524 cdb.cdb_opaque[1] = evpd;
18525 cdb.cdb_opaque[2] = page_code;
18526 FORMG0COUNT(&cdb, buflen);
18527
18528 ucmd_buf.uscsi_cdb = (char *)&cdb;
18529 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
18530 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
18531 ucmd_buf.uscsi_buflen = buflen;
18532 ucmd_buf.uscsi_rqbuf = NULL;
18533 ucmd_buf.uscsi_rqlen = 0;
18534 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
18535 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
18536
18537 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18538 UIO_SYSSPACE, SD_PATH_DIRECT);
18539
18540 if ((status == 0) && (residp != NULL)) {
18541 *residp = ucmd_buf.uscsi_resid;
18542 }
18543
18544 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
18545
18546 return (status);
18547 }
18548
18549
18550 /*
18551 * Function: sd_send_scsi_TEST_UNIT_READY
18552 *
18553 * Description: Issue the scsi TEST UNIT READY command.
18554 * This routine can be told to set the flag USCSI_DIAGNOSE to
18555 * prevent retrying failed commands. Use this when the intent
18556 * is either to check for device readiness, to clear a Unit
18557 * Attention, or to clear any outstanding sense data.
18558 * However under specific conditions the expected behavior
18559 * is for retries to bring a device ready, so use the flag
18560 * with caution.
18561 *
18562 * Arguments: un
18563 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
18564 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
18565 * 0: don't check for media present; do retries on cmd.
18566 *
18567 * Return Code: 0 - Success
18568 * EIO - IO error
18569 * EACCES - Reservation conflict detected
18570 * ENXIO - Not Ready, medium not present
18571 * errno return code from sd_send_scsi_cmd()
18572 *
18573 * Context: Can sleep. Does not return until command is completed.
18574 */
18575
18576 static int
18577 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
18578 {
18579 struct scsi_extended_sense sense_buf;
18580 union scsi_cdb cdb;
18581 struct uscsi_cmd ucmd_buf;
18582 int status;
18583
18584 ASSERT(un != NULL);
18585 ASSERT(!mutex_owned(SD_MUTEX(un)));
18586
18587 SD_TRACE(SD_LOG_IO, un,
18588 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
18589
18590 /*
18591 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
18592 * timeouts when they receive a TUR and the queue is not empty. Check
18593 * the configuration flag set during attach (indicating the drive has
18594 * this firmware bug) and un_ncmds_in_transport before issuing the
18595 * TUR. If there are
18596 * pending commands, return success; this is a bit arbitrary but is ok
18597 * for non-removables (i.e.
the eliteI disks) and non-clustering 18598 * configurations. 18599 */ 18600 if (un->un_f_cfg_tur_check == TRUE) { 18601 mutex_enter(SD_MUTEX(un)); 18602 if (un->un_ncmds_in_transport != 0) { 18603 mutex_exit(SD_MUTEX(un)); 18604 return (0); 18605 } 18606 mutex_exit(SD_MUTEX(un)); 18607 } 18608 18609 bzero(&cdb, sizeof (cdb)); 18610 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18611 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18612 18613 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18614 18615 ucmd_buf.uscsi_cdb = (char *)&cdb; 18616 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18617 ucmd_buf.uscsi_bufaddr = NULL; 18618 ucmd_buf.uscsi_buflen = 0; 18619 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18620 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18621 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18622 18623 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18624 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18625 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18626 } 18627 ucmd_buf.uscsi_timeout = 60; 18628 18629 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18630 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18631 SD_PATH_STANDARD)); 18632 18633 switch (status) { 18634 case 0: 18635 break; /* Success! */ 18636 case EIO: 18637 switch (ucmd_buf.uscsi_status) { 18638 case STATUS_RESERVATION_CONFLICT: 18639 status = EACCES; 18640 break; 18641 case STATUS_CHECK: 18642 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18643 break; 18644 } 18645 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18646 (scsi_sense_key((uint8_t *)&sense_buf) == 18647 KEY_NOT_READY) && 18648 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18649 status = ENXIO; 18650 } 18651 break; 18652 default: 18653 break; 18654 } 18655 break; 18656 default: 18657 break; 18658 } 18659 18660 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18661 18662 return (status); 18663 } 18664 18665 18666 /* 18667 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18668 * 18669 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18670 * 18671 * Arguments: un 18672 * 18673 * Return Code: 0 - Success 18674 * EACCES 18675 * ENOTSUP 18676 * errno return code from sd_send_scsi_cmd() 18677 * 18678 * Context: Can sleep. Does not return until command is completed. 
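 *
 * Note: usr_cmd selects the PERSISTENT RESERVE IN service action
 * (SD_READ_KEYS or SD_READ_RESV) and is placed into byte 1 of the
 * CDB; when data_bufp is NULL, an MHIOC_RESV_KEY_SIZE scratch
 * buffer is allocated and freed internally.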
18679 */
18680
18681 static int
18682 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd,
18683 uint16_t data_len, uchar_t *data_bufp)
18684 {
18685 struct scsi_extended_sense sense_buf;
18686 union scsi_cdb cdb;
18687 struct uscsi_cmd ucmd_buf;
18688 int status;
18689 int no_caller_buf = FALSE;
18690
18691 ASSERT(un != NULL);
18692 ASSERT(!mutex_owned(SD_MUTEX(un)));
18693 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
18694
18695 SD_TRACE(SD_LOG_IO, un,
18696 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
18697
18698 bzero(&cdb, sizeof (cdb));
18699 bzero(&ucmd_buf, sizeof (ucmd_buf));
18700 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
18701 if (data_bufp == NULL) {
18702 /* Allocate a default buf if the caller did not give one */
18703 ASSERT(data_len == 0);
18704 data_len = MHIOC_RESV_KEY_SIZE;
18705 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
18706 no_caller_buf = TRUE;
18707 }
18708
18709 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
18710 cdb.cdb_opaque[1] = usr_cmd;
18711 FORMG1COUNT(&cdb, data_len);
18712
18713 ucmd_buf.uscsi_cdb = (char *)&cdb;
18714 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
18715 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
18716 ucmd_buf.uscsi_buflen = data_len;
18717 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18718 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
18719 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18720 ucmd_buf.uscsi_timeout = 60;
18721
18722 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18723 UIO_SYSSPACE, SD_PATH_STANDARD);
18724
18725 switch (status) {
18726 case 0:
18727 break; /* Success! */
18728 case EIO:
18729 switch (ucmd_buf.uscsi_status) {
18730 case STATUS_RESERVATION_CONFLICT:
18731 status = EACCES;
18732 break;
18733 case STATUS_CHECK:
18734 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18735 (scsi_sense_key((uint8_t *)&sense_buf) ==
18736 KEY_ILLEGAL_REQUEST)) {
18737 status = ENOTSUP;
18738 }
18739 break;
18740 default:
18741 break;
18742 }
18743 break;
18744 default:
18745 break;
18746 }
18747
18748 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
18749
18750 if (no_caller_buf == TRUE) {
18751 kmem_free(data_bufp, data_len);
18752 }
18753
18754 return (status);
18755 }
18756
18757
18758 /*
18759 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
18760 *
18761 * Description: This routine is the driver entry point for handling the
18762 * multi-host persistent reservation requests (the
18763 * MHIOCGRP_* registration/reservation ioctls) by sending the
18764 * SCSI-3 PROUT commands to the device.
18765 *
18766 * Arguments: un - Pointer to soft state struct for the target.
18767 * usr_cmd SCSI-3 reservation facility command (one of
18768 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
18769 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_REGISTERANDIGNOREKEY)
18770 * usr_bufp - user-provided pointer to a register, reserve
18771 * descriptor, or preempt-and-abort structure
18772 * (mhioc_register_t, mhioc_resv_desc_t, mhioc_preemptandabort_t)
18773 *
18774 * Return Code: 0 - Success
18775 * EACCES
18776 * ENOTSUP
18777 * errno return code from sd_send_scsi_cmd()
18778 *
18779 * Context: Can sleep. Does not return until command is completed.
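 *
 * Note: the parameter list sent to the device (sd_prout_t) is the
 * 24-byte PROUT basic parameter list defined by SPC-3: an 8-byte
 * reservation key, an 8-byte service action key, a 4-byte
 * scope-specific address, and flag bits such as APTPL.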
18780 */ 18781 18782 static int 18783 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18784 uchar_t *usr_bufp) 18785 { 18786 struct scsi_extended_sense sense_buf; 18787 union scsi_cdb cdb; 18788 struct uscsi_cmd ucmd_buf; 18789 int status; 18790 uchar_t data_len = sizeof (sd_prout_t); 18791 sd_prout_t *prp; 18792 18793 ASSERT(un != NULL); 18794 ASSERT(!mutex_owned(SD_MUTEX(un))); 18795 ASSERT(data_len == 24); /* required by scsi spec */ 18796 18797 SD_TRACE(SD_LOG_IO, un, 18798 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18799 18800 if (usr_bufp == NULL) { 18801 return (EINVAL); 18802 } 18803 18804 bzero(&cdb, sizeof (cdb)); 18805 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18806 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18807 prp = kmem_zalloc(data_len, KM_SLEEP); 18808 18809 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18810 cdb.cdb_opaque[1] = usr_cmd; 18811 FORMG1COUNT(&cdb, data_len); 18812 18813 ucmd_buf.uscsi_cdb = (char *)&cdb; 18814 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18815 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18816 ucmd_buf.uscsi_buflen = data_len; 18817 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18818 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18819 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18820 ucmd_buf.uscsi_timeout = 60; 18821 18822 switch (usr_cmd) { 18823 case SD_SCSI3_REGISTER: { 18824 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18825 18826 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18827 bcopy(ptr->newkey.key, prp->service_key, 18828 MHIOC_RESV_KEY_SIZE); 18829 prp->aptpl = ptr->aptpl; 18830 break; 18831 } 18832 case SD_SCSI3_RESERVE: 18833 case SD_SCSI3_RELEASE: { 18834 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18835 18836 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18837 prp->scope_address = BE_32(ptr->scope_specific_addr); 18838 cdb.cdb_opaque[2] = ptr->type; 18839 break; 18840 } 18841 case SD_SCSI3_PREEMPTANDABORT: { 18842 mhioc_preemptandabort_t *ptr = 18843 (mhioc_preemptandabort_t *)usr_bufp; 18844 18845 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18846 bcopy(ptr->victim_key.key, prp->service_key, 18847 MHIOC_RESV_KEY_SIZE); 18848 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18849 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18850 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18851 break; 18852 } 18853 case SD_SCSI3_REGISTERANDIGNOREKEY: 18854 { 18855 mhioc_registerandignorekey_t *ptr; 18856 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18857 bcopy(ptr->newkey.key, 18858 prp->service_key, MHIOC_RESV_KEY_SIZE); 18859 prp->aptpl = ptr->aptpl; 18860 break; 18861 } 18862 default: 18863 ASSERT(FALSE); 18864 break; 18865 } 18866 18867 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18868 UIO_SYSSPACE, SD_PATH_STANDARD); 18869 18870 switch (status) { 18871 case 0: 18872 break; /* Success! 
*/
18873 case EIO:
18874 switch (ucmd_buf.uscsi_status) {
18875 case STATUS_RESERVATION_CONFLICT:
18876 status = EACCES;
18877 break;
18878 case STATUS_CHECK:
18879 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18880 (scsi_sense_key((uint8_t *)&sense_buf) ==
18881 KEY_ILLEGAL_REQUEST)) {
18882 status = ENOTSUP;
18883 }
18884 break;
18885 default:
18886 break;
18887 }
18888 break;
18889 default:
18890 break;
18891 }
18892
18893 kmem_free(prp, data_len);
18894 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
18895 return (status);
18896 }
18897
18898
18899 /*
18900 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
18901 *
18902 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
18903 *
18904 * Arguments: un - pointer to the target's soft state struct
18905 * dkc - pointer to the callback structure
18906 *
18907 * Return Code: 0 - success
18908 * errno-type error code
18909 *
18910 * Context: kernel thread context only.
18911 *
18912 * _______________________________________________________________
18913 * | dkc_flag &   | dkc_callback | DKIOCFLUSHWRITECACHE            |
18914 * |FLUSH_VOLATILE|              | operation                       |
18915 * |______________|______________|_________________________________|
18916 * | 0            | NULL         | Synchronous flush on both       |
18917 * |              |              | volatile and non-volatile cache |
18918 * |______________|______________|_________________________________|
18919 * | 1            | NULL         | Synchronous flush on volatile   |
18920 * |              |              | cache; disk drivers may suppress|
18921 * |              |              | flush if disk table indicates   |
18922 * |              |              | non-volatile cache              |
18923 * |______________|______________|_________________________________|
18924 * | 0            | !NULL        | Asynchronous flush on both      |
18925 * |              |              | volatile and non-volatile cache;|
18926 * |______________|______________|_________________________________|
18927 * | 1            | !NULL        | Asynchronous flush on volatile  |
18928 * |              |              | cache; disk drivers may suppress|
18929 * |              |              | flush if disk table indicates   |
18930 * |              |              | non-volatile cache              |
18931 * |______________|______________|_________________________________|
18932 *
18933 */
18934
18935 static int
18936 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
18937 {
18938 struct sd_uscsi_info *uip;
18939 struct uscsi_cmd *uscmd;
18940 union scsi_cdb *cdb;
18941 struct buf *bp;
18942 int rval = 0;
18943 int is_async;
18944
18945 SD_TRACE(SD_LOG_IO, un,
18946 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);
18947
18948 ASSERT(un != NULL);
18949 ASSERT(!mutex_owned(SD_MUTEX(un)));
18950
18951 if (dkc == NULL || dkc->dkc_callback == NULL) {
18952 is_async = FALSE;
18953 } else {
18954 is_async = TRUE;
18955 }
18956
18957 mutex_enter(SD_MUTEX(un));
18958 /* check whether cache flush should be suppressed */
18959 if (un->un_f_suppress_cache_flush == TRUE) {
18960 mutex_exit(SD_MUTEX(un));
18961 /*
18962 * suppress the cache flush if the device is told to do
18963 * so by sd.conf or disk table
18964 */
18965 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \
18966 skip the cache flush since suppress_cache_flush is %d!\n",
18967 un->un_f_suppress_cache_flush);
18968
18969 if (is_async == TRUE) {
18970 /* invoke callback for asynchronous flush */
18971 (*dkc->dkc_callback)(dkc->dkc_cookie, 0);
18972 }
18973 return (rval);
18974 }
18975 mutex_exit(SD_MUTEX(un));
18976
18977 /*
18978 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
18979 * set properly
18980 */
18981 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
18982 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;
18983
18984
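	/*
	 * Decide, under SD_MUTEX, whether to set the SYNC_NV bit in the
	 * CDB: it is set only when the caller asked for a volatile-only
	 * flush (FLUSH_VOLATILE) and the device is known to support it.
	 */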
mutex_enter(SD_MUTEX(un));
18985 if (dkc != NULL && un->un_f_sync_nv_supported &&
18986 (dkc->dkc_flag & FLUSH_VOLATILE)) {
18987 /*
18988 * if the device supports SYNC_NV bit, turn on
18989 * the SYNC_NV bit to only flush volatile cache
18990 */
18991 cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
18992 }
18993 mutex_exit(SD_MUTEX(un));
18994
18995 /*
18996 * First get some memory for the uscsi_cmd struct and cdb
18997 * and initialize for SYNCHRONIZE_CACHE cmd.
18998 */
18999 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
19000 uscmd->uscsi_cdblen = CDB_GROUP1;
19001 uscmd->uscsi_cdb = (caddr_t)cdb;
19002 uscmd->uscsi_bufaddr = NULL;
19003 uscmd->uscsi_buflen = 0;
19004 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
19005 uscmd->uscsi_rqlen = SENSE_LENGTH;
19006 uscmd->uscsi_rqresid = SENSE_LENGTH;
19007 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19008 uscmd->uscsi_timeout = sd_io_time;
19009
19010 /*
19011 * Allocate an sd_uscsi_info struct and fill it with the info
19012 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
19013 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
19014 * since we allocate the buf here in this function, we do not
19015 * need to preserve the prior contents of b_private.
19016 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
19017 */
19018 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
19019 uip->ui_flags = SD_PATH_DIRECT;
19020 uip->ui_cmdp = uscmd;
19021
19022 bp = getrbuf(KM_SLEEP);
19023 bp->b_private = uip;
19024
19025 /*
19026 * Setup buffer to carry uscsi request.
19027 */
19028 bp->b_flags = B_BUSY;
19029 bp->b_bcount = 0;
19030 bp->b_blkno = 0;
19031
19032 if (is_async == TRUE) {
19033 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
19034 uip->ui_dkc = *dkc;
19035 }
19036
19037 bp->b_edev = SD_GET_DEV(un);
19038 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */
19039
19040 (void) sd_uscsi_strategy(bp);
19041
19042 /*
19043 * If this is a synchronous request, wait for completion.
19044 * If it is async, just return and let the b_iodone callback
19045 * clean up.
19046 * NOTE: On return, un_ncmds_in_driver will be decremented,
19047 * but it was also incremented in sd_uscsi_strategy(), so
19048 * we should be ok.
19049 */
19050 if (is_async == FALSE) {
19051 (void) biowait(bp);
19052 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
19053 }
19054
19055 return (rval);
19056 }
19057
19058
19059 static int
19060 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
19061 {
19062 struct sd_uscsi_info *uip;
19063 struct uscsi_cmd *uscmd;
19064 uint8_t *sense_buf;
19065 struct sd_lun *un;
19066 int status;
19067 union scsi_cdb *cdb;
19068
19069 uip = (struct sd_uscsi_info *)(bp->b_private);
19070 ASSERT(uip != NULL);
19071
19072 uscmd = uip->ui_cmdp;
19073 ASSERT(uscmd != NULL);
19074
19075 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
19076 ASSERT(sense_buf != NULL);
19077
19078 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
19079 ASSERT(un != NULL);
19080
19081 cdb = (union scsi_cdb *)uscmd->uscsi_cdb;
19082
19083 status = geterror(bp);
19084 switch (status) {
19085 case 0:
19086 break; /* Success!
*/
19087 case EIO:
19088 switch (uscmd->uscsi_status) {
19089 case STATUS_RESERVATION_CONFLICT:
19090 /* Ignore reservation conflict */
19091 status = 0;
19092 goto done;
19093
19094 case STATUS_CHECK:
19095 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
19096 (scsi_sense_key(sense_buf) ==
19097 KEY_ILLEGAL_REQUEST)) {
19098 /* Ignore Illegal Request error */
19099 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) {
19100 mutex_enter(SD_MUTEX(un));
19101 un->un_f_sync_nv_supported = FALSE;
19102 mutex_exit(SD_MUTEX(un));
19103 status = 0;
19104 SD_TRACE(SD_LOG_IO, un,
19105 "un_f_sync_nv_supported \
19106 is set to false.\n");
19107 goto done;
19108 }
19109
19110 mutex_enter(SD_MUTEX(un));
19111 un->un_f_sync_cache_supported = FALSE;
19112 mutex_exit(SD_MUTEX(un));
19113 SD_TRACE(SD_LOG_IO, un,
19114 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
19115 un_f_sync_cache_supported set to false \
19116 with asc = %x, ascq = %x\n",
19117 scsi_sense_asc(sense_buf),
19118 scsi_sense_ascq(sense_buf));
19119 status = ENOTSUP;
19120 goto done;
19121 }
19122 break;
19123 default:
19124 break;
19125 }
19126 /* FALLTHRU */
19127 default:
19128 /*
19129 * Don't log an error message if this device
19130 * has removable media.
19131 */
19132 if (!un->un_f_has_removable_media) {
19133 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19134 "SYNCHRONIZE CACHE command failed (%d)\n", status);
19135 }
19136 break;
19137 }
19138
19139 done:
19140 if (uip->ui_dkc.dkc_callback != NULL) {
19141 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
19142 }
19143
19144 ASSERT((bp->b_flags & B_REMAPPED) == 0);
19145 freerbuf(bp);
19146 kmem_free(uip, sizeof (struct sd_uscsi_info));
19147 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
19148 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
19149 kmem_free(uscmd, sizeof (struct uscsi_cmd));
19150
19151 return (status);
19152 }
19153
19154
19155 /*
19156 * Function: sd_send_scsi_GET_CONFIGURATION
19157 *
19158 * Description: Issues the get configuration command to the device.
19159 * Called from sd_check_for_writable_cd & sd_get_media_info;
19160 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
19161 * Arguments: un
19162 * ucmdbuf
19163 * rqbuf
19164 * rqbuflen
19165 * bufaddr
19166 * buflen
19167 * path_flag
19168 *
19169 * Return Code: 0 - Success
19170 * errno return code from sd_send_scsi_cmd()
19171 *
19172 * Context: Can sleep. Does not return until command is completed.
19173 *
19174 */
19175
19176 static int
19177 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
19178 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
19179 int path_flag)
19180 {
19181 char cdb[CDB_GROUP1];
19182 int status;
19183
19184 ASSERT(un != NULL);
19185 ASSERT(!mutex_owned(SD_MUTEX(un)));
19186 ASSERT(bufaddr != NULL);
19187 ASSERT(ucmdbuf != NULL);
19188 ASSERT(rqbuf != NULL);
19189
19190 SD_TRACE(SD_LOG_IO, un,
19191 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
19192
19193 bzero(cdb, sizeof (cdb));
19194 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
19195 bzero(rqbuf, rqbuflen);
19196 bzero(bufaddr, buflen);
19197
19198 /*
19199 * Set up cdb field for the get configuration command.
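 * The RT field (byte 1) is 0x02, which per MMC requests the feature
 * header plus a single feature descriptor; the allocation length
 * (byte 8) is capped at SD_PROFILE_HEADER_LEN, which is all the
 * callers need.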
19200 */ 19201 cdb[0] = SCMD_GET_CONFIGURATION; 19202 cdb[1] = 0x02; /* Requested Type */ 19203 cdb[8] = SD_PROFILE_HEADER_LEN; 19204 ucmdbuf->uscsi_cdb = cdb; 19205 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19206 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19207 ucmdbuf->uscsi_buflen = buflen; 19208 ucmdbuf->uscsi_timeout = sd_io_time; 19209 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19210 ucmdbuf->uscsi_rqlen = rqbuflen; 19211 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19212 19213 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19214 UIO_SYSSPACE, path_flag); 19215 19216 switch (status) { 19217 case 0: 19218 break; /* Success! */ 19219 case EIO: 19220 switch (ucmdbuf->uscsi_status) { 19221 case STATUS_RESERVATION_CONFLICT: 19222 status = EACCES; 19223 break; 19224 default: 19225 break; 19226 } 19227 break; 19228 default: 19229 break; 19230 } 19231 19232 if (status == 0) { 19233 SD_DUMP_MEMORY(un, SD_LOG_IO, 19234 "sd_send_scsi_GET_CONFIGURATION: data", 19235 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19236 } 19237 19238 SD_TRACE(SD_LOG_IO, un, 19239 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19240 19241 return (status); 19242 } 19243 19244 /* 19245 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19246 * 19247 * Description: Issues the get configuration command to the device to 19248 * retrieve a specific feature. Called from 19249 * sd_check_for_writable_cd & sd_set_mmc_caps. 19250 * Arguments: un 19251 * ucmdbuf 19252 * rqbuf 19253 * rqbuflen 19254 * bufaddr 19255 * buflen 19256 * feature 19257 * 19258 * Return Code: 0 - Success 19259 * errno return code from sd_send_scsi_cmd() 19260 * 19261 * Context: Can sleep. Does not return until command is completed. 19262 * 19263 */ 19264 static int 19265 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19266 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19267 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19268 { 19269 char cdb[CDB_GROUP1]; 19270 int status; 19271 19272 ASSERT(un != NULL); 19273 ASSERT(!mutex_owned(SD_MUTEX(un))); 19274 ASSERT(bufaddr != NULL); 19275 ASSERT(ucmdbuf != NULL); 19276 ASSERT(rqbuf != NULL); 19277 19278 SD_TRACE(SD_LOG_IO, un, 19279 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19280 19281 bzero(cdb, sizeof (cdb)); 19282 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19283 bzero(rqbuf, rqbuflen); 19284 bzero(bufaddr, buflen); 19285 19286 /* 19287 * Set up cdb field for the get configuration command. 19288 */ 19289 cdb[0] = SCMD_GET_CONFIGURATION; 19290 cdb[1] = 0x02; /* Requested Type */ 19291 cdb[3] = feature; 19292 cdb[8] = buflen; 19293 ucmdbuf->uscsi_cdb = cdb; 19294 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19295 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19296 ucmdbuf->uscsi_buflen = buflen; 19297 ucmdbuf->uscsi_timeout = sd_io_time; 19298 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19299 ucmdbuf->uscsi_rqlen = rqbuflen; 19300 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19301 19302 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19303 UIO_SYSSPACE, path_flag); 19304 19305 switch (status) { 19306 case 0: 19307 break; /* Success! 
*/
19308 case EIO:
19309 switch (ucmdbuf->uscsi_status) {
19310 case STATUS_RESERVATION_CONFLICT:
19311 status = EACCES;
19312 break;
19313 default:
19314 break;
19315 }
19316 break;
19317 default:
19318 break;
19319 }
19320
19321 if (status == 0) {
19322 SD_DUMP_MEMORY(un, SD_LOG_IO,
19323 "sd_send_scsi_feature_GET_CONFIGURATION: data",
19324 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
19325 }
19326
19327 SD_TRACE(SD_LOG_IO, un,
19328 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
19329
19330 return (status);
19331 }
19332
19333
19334 /*
19335 * Function: sd_send_scsi_MODE_SENSE
19336 *
19337 * Description: Utility function for issuing a scsi MODE SENSE command.
19338 * Note: This routine uses a consistent implementation for Group0,
19339 * Group1, and Group2 commands across all platforms. ATAPI devices
19340 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19341 *
19342 * Arguments: un - pointer to the softstate struct for the target.
19343 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or
19344 * CDB_GROUP[1|2] (10 byte)).
19345 * bufaddr - buffer for page data retrieved from the target.
19346 * buflen - size of page to be retrieved.
19347 * page_code - page code of data to be retrieved from the target.
19348 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19349 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19350 * to use the USCSI "direct" chain and bypass the normal
19351 * command waitq.
19352 *
19353 * Return Code: 0 - Success
19354 * errno return code from sd_send_scsi_cmd()
19355 *
19356 * Context: Can sleep. Does not return until command is completed.
19357 */
19358
19359 static int
19360 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19361 size_t buflen, uchar_t page_code, int path_flag)
19362 {
19363 struct scsi_extended_sense sense_buf;
19364 union scsi_cdb cdb;
19365 struct uscsi_cmd ucmd_buf;
19366 int status;
19367 int headlen;
19368
19369 ASSERT(un != NULL);
19370 ASSERT(!mutex_owned(SD_MUTEX(un)));
19371 ASSERT(bufaddr != NULL);
19372 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19373 (cdbsize == CDB_GROUP2));
19374
19375 SD_TRACE(SD_LOG_IO, un,
19376 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
19377
19378 bzero(&cdb, sizeof (cdb));
19379 bzero(&ucmd_buf, sizeof (ucmd_buf));
19380 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19381 bzero(bufaddr, buflen);
19382
19383 if (cdbsize == CDB_GROUP0) {
19384 cdb.scc_cmd = SCMD_MODE_SENSE;
19385 cdb.cdb_opaque[2] = page_code;
19386 FORMG0COUNT(&cdb, buflen);
19387 headlen = MODE_HEADER_LENGTH;
19388 } else {
19389 cdb.scc_cmd = SCMD_MODE_SENSE_G1;
19390 cdb.cdb_opaque[2] = page_code;
19391 FORMG1COUNT(&cdb, buflen);
19392 headlen = MODE_HEADER_LENGTH_GRP2;
19393 }
19394
19395 ASSERT(headlen <= buflen);
19396 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19397
19398 ucmd_buf.uscsi_cdb = (char *)&cdb;
19399 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19400 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19401 ucmd_buf.uscsi_buflen = buflen;
19402 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19403 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19404 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19405 ucmd_buf.uscsi_timeout = 60;
19406
19407 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19408 UIO_SYSSPACE, path_flag);
19409
19410 switch (status) {
19411 case 0:
19412 /*
19413 * sr_check_wp() uses the 0x3f page code and checks the mode
19414 * page header to determine whether the target device is
write-protected.
19415 * But some USB devices return 0 bytes for the 0x3f page code. In
19416 * this case, make sure that at least the mode page header is
19417 * returned.
19418 */
19419 if (buflen - ucmd_buf.uscsi_resid < headlen)
19420 status = EIO;
19421 break; /* Success! */
19422 case EIO:
19423 switch (ucmd_buf.uscsi_status) {
19424 case STATUS_RESERVATION_CONFLICT:
19425 status = EACCES;
19426 break;
19427 default:
19428 break;
19429 }
19430 break;
19431 default:
19432 break;
19433 }
19434
19435 if (status == 0) {
19436 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
19437 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19438 }
19439 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
19440
19441 return (status);
19442 }
19443
19444
19445 /*
19446 * Function: sd_send_scsi_MODE_SELECT
19447 *
19448 * Description: Utility function for issuing a scsi MODE SELECT command.
19449 * Note: This routine uses a consistent implementation for Group0,
19450 * Group1, and Group2 commands across all platforms. ATAPI devices
19451 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19452 *
19453 * Arguments: un - pointer to the softstate struct for the target.
19454 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or
19455 * CDB_GROUP[1|2] (10 byte)).
19456 * bufaddr - buffer containing the mode page data to send to
19457 * the target. buflen - size of the page data to be transferred.
19458 * save_page - boolean to determine if SP bit should be set.
19459 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19460 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19461 * to use the USCSI "direct" chain and bypass the normal
19462 * command waitq.
19463 *
19464 * Return Code: 0 - Success
19465 * errno return code from sd_send_scsi_cmd()
19466 *
19467 * Context: Can sleep. Does not return until command is completed.
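 *
 * Note: the PF (page format) bit in CDB byte 1 is set unconditionally
 * below, since many third-party drives require it; the SP (save page)
 * bit is set only when save_page == SD_SAVE_PAGE.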
19468 */
19469
19470 static int
19471 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19472 size_t buflen, uchar_t save_page, int path_flag)
19473 {
19474 struct scsi_extended_sense sense_buf;
19475 union scsi_cdb cdb;
19476 struct uscsi_cmd ucmd_buf;
19477 int status;
19478
19479 ASSERT(un != NULL);
19480 ASSERT(!mutex_owned(SD_MUTEX(un)));
19481 ASSERT(bufaddr != NULL);
19482 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19483 (cdbsize == CDB_GROUP2));
19484
19485 SD_TRACE(SD_LOG_IO, un,
19486 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
19487
19488 bzero(&cdb, sizeof (cdb));
19489 bzero(&ucmd_buf, sizeof (ucmd_buf));
19490 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19491
19492 /* Set the PF bit for many third party drives */
19493 cdb.cdb_opaque[1] = 0x10;
19494
19495 /* Set the savepage(SP) bit if given */
19496 if (save_page == SD_SAVE_PAGE) {
19497 cdb.cdb_opaque[1] |= 0x01;
19498 }
19499
19500 if (cdbsize == CDB_GROUP0) {
19501 cdb.scc_cmd = SCMD_MODE_SELECT;
19502 FORMG0COUNT(&cdb, buflen);
19503 } else {
19504 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
19505 FORMG1COUNT(&cdb, buflen);
19506 }
19507
19508 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19509
19510 ucmd_buf.uscsi_cdb = (char *)&cdb;
19511 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19512 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19513 ucmd_buf.uscsi_buflen = buflen;
19514 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19515 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19516 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
19517 ucmd_buf.uscsi_timeout = 60;
19518
19519 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19520 UIO_SYSSPACE, path_flag);
19521
19522 switch (status) {
19523 case 0:
19524 break; /* Success! */
19525 case EIO:
19526 switch (ucmd_buf.uscsi_status) {
19527 case STATUS_RESERVATION_CONFLICT:
19528 status = EACCES;
19529 break;
19530 default:
19531 break;
19532 }
19533 break;
19534 default:
19535 break;
19536 }
19537
19538 if (status == 0) {
19539 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
19540 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19541 }
19542 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
19543
19544 return (status);
19545 }
19546
19547
19548 /*
19549 * Function: sd_send_scsi_RDWR
19550 *
19551 * Description: Issue a scsi READ or WRITE command with the given parameters.
19552 *
19553 * Arguments: un: Pointer to the sd_lun struct for the target.
19554 * cmd: SCMD_READ or SCMD_WRITE
19555 * bufaddr: Address of caller's buffer to receive the RDWR data
19556 * buflen: Length of caller's buffer to receive the RDWR data.
19557 * start_block: Block number for the start of the RDWR operation.
19558 * (Assumes target-native block size.)
19559 * (For SCMD_WRITE, bufaddr instead supplies the data
19560 * to be written to the device.)
19561 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19562 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19563 * to use the USCSI "direct" chain and bypass the normal
19564 * command waitq.
19565 *
19566 * Return Code: 0 - Success
19567 * errno return code from sd_send_scsi_cmd()
19568 *
19569 * Context: Can sleep. Does not return until command is completed.
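 *
 * The CDB group is chosen below from the starting block: a 6-byte CDB
 * can only address a 21-bit LBA, a 10-byte CDB a 32-bit LBA, and
 * anything larger needs the 16-byte form; ATAPI devices always get at
 * least a 10-byte CDB.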
19570 */ 19571 19572 static int 19573 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19574 size_t buflen, daddr_t start_block, int path_flag) 19575 { 19576 struct scsi_extended_sense sense_buf; 19577 union scsi_cdb cdb; 19578 struct uscsi_cmd ucmd_buf; 19579 uint32_t block_count; 19580 int status; 19581 int cdbsize; 19582 uchar_t flag; 19583 19584 ASSERT(un != NULL); 19585 ASSERT(!mutex_owned(SD_MUTEX(un))); 19586 ASSERT(bufaddr != NULL); 19587 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19588 19589 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19590 19591 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19592 return (EINVAL); 19593 } 19594 19595 mutex_enter(SD_MUTEX(un)); 19596 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19597 mutex_exit(SD_MUTEX(un)); 19598 19599 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19600 19601 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19602 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19603 bufaddr, buflen, start_block, block_count); 19604 19605 bzero(&cdb, sizeof (cdb)); 19606 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19607 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19608 19609 /* Compute CDB size to use */ 19610 if (start_block > 0xffffffff) 19611 cdbsize = CDB_GROUP4; 19612 else if ((start_block & 0xFFE00000) || 19613 (un->un_f_cfg_is_atapi == TRUE)) 19614 cdbsize = CDB_GROUP1; 19615 else 19616 cdbsize = CDB_GROUP0; 19617 19618 switch (cdbsize) { 19619 case CDB_GROUP0: /* 6-byte CDBs */ 19620 cdb.scc_cmd = cmd; 19621 FORMG0ADDR(&cdb, start_block); 19622 FORMG0COUNT(&cdb, block_count); 19623 break; 19624 case CDB_GROUP1: /* 10-byte CDBs */ 19625 cdb.scc_cmd = cmd | SCMD_GROUP1; 19626 FORMG1ADDR(&cdb, start_block); 19627 FORMG1COUNT(&cdb, block_count); 19628 break; 19629 case CDB_GROUP4: /* 16-byte CDBs */ 19630 cdb.scc_cmd = cmd | SCMD_GROUP4; 19631 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19632 FORMG4COUNT(&cdb, block_count); 19633 break; 19634 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19635 default: 19636 /* All others reserved */ 19637 return (EINVAL); 19638 } 19639 19640 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19641 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19642 19643 ucmd_buf.uscsi_cdb = (char *)&cdb; 19644 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19645 ucmd_buf.uscsi_bufaddr = bufaddr; 19646 ucmd_buf.uscsi_buflen = buflen; 19647 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19648 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19649 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19650 ucmd_buf.uscsi_timeout = 60; 19651 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19652 UIO_SYSSPACE, path_flag); 19653 switch (status) { 19654 case 0: 19655 break; /* Success! */ 19656 case EIO: 19657 switch (ucmd_buf.uscsi_status) { 19658 case STATUS_RESERVATION_CONFLICT: 19659 status = EACCES; 19660 break; 19661 default: 19662 break; 19663 } 19664 break; 19665 default: 19666 break; 19667 } 19668 19669 if (status == 0) { 19670 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19671 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19672 } 19673 19674 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19675 19676 return (status); 19677 } 19678 19679 19680 /* 19681 * Function: sd_send_scsi_LOG_SENSE 19682 * 19683 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19684 * 19685 * Arguments: un: Pointer to the sd_lun struct for the target. 
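 * bufaddr: buffer to receive the log page data
 * buflen: length of the buffer at bufaddr
 * page_code: log page to be requested
 * page_control: page control field (placed in bits 6-7 of
 * CDB byte 2)
 * param_ptr: parameter pointer for the page (CDB bytes 5-6)
 * path_flag: SD_PATH_DIRECT or SD_PATH_STANDARD, as for the
 * routines above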
19686 *
19687 * Return Code: 0 - Success
19688 * errno return code from sd_send_scsi_cmd()
19689 *
19690 * Context: Can sleep. Does not return until command is completed.
19691 */
19692
19693 static int
19694 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen,
19695 uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
19696 int path_flag)
19697
19698 {
19699 struct scsi_extended_sense sense_buf;
19700 union scsi_cdb cdb;
19701 struct uscsi_cmd ucmd_buf;
19702 int status;
19703
19704 ASSERT(un != NULL);
19705 ASSERT(!mutex_owned(SD_MUTEX(un)));
19706
19707 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);
19708
19709 bzero(&cdb, sizeof (cdb));
19710 bzero(&ucmd_buf, sizeof (ucmd_buf));
19711 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19712
19713 cdb.scc_cmd = SCMD_LOG_SENSE_G1;
19714 cdb.cdb_opaque[2] = (page_control << 6) | page_code;
19715 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
19716 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
19717 FORMG1COUNT(&cdb, buflen);
19718
19719 ucmd_buf.uscsi_cdb = (char *)&cdb;
19720 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
19721 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19722 ucmd_buf.uscsi_buflen = buflen;
19723 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19724 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19725 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19726 ucmd_buf.uscsi_timeout = 60;
19727
19728 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19729 UIO_SYSSPACE, path_flag);
19730
19731 switch (status) {
19732 case 0:
19733 break;
19734 case EIO:
19735 switch (ucmd_buf.uscsi_status) {
19736 case STATUS_RESERVATION_CONFLICT:
19737 status = EACCES;
19738 break;
19739 case STATUS_CHECK:
19740 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19741 (scsi_sense_key((uint8_t *)&sense_buf) ==
19742 KEY_ILLEGAL_REQUEST) &&
19743 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
19744 /*
19745 * ASC 0x24: INVALID FIELD IN CDB
19746 */
19747 switch (page_code) {
19748 case START_STOP_CYCLE_PAGE:
19749 /*
19750 * The start stop cycle counter is
19751 * implemented as page 0x31 in earlier
19752 * generation disks. In new generation
19753 * disks the start stop cycle counter is
19754 * implemented as page 0xE. To handle
19755 * this, if an attempt to read log
19756 * page 0xE fails, we retry the
19757 * request using page 0x31.
19758 *
19759 * Network storage BU committed to
19760 * maintain the page 0x31 for this
19761 * purpose and will not have any other
19762 * page implemented with page code 0x31
19763 * until all disks transition to the
19764 * standard page.
19765 */
19766 mutex_enter(SD_MUTEX(un));
19767 un->un_start_stop_cycle_page =
19768 START_STOP_CYCLE_VU_PAGE;
19769 cdb.cdb_opaque[2] =
19770 (char)(page_control << 6) |
19771 un->un_start_stop_cycle_page;
19772 mutex_exit(SD_MUTEX(un));
19773 status = sd_send_scsi_cmd(
19774 SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19775 UIO_SYSSPACE, path_flag);
19776
19777 break;
19778 case TEMPERATURE_PAGE:
19779 status = ENOTTY;
19780 break;
19781 default:
19782 break;
19783 }
19784 }
19785 break;
19786 default:
19787 break;
19788 }
19789 break;
19790 default:
19791 break;
19792 }
19793
19794 if (status == 0) {
19795 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
19796 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19797 }
19798
19799 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");
19800
19801 return (status);
19802 }
19803
19804
19805 /*
19806 * Function: sdioctl
19807 *
19808 * Description: Driver's ioctl(9e) entry point function.
19809 *
19810 * Arguments: dev - device number
19811 * cmd - ioctl operation to be performed
19812 * arg - user argument, contains data to be set or reference
19813 * parameter for get
19814 * flag - bit flag, indicating open settings, 32/64 bit type
19815 * cred_p - user credential pointer
19816 * rval_p - calling process return value (OPT)
19817 *
19818 * Return Code: EINVAL
19819 * ENOTTY
19820 * ENXIO
19821 * EIO
19822 * EFAULT
19823 * ENOTSUP
19824 * EPERM
19825 *
19826 * Context: Called from the device switch at normal priority.
19827 */
19828
19829 static int
19830 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
19831 {
19832 struct sd_lun *un = NULL;
19833 int err = 0;
19834 int i = 0;
19835 cred_t *cr;
19836 int tmprval = EINVAL;
19837 int is_valid;
19838
19839 /*
19840 * All device accesses go through sdstrategy, where we check the
19841 * suspend status.
19842 */
19843 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
19844 return (ENXIO);
19845 }
19846
19847 ASSERT(!mutex_owned(SD_MUTEX(un)));
19848
19849
19850 is_valid = SD_IS_VALID_LABEL(un);
19851
19852 /*
19853 * Moved this wait from sd_uscsi_strategy to here for
19854 * reasons of deadlock prevention. Internal driver commands,
19855 * specifically those to change a device's power level, result
19856 * in a call to sd_uscsi_strategy.
19857 */
19858 mutex_enter(SD_MUTEX(un));
19859 while ((un->un_state == SD_STATE_SUSPENDED) ||
19860 (un->un_state == SD_STATE_PM_CHANGING)) {
19861 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
19862 }
19863 /*
19864 * Twiddling the counter here protects commands from now
19865 * through to the top of sd_uscsi_strategy. Without the
19866 * counter increment, a power down, for example, could get in
19867 * after the above check for state is made and before
19868 * execution gets to the top of sd_uscsi_strategy.
19869 * That would cause problems.
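 * (The increment below is balanced by a decrement on each
 * early-return path in this function; the completion path at the
 * end of sdioctl is expected to drop it as well.)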
19870 */ 19871 un->un_ncmds_in_driver++; 19872 19873 if (!is_valid && 19874 (flag & (FNDELAY | FNONBLOCK))) { 19875 switch (cmd) { 19876 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19877 case DKIOCGVTOC: 19878 case DKIOCGAPART: 19879 case DKIOCPARTINFO: 19880 case DKIOCSGEOM: 19881 case DKIOCSAPART: 19882 case DKIOCGETEFI: 19883 case DKIOCPARTITION: 19884 case DKIOCSVTOC: 19885 case DKIOCSETEFI: 19886 case DKIOCGMBOOT: 19887 case DKIOCSMBOOT: 19888 case DKIOCG_PHYGEOM: 19889 case DKIOCG_VIRTGEOM: 19890 /* let cmlb handle it */ 19891 goto skip_ready_valid; 19892 19893 case CDROMPAUSE: 19894 case CDROMRESUME: 19895 case CDROMPLAYMSF: 19896 case CDROMPLAYTRKIND: 19897 case CDROMREADTOCHDR: 19898 case CDROMREADTOCENTRY: 19899 case CDROMSTOP: 19900 case CDROMSTART: 19901 case CDROMVOLCTRL: 19902 case CDROMSUBCHNL: 19903 case CDROMREADMODE2: 19904 case CDROMREADMODE1: 19905 case CDROMREADOFFSET: 19906 case CDROMSBLKMODE: 19907 case CDROMGBLKMODE: 19908 case CDROMGDRVSPEED: 19909 case CDROMSDRVSPEED: 19910 case CDROMCDDA: 19911 case CDROMCDXA: 19912 case CDROMSUBCODE: 19913 if (!ISCD(un)) { 19914 un->un_ncmds_in_driver--; 19915 ASSERT(un->un_ncmds_in_driver >= 0); 19916 mutex_exit(SD_MUTEX(un)); 19917 return (ENOTTY); 19918 } 19919 break; 19920 case FDEJECT: 19921 case DKIOCEJECT: 19922 case CDROMEJECT: 19923 if (!un->un_f_eject_media_supported) { 19924 un->un_ncmds_in_driver--; 19925 ASSERT(un->un_ncmds_in_driver >= 0); 19926 mutex_exit(SD_MUTEX(un)); 19927 return (ENOTTY); 19928 } 19929 break; 19930 case DKIOCFLUSHWRITECACHE: 19931 mutex_exit(SD_MUTEX(un)); 19932 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19933 if (err != 0) { 19934 mutex_enter(SD_MUTEX(un)); 19935 un->un_ncmds_in_driver--; 19936 ASSERT(un->un_ncmds_in_driver >= 0); 19937 mutex_exit(SD_MUTEX(un)); 19938 return (EIO); 19939 } 19940 mutex_enter(SD_MUTEX(un)); 19941 /* FALLTHROUGH */ 19942 case DKIOCREMOVABLE: 19943 case DKIOCHOTPLUGGABLE: 19944 case DKIOCINFO: 19945 case DKIOCGMEDIAINFO: 19946 case MHIOCENFAILFAST: 19947 case MHIOCSTATUS: 19948 case MHIOCTKOWN: 19949 case MHIOCRELEASE: 19950 case MHIOCGRP_INKEYS: 19951 case MHIOCGRP_INRESV: 19952 case MHIOCGRP_REGISTER: 19953 case MHIOCGRP_RESERVE: 19954 case MHIOCGRP_PREEMPTANDABORT: 19955 case MHIOCGRP_REGISTERANDIGNOREKEY: 19956 case CDROMCLOSETRAY: 19957 case USCSICMD: 19958 goto skip_ready_valid; 19959 default: 19960 break; 19961 } 19962 19963 mutex_exit(SD_MUTEX(un)); 19964 err = sd_ready_and_valid(un); 19965 mutex_enter(SD_MUTEX(un)); 19966 19967 if (err != SD_READY_VALID) { 19968 switch (cmd) { 19969 case DKIOCSTATE: 19970 case CDROMGDRVSPEED: 19971 case CDROMSDRVSPEED: 19972 case FDEJECT: /* for eject command */ 19973 case DKIOCEJECT: 19974 case CDROMEJECT: 19975 case DKIOCREMOVABLE: 19976 case DKIOCHOTPLUGGABLE: 19977 break; 19978 default: 19979 if (un->un_f_has_removable_media) { 19980 err = ENXIO; 19981 } else { 19982 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19983 if (err == SD_RESERVED_BY_OTHERS) { 19984 err = EACCES; 19985 } else { 19986 err = EIO; 19987 } 19988 } 19989 un->un_ncmds_in_driver--; 19990 ASSERT(un->un_ncmds_in_driver >= 0); 19991 mutex_exit(SD_MUTEX(un)); 19992 return (err); 19993 } 19994 } 19995 } 19996 19997 skip_ready_valid: 19998 mutex_exit(SD_MUTEX(un)); 19999 20000 switch (cmd) { 20001 case DKIOCINFO: 20002 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20003 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20004 break; 20005 20006 case DKIOCGMEDIAINFO: 20007 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20008 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 20009 break; 20010 20011 case DKIOCGGEOM: 20012 case DKIOCGVTOC: 20013 case DKIOCGAPART: 20014 case DKIOCPARTINFO: 20015 case DKIOCSGEOM: 20016 case DKIOCSAPART: 20017 case DKIOCGETEFI: 20018 case DKIOCPARTITION: 20019 case DKIOCSVTOC: 20020 case DKIOCSETEFI: 20021 case DKIOCGMBOOT: 20022 case DKIOCSMBOOT: 20023 case DKIOCG_PHYGEOM: 20024 case DKIOCG_VIRTGEOM: 20025 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 20026 20027 /* TUR should spin up */ 20028 20029 if (un->un_f_has_removable_media) 20030 err = sd_send_scsi_TEST_UNIT_READY(un, 20031 SD_CHECK_FOR_MEDIA); 20032 else 20033 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20034 20035 if (err != 0) 20036 break; 20037 20038 err = cmlb_ioctl(un->un_cmlbhandle, dev, 20039 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 20040 20041 if ((err == 0) && 20042 ((cmd == DKIOCSETEFI) || 20043 ((un->un_f_pkstats_enabled) && 20044 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC)))) { 20045 20046 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 20047 (void *)SD_PATH_DIRECT); 20048 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 20049 sd_set_pstats(un); 20050 SD_TRACE(SD_LOG_IO_PARTITION, un, 20051 "sd_ioctl: un:0x%p pstats created and " 20052 "set\n", un); 20053 } 20054 } 20055 20056 if ((cmd == DKIOCSVTOC) || 20057 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 20058 20059 mutex_enter(SD_MUTEX(un)); 20060 if (un->un_f_devid_supported && 20061 (un->un_f_opt_fab_devid == TRUE)) { 20062 if (un->un_devid == NULL) { 20063 sd_register_devid(un, SD_DEVINFO(un), 20064 SD_TARGET_IS_UNRESERVED); 20065 } else { 20066 /* 20067 * The device id for this disk 20068 * has been fabricated. The 20069 * device id must be preserved 20070 * by writing it back out to 20071 * disk. 20072 */ 20073 if (sd_write_deviceid(un) != 0) { 20074 ddi_devid_free(un->un_devid); 20075 un->un_devid = NULL; 20076 } 20077 } 20078 } 20079 mutex_exit(SD_MUTEX(un)); 20080 } 20081 20082 break; 20083 20084 case DKIOCLOCK: 20085 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 20086 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20087 SD_PATH_STANDARD); 20088 break; 20089 20090 case DKIOCUNLOCK: 20091 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 20092 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 20093 SD_PATH_STANDARD); 20094 break; 20095 20096 case DKIOCSTATE: { 20097 enum dkio_state state; 20098 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 20099 20100 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 20101 err = EFAULT; 20102 } else { 20103 err = sd_check_media(dev, state); 20104 if (err == 0) { 20105 if (ddi_copyout(&un->un_mediastate, (void *)arg, 20106 sizeof (int), flag) != 0) 20107 err = EFAULT; 20108 } 20109 } 20110 break; 20111 } 20112 20113 case DKIOCREMOVABLE: 20114 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 20115 i = un->un_f_has_removable_media ? 1 : 0; 20116 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20117 err = EFAULT; 20118 } else { 20119 err = 0; 20120 } 20121 break; 20122 20123 case DKIOCHOTPLUGGABLE: 20124 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 20125 i = un->un_f_is_hotpluggable ?
1 : 0; 20126 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20127 err = EFAULT; 20128 } else { 20129 err = 0; 20130 } 20131 break; 20132 20133 case DKIOCGTEMPERATURE: 20134 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 20135 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 20136 break; 20137 20138 case MHIOCENFAILFAST: 20139 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 20140 if ((err = drv_priv(cred_p)) == 0) { 20141 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 20142 } 20143 break; 20144 20145 case MHIOCTKOWN: 20146 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 20147 if ((err = drv_priv(cred_p)) == 0) { 20148 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 20149 } 20150 break; 20151 20152 case MHIOCRELEASE: 20153 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 20154 if ((err = drv_priv(cred_p)) == 0) { 20155 err = sd_mhdioc_release(dev); 20156 } 20157 break; 20158 20159 case MHIOCSTATUS: 20160 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 20161 if ((err = drv_priv(cred_p)) == 0) { 20162 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 20163 case 0: 20164 err = 0; 20165 break; 20166 case EACCES: 20167 *rval_p = 1; 20168 err = 0; 20169 break; 20170 default: 20171 err = EIO; 20172 break; 20173 } 20174 } 20175 break; 20176 20177 case MHIOCQRESERVE: 20178 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20179 if ((err = drv_priv(cred_p)) == 0) { 20180 err = sd_reserve_release(dev, SD_RESERVE); 20181 } 20182 break; 20183 20184 case MHIOCREREGISTERDEVID: 20185 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20186 if (drv_priv(cred_p) == EPERM) { 20187 err = EPERM; 20188 } else if (!un->un_f_devid_supported) { 20189 err = ENOTTY; 20190 } else { 20191 err = sd_mhdioc_register_devid(dev); 20192 } 20193 break; 20194 20195 case MHIOCGRP_INKEYS: 20196 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20197 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20198 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20199 err = ENOTSUP; 20200 } else { 20201 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20202 flag); 20203 } 20204 } 20205 break; 20206 20207 case MHIOCGRP_INRESV: 20208 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20209 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20210 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20211 err = ENOTSUP; 20212 } else { 20213 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20214 } 20215 } 20216 break; 20217 20218 case MHIOCGRP_REGISTER: 20219 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20220 if ((err = drv_priv(cred_p)) != EPERM) { 20221 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20222 err = ENOTSUP; 20223 } else if (arg != NULL) { 20224 mhioc_register_t reg; 20225 if (ddi_copyin((void *)arg, ®, 20226 sizeof (mhioc_register_t), flag) != 0) { 20227 err = EFAULT; 20228 } else { 20229 err = 20230 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20231 un, SD_SCSI3_REGISTER, 20232 (uchar_t *)®); 20233 } 20234 } 20235 } 20236 break; 20237 20238 case MHIOCGRP_RESERVE: 20239 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20240 if ((err = drv_priv(cred_p)) != EPERM) { 20241 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20242 err = ENOTSUP; 20243 } else if (arg != NULL) { 20244 mhioc_resv_desc_t resv_desc; 20245 if (ddi_copyin((void *)arg, &resv_desc, 20246 sizeof (mhioc_resv_desc_t), flag) != 0) { 20247 err = EFAULT; 20248 } else { 20249 err = 20250 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20251 un, SD_SCSI3_RESERVE, 20252 (uchar_t *)&resv_desc); 20253 } 20254 } 20255 } 20256 break; 20257 20258 
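	/*
	 * Illustrative sketch for the MHIOCGRP_REGISTER and
	 * MHIOCGRP_RESERVE cases above (hypothetical user-level caller,
	 * not part of this driver; struct layouts are per sys/mhd.h,
	 * and the fd, key value, and reservation type below are
	 * arbitrary example choices):
	 *
	 *	#include <sys/types.h>
	 *	#include <sys/mhd.h>
	 *	#include <strings.h>
	 *	#include <stropts.h>
	 *
	 *	static int
	 *	take_scsi3_reservation(int fd)
	 *	{
	 *		mhioc_register_t reg;
	 *		mhioc_resv_desc_t rd;
	 *
	 *		bzero(&reg, sizeof (reg));
	 *		reg.newkey.key[7] = 0x01;	8-byte registration key
	 *		reg.aptpl = B_FALSE;
	 *		if (ioctl(fd, MHIOCGRP_REGISTER, &reg) != 0)
	 *			return (-1);
	 *
	 *		bzero(&rd, sizeof (rd));
	 *		rd.key = reg.newkey;
	 *		rd.scope = 0;		LU scope
	 *		rd.type = 0x5;		write-excl., registrants only
	 *		return (ioctl(fd, MHIOCGRP_RESERVE, &rd));
	 *	}
	 */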
case MHIOCGRP_PREEMPTANDABORT: 20259 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20260 if ((err = drv_priv(cred_p)) != EPERM) { 20261 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20262 err = ENOTSUP; 20263 } else if (arg != NULL) { 20264 mhioc_preemptandabort_t preempt_abort; 20265 if (ddi_copyin((void *)arg, &preempt_abort, 20266 sizeof (mhioc_preemptandabort_t), 20267 flag) != 0) { 20268 err = EFAULT; 20269 } else { 20270 err = 20271 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20272 un, SD_SCSI3_PREEMPTANDABORT, 20273 (uchar_t *)&preempt_abort); 20274 } 20275 } 20276 } 20277 break; 20278 20279 case MHIOCGRP_REGISTERANDIGNOREKEY: 20280 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20281 if ((err = drv_priv(cred_p)) != EPERM) { 20282 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20283 err = ENOTSUP; 20284 } else if (arg != NULL) { 20285 mhioc_registerandignorekey_t r_and_i; 20286 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20287 sizeof (mhioc_registerandignorekey_t), 20288 flag) != 0) { 20289 err = EFAULT; 20290 } else { 20291 err = 20292 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20293 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20294 (uchar_t *)&r_and_i); 20295 } 20296 } 20297 } 20298 break; 20299 20300 case USCSICMD: 20301 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20302 cr = ddi_get_cred(); 20303 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20304 err = EPERM; 20305 } else { 20306 enum uio_seg uioseg; 20307 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20308 UIO_USERSPACE; 20309 if (un->un_f_format_in_progress == TRUE) { 20310 err = EAGAIN; 20311 break; 20312 } 20313 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20314 flag, uioseg, SD_PATH_STANDARD); 20315 } 20316 break; 20317 20318 case CDROMPAUSE: 20319 case CDROMRESUME: 20320 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20321 if (!ISCD(un)) { 20322 err = ENOTTY; 20323 } else { 20324 err = sr_pause_resume(dev, cmd); 20325 } 20326 break; 20327 20328 case CDROMPLAYMSF: 20329 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20330 if (!ISCD(un)) { 20331 err = ENOTTY; 20332 } else { 20333 err = sr_play_msf(dev, (caddr_t)arg, flag); 20334 } 20335 break; 20336 20337 case CDROMPLAYTRKIND: 20338 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20339 #if defined(__i386) || defined(__amd64) 20340 /* 20341 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20342 */ 20343 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20344 #else 20345 if (!ISCD(un)) { 20346 #endif 20347 err = ENOTTY; 20348 } else { 20349 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20350 } 20351 break; 20352 20353 case CDROMREADTOCHDR: 20354 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20355 if (!ISCD(un)) { 20356 err = ENOTTY; 20357 } else { 20358 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20359 } 20360 break; 20361 20362 case CDROMREADTOCENTRY: 20363 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20364 if (!ISCD(un)) { 20365 err = ENOTTY; 20366 } else { 20367 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20368 } 20369 break; 20370 20371 case CDROMSTOP: 20372 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20373 if (!ISCD(un)) { 20374 err = ENOTTY; 20375 } else { 20376 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20377 SD_PATH_STANDARD); 20378 } 20379 break; 20380 20381 case CDROMSTART: 20382 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20383 if (!ISCD(un)) { 20384 err = ENOTTY; 20385 } else { 20386 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20387 SD_PATH_STANDARD); 20388 } 20389 break; 
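	/*
	 * Illustrative sketch for the USCSICMD case handled above
	 * (hypothetical privileged user-level caller, not part of this
	 * driver; uscsi_cmd fields are per sys/scsi/impl/uscsi.h, and
	 * the fd, buffer sizes, and timeout are assumptions):
	 *
	 *	#include <sys/scsi/impl/uscsi.h>
	 *	#include <string.h>
	 *	#include <stropts.h>
	 *
	 *	static int
	 *	scsi_tur(int fd)
	 *	{
	 *		struct uscsi_cmd uc;
	 *		char cdb[6] = { 0 };	TEST UNIT READY, opcode 0x00
	 *		char rq[255];
	 *
	 *		(void) memset(&uc, 0, sizeof (uc));
	 *		uc.uscsi_cdb = cdb;
	 *		uc.uscsi_cdblen = sizeof (cdb);
	 *		uc.uscsi_rqbuf = rq;
	 *		uc.uscsi_rqlen = sizeof (rq);
	 *		uc.uscsi_flags = USCSI_SILENT | USCSI_RQENABLE;
	 *		uc.uscsi_timeout = 30;
	 *		return (ioctl(fd, USCSICMD, &uc));
	 *	}
	 */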
20390 20391 case CDROMCLOSETRAY: 20392 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 20393 if (!ISCD(un)) { 20394 err = ENOTTY; 20395 } else { 20396 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 20397 SD_PATH_STANDARD); 20398 } 20399 break; 20400 20401 case FDEJECT: /* for eject command */ 20402 case DKIOCEJECT: 20403 case CDROMEJECT: 20404 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 20405 if (!un->un_f_eject_media_supported) { 20406 err = ENOTTY; 20407 } else { 20408 err = sr_eject(dev); 20409 } 20410 break; 20411 20412 case CDROMVOLCTRL: 20413 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 20414 if (!ISCD(un)) { 20415 err = ENOTTY; 20416 } else { 20417 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 20418 } 20419 break; 20420 20421 case CDROMSUBCHNL: 20422 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 20423 if (!ISCD(un)) { 20424 err = ENOTTY; 20425 } else { 20426 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 20427 } 20428 break; 20429 20430 case CDROMREADMODE2: 20431 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 20432 if (!ISCD(un)) { 20433 err = ENOTTY; 20434 } else if (un->un_f_cfg_is_atapi == TRUE) { 20435 /* 20436 * If the drive supports READ CD, use that instead of 20437 * switching the LBA size via a MODE SELECT 20438 * Block Descriptor 20439 */ 20440 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 20441 } else { 20442 err = sr_read_mode2(dev, (caddr_t)arg, flag); 20443 } 20444 break; 20445 20446 case CDROMREADMODE1: 20447 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 20448 if (!ISCD(un)) { 20449 err = ENOTTY; 20450 } else { 20451 err = sr_read_mode1(dev, (caddr_t)arg, flag); 20452 } 20453 break; 20454 20455 case CDROMREADOFFSET: 20456 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 20457 if (!ISCD(un)) { 20458 err = ENOTTY; 20459 } else { 20460 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 20461 flag); 20462 } 20463 break; 20464 20465 case CDROMSBLKMODE: 20466 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 20467 /* 20468 * There is no means of changing the block size on ATAPI 20469 * drives, so return ENOTTY if the drive type is ATAPI. 20470 */ 20471 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20472 err = ENOTTY; 20473 } else if (un->un_f_mmc_cap == TRUE) { 20474 20475 /* 20476 * MMC devices do not support changing the 20477 * logical block size. 20478 * 20479 * Note: EINVAL is being returned instead of ENOTTY to 20480 * maintain consistency with the original mmc 20481 * driver update. 20482 */ 20483 err = EINVAL; 20484 } else { 20485 mutex_enter(SD_MUTEX(un)); 20486 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 20487 (un->un_ncmds_in_transport > 0)) { 20488 mutex_exit(SD_MUTEX(un)); 20489 err = EINVAL; 20490 } else { 20491 mutex_exit(SD_MUTEX(un)); 20492 err = sr_change_blkmode(dev, cmd, arg, flag); 20493 } 20494 } 20495 break; 20496 20497 case CDROMGBLKMODE: 20498 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 20499 if (!ISCD(un)) { 20500 err = ENOTTY; 20501 } else if ((un->un_f_cfg_is_atapi != FALSE) && 20502 (un->un_f_blockcount_is_valid != FALSE)) { 20503 /* 20504 * The drive is ATAPI, so return the target block 20505 * size; the block size cannot be changed on 20506 * ATAPI drives. Used primarily to detect 20507 * whether an ATAPI cdrom is present. 20508 */ 20509 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 20510 sizeof (int), flag) != 0) { 20511 err = EFAULT; 20512 } else { 20513 err = 0; 20514 } 20515 20516 } else { 20517 /* 20518 * Drive supports changing block sizes via a Mode 20519 * Select.
20520 */ 20521 err = sr_change_blkmode(dev, cmd, arg, flag); 20522 } 20523 break; 20524 20525 case CDROMGDRVSPEED: 20526 case CDROMSDRVSPEED: 20527 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 20528 if (!ISCD(un)) { 20529 err = ENOTTY; 20530 } else if (un->un_f_mmc_cap == TRUE) { 20531 /* 20532 * Note: In the future the driver implementation 20533 * for getting and 20534 * setting cd speed should entail: 20535 * 1) If non-mmc try the Toshiba mode page 20536 * (sr_change_speed) 20537 * 2) If mmc but no support for Real Time Streaming try 20538 * the SET CD SPEED (0xBB) command 20539 * (sr_atapi_change_speed) 20540 * 3) If mmc and support for Real Time Streaming 20541 * try the GET PERFORMANCE and SET STREAMING 20542 * commands (not yet implemented, 4380808) 20543 */ 20544 /* 20545 * As per recent MMC spec, CD-ROM speed is variable 20546 * and changes with LBA. Since there is no such 20547 * thing as drive speed now, fail this ioctl. 20548 * 20549 * Note: EINVAL is returned for consistency with the 20550 * original implementation, which included support for 20551 * getting the drive speed of mmc devices but not for 20552 * setting the drive speed. Thus EINVAL would be returned 20553 * if a set request was made for an mmc device. 20554 * We no longer support get or set speed for 20555 * mmc but need to remain consistent with regard 20556 * to the error code returned. 20557 */ 20558 err = EINVAL; 20559 } else if (un->un_f_cfg_is_atapi == TRUE) { 20560 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20561 } else { 20562 err = sr_change_speed(dev, cmd, arg, flag); 20563 } 20564 break; 20565 20566 case CDROMCDDA: 20567 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20568 if (!ISCD(un)) { 20569 err = ENOTTY; 20570 } else { 20571 err = sr_read_cdda(dev, (void *)arg, flag); 20572 } 20573 break; 20574 20575 case CDROMCDXA: 20576 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20577 if (!ISCD(un)) { 20578 err = ENOTTY; 20579 } else { 20580 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20581 } 20582 break; 20583 20584 case CDROMSUBCODE: 20585 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20586 if (!ISCD(un)) { 20587 err = ENOTTY; 20588 } else { 20589 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20590 } 20591 break; 20592 20593 20594 #ifdef SDDEBUG 20595 /* RESET/ABORTS testing ioctls */ 20596 case DKIOCRESET: { 20597 int reset_level; 20598 20599 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20600 err = EFAULT; 20601 } else { 20602 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20603 "reset_level = 0x%x\n", reset_level); 20604 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20605 err = 0; 20606 } else { 20607 err = EIO; 20608 } 20609 } 20610 break; 20611 } 20612 20613 case DKIOCABORT: 20614 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20615 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20616 err = 0; 20617 } else { 20618 err = EIO; 20619 } 20620 break; 20621 #endif 20622 20623 #ifdef SD_FAULT_INJECTION 20624 /* SDIOC FaultInjection testing ioctls */ 20625 case SDIOCSTART: 20626 case SDIOCSTOP: 20627 case SDIOCINSERTPKT: 20628 case SDIOCINSERTXB: 20629 case SDIOCINSERTUN: 20630 case SDIOCINSERTARQ: 20631 case SDIOCPUSH: 20632 case SDIOCRETRIEVE: 20633 case SDIOCRUN: 20634 SD_INFO(SD_LOG_SDTEST, un, "sdioctl: " 20635 "SDIOC detected cmd:0x%X:\n", cmd); 20636 /* call error generator */ 20637 sd_faultinjection_ioctl(cmd, arg, un); 20638 err = 0; 20639 break; 20640 20641 #endif /* SD_FAULT_INJECTION */ 20642 20643 case DKIOCFLUSHWRITECACHE: 20644 { 20645 struct dk_callback *dkc = (struct
dk_callback *)arg; 20646 20647 mutex_enter(SD_MUTEX(un)); 20648 if (!un->un_f_sync_cache_supported || 20649 !un->un_f_write_cache_enabled) { 20650 err = un->un_f_sync_cache_supported ? 20651 0 : ENOTSUP; 20652 mutex_exit(SD_MUTEX(un)); 20653 if ((flag & FKIOCTL) && dkc != NULL && 20654 dkc->dkc_callback != NULL) { 20655 (*dkc->dkc_callback)(dkc->dkc_cookie, 20656 err); 20657 /* 20658 * Did callback and reported error. 20659 * Since we did a callback, ioctl 20660 * should return 0. 20661 */ 20662 err = 0; 20663 } 20664 break; 20665 } 20666 mutex_exit(SD_MUTEX(un)); 20667 20668 if ((flag & FKIOCTL) && dkc != NULL && 20669 dkc->dkc_callback != NULL) { 20670 /* async SYNC CACHE request */ 20671 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20672 } else { 20673 /* synchronous SYNC CACHE request */ 20674 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20675 } 20676 } 20677 break; 20678 20679 case DKIOCGETWCE: { 20680 20681 int wce; 20682 20683 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20684 break; 20685 } 20686 20687 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20688 err = EFAULT; 20689 } 20690 break; 20691 } 20692 20693 case DKIOCSETWCE: { 20694 20695 int wce, sync_supported; 20696 20697 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20698 err = EFAULT; 20699 break; 20700 } 20701 20702 /* 20703 * Synchronize multiple threads trying to enable 20704 * or disable the cache via the un_f_wcc_cv 20705 * condition variable. 20706 */ 20707 mutex_enter(SD_MUTEX(un)); 20708 20709 /* 20710 * Don't allow the cache to be enabled if the 20711 * config file has it disabled. 20712 */ 20713 if (un->un_f_opt_disable_cache && wce) { 20714 mutex_exit(SD_MUTEX(un)); 20715 err = EINVAL; 20716 break; 20717 } 20718 20719 /* 20720 * Wait for write cache change in progress 20721 * bit to be clear before proceeding. 20722 */ 20723 while (un->un_f_wcc_inprog) 20724 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20725 20726 un->un_f_wcc_inprog = 1; 20727 20728 if (un->un_f_write_cache_enabled && wce == 0) { 20729 /* 20730 * Disable the write cache. Don't clear 20731 * un_f_write_cache_enabled until after 20732 * the mode select and flush are complete. 20733 */ 20734 sync_supported = un->un_f_sync_cache_supported; 20735 20736 /* 20737 * If cache flush is suppressed, we assume that the 20738 * controller firmware will take care of managing the 20739 * write cache for us: no need to explicitly 20740 * disable it. 20741 */ 20742 if (!un->un_f_suppress_cache_flush) { 20743 mutex_exit(SD_MUTEX(un)); 20744 if ((err = sd_cache_control(un, 20745 SD_CACHE_NOCHANGE, 20746 SD_CACHE_DISABLE)) == 0 && 20747 sync_supported) { 20748 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20749 NULL); 20750 } 20751 } else { 20752 mutex_exit(SD_MUTEX(un)); 20753 } 20754 20755 mutex_enter(SD_MUTEX(un)); 20756 if (err == 0) { 20757 un->un_f_write_cache_enabled = 0; 20758 } 20759 20760 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20761 /* 20762 * Set un_f_write_cache_enabled first, so there is 20763 * no window where the cache is enabled, but the 20764 * bit says it isn't. 20765 */ 20766 un->un_f_write_cache_enabled = 1; 20767 20768 /* 20769 * If cache flush is suppressed, we assume that the 20770 * controller firmware will take care of managing the 20771 * write cache for us: no need to explicitly 20772 * enable it. 
20773 */ 20774 if (!un->un_f_suppress_cache_flush) { 20775 mutex_exit(SD_MUTEX(un)); 20776 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20777 SD_CACHE_ENABLE); 20778 } else { 20779 mutex_exit(SD_MUTEX(un)); 20780 } 20781 20782 mutex_enter(SD_MUTEX(un)); 20783 20784 if (err) { 20785 un->un_f_write_cache_enabled = 0; 20786 } 20787 } 20788 20789 un->un_f_wcc_inprog = 0; 20790 cv_broadcast(&un->un_wcc_cv); 20791 mutex_exit(SD_MUTEX(un)); 20792 break; 20793 } 20794 20795 default: 20796 err = ENOTTY; 20797 break; 20798 } 20799 mutex_enter(SD_MUTEX(un)); 20800 un->un_ncmds_in_driver--; 20801 ASSERT(un->un_ncmds_in_driver >= 0); 20802 mutex_exit(SD_MUTEX(un)); 20803 20804 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20805 return (err); 20806 } 20807 20808 20809 /* 20810 * Function: sd_dkio_ctrl_info 20811 * 20812 * Description: This routine is the driver entry point for handling controller 20813 * information ioctl requests (DKIOCINFO). 20814 * 20815 * Arguments: dev - the device number 20816 * arg - pointer to user provided dk_cinfo structure 20817 * specifying the controller type and attributes. 20818 * flag - this argument is a pass through to ddi_copyxxx() 20819 * directly from the mode argument of ioctl(). 20820 * 20821 * Return Code: 0 20822 * EFAULT 20823 * ENXIO 20824 */ 20825 20826 static int 20827 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20828 { 20829 struct sd_lun *un = NULL; 20830 struct dk_cinfo *info; 20831 dev_info_t *pdip; 20832 int lun, tgt; 20833 20834 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20835 return (ENXIO); 20836 } 20837 20838 info = (struct dk_cinfo *) 20839 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20840 20841 switch (un->un_ctype) { 20842 case CTYPE_CDROM: 20843 info->dki_ctype = DKC_CDROM; 20844 break; 20845 default: 20846 info->dki_ctype = DKC_SCSI_CCS; 20847 break; 20848 } 20849 pdip = ddi_get_parent(SD_DEVINFO(un)); 20850 info->dki_cnum = ddi_get_instance(pdip); 20851 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20852 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20853 } else { 20854 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20855 DK_DEVLEN - 1); 20856 } 20857 20858 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20859 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20860 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20861 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20862 20863 /* Unit Information */ 20864 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20865 info->dki_slave = ((tgt << 3) | lun); 20866 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20867 DK_DEVLEN - 1); 20868 info->dki_flags = DKI_FMTVOL; 20869 info->dki_partition = SDPART(dev); 20870 20871 /* Max Transfer size of this device in blocks */ 20872 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20873 info->dki_addr = 0; 20874 info->dki_space = 0; 20875 info->dki_prio = 0; 20876 info->dki_vec = 0; 20877 20878 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20879 kmem_free(info, sizeof (struct dk_cinfo)); 20880 return (EFAULT); 20881 } else { 20882 kmem_free(info, sizeof (struct dk_cinfo)); 20883 return (0); 20884 } 20885 } 20886 20887 20888 /* 20889 * Function: sd_get_media_info 20890 * 20891 * Description: This routine is the driver entry point for handling ioctl 20892 * requests for the media type or command set profile used by the 20893 * drive to operate on the media (DKIOCGMEDIAINFO). 
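 *
 *		Illustrative usage sketch (hypothetical user-level
 *		caller; the fd and error handling are assumptions,
 *		dk_minfo fields are per sys/dkio.h):
 *
 *			struct dk_minfo mi;
 *			if (ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0) {
 *				uint64_t nbytes = (uint64_t)
 *				    mi.dki_capacity * mi.dki_lbsize;
 *				... mi.dki_media_type holds e.g.
 *				    DK_CDROM or DK_FIXED_DISK ...
 *			}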
20894 * 20895 * Arguments: dev - the device number 20896 * arg - pointer to user provided dk_minfo structure 20897 * specifying the media type, logical block size and 20898 * drive capacity. 20899 * flag - this argument is a pass through to ddi_copyxxx() 20900 * directly from the mode argument of ioctl(). 20901 * 20902 * Return Code: 0 20903 * EACCES 20904 * EFAULT 20905 * ENXIO 20906 * EIO 20907 */ 20908 20909 static int 20910 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20911 { 20912 struct sd_lun *un = NULL; 20913 struct uscsi_cmd com; 20914 struct scsi_inquiry *sinq; 20915 struct dk_minfo media_info; 20916 u_longlong_t media_capacity; 20917 uint64_t capacity; 20918 uint_t lbasize; 20919 uchar_t *out_data; 20920 uchar_t *rqbuf; 20921 int rval = 0; 20922 int rtn; 20923 20924 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20925 (un->un_state == SD_STATE_OFFLINE)) { 20926 return (ENXIO); 20927 } 20928 20929 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20930 20931 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20932 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20933 20934 /* Issue a TUR to determine if the drive is ready with media present */ 20935 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20936 if (rval == ENXIO) { 20937 goto done; 20938 } 20939 20940 /* Now get configuration data */ 20941 if (ISCD(un)) { 20942 media_info.dki_media_type = DK_CDROM; 20943 20944 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20945 if (un->un_f_mmc_cap == TRUE) { 20946 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20947 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20948 SD_PATH_STANDARD); 20949 20950 if (rtn) { 20951 /* 20952 * Failed for other than an illegal request 20953 * or command not supported 20954 */ 20955 if ((com.uscsi_status == STATUS_CHECK) && 20956 (com.uscsi_rqstatus == STATUS_GOOD)) { 20957 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20958 (rqbuf[12] != 0x20)) { 20959 rval = EIO; 20960 goto done; 20961 } 20962 } 20963 } else { 20964 /* 20965 * The GET CONFIGURATION command succeeded 20966 * so set the media type according to the 20967 * returned data 20968 */ 20969 media_info.dki_media_type = out_data[6]; 20970 media_info.dki_media_type <<= 8; 20971 media_info.dki_media_type |= out_data[7]; 20972 } 20973 } 20974 } else { 20975 /* 20976 * The profile list is not available, so we attempt to identify 20977 * the media type based on the inquiry data 20978 */ 20979 sinq = un->un_sd->sd_inq; 20980 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20981 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20982 /* This is a direct access device or optical disk */ 20983 media_info.dki_media_type = DK_FIXED_DISK; 20984 20985 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20986 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20987 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20988 media_info.dki_media_type = DK_ZIP; 20989 } else if ( 20990 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20991 media_info.dki_media_type = DK_JAZ; 20992 } 20993 } 20994 } else { 20995 /* 20996 * Not a CD, direct-access, or optical disk, so 20997 * return unknown media 20998 */ 20999 media_info.dki_media_type = DK_UNKNOWN; 21000 } 21001 } 21002 21003 /* Now read the capacity so we can provide the lbasize and capacity */ 21004 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 21005 SD_PATH_DIRECT)) { 21006 case 0: 21007 break; 21008 case EACCES: 21009 rval = EACCES; 21010 goto done; 21011 default: 21012 rval = EIO; 21013 goto done; 21014 } 21015 21016 /* 21017 * If the LUN
is expanded dynamically, update the un structure. 21018 */ 21019 mutex_enter(SD_MUTEX(un)); 21020 if ((un->un_f_blockcount_is_valid == TRUE) && 21021 (un->un_f_tgt_blocksize_is_valid == TRUE) && 21022 (capacity > un->un_blockcount)) { 21023 sd_update_block_info(un, lbasize, capacity); 21024 } 21025 mutex_exit(SD_MUTEX(un)); 21026 21027 media_info.dki_lbsize = lbasize; 21028 media_capacity = capacity; 21029 21030 /* 21031 * sd_send_scsi_READ_CAPACITY() reports the capacity in 21032 * un->un_sys_blocksize chunks, so we need to convert it into 21033 * lbasize chunks. 21034 */ 21035 media_capacity *= un->un_sys_blocksize; 21036 media_capacity /= lbasize; 21037 media_info.dki_capacity = media_capacity; 21038 21039 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 21040 rval = EFAULT; 21041 /* Keep the goto; any cleanup code added below will still run */ 21042 goto done; 21043 } 21044 done: 21045 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 21046 kmem_free(rqbuf, SENSE_LENGTH); 21047 return (rval); 21048 } 21049 21050 21051 /* 21052 * Function: sd_check_media 21053 * 21054 * Description: This utility routine implements the functionality for the 21055 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 21056 * driver state changes from that specified by the user 21057 * (inserted or ejected). For example, if the user specifies 21058 * DKIO_EJECTED and the current media state is inserted this 21059 * routine will immediately return DKIO_INSERTED. However, if the 21060 * current media state is not inserted the user thread will be 21061 * blocked until the drive state changes. If DKIO_NONE is specified 21062 * the user thread will block until a drive state change occurs. 21063 * 21064 * Arguments: dev - the device number 21065 * state - the dkio_state last observed by the user; this 21066 * routine blocks until the media state differs from it. 21067 * 21068 * Return Code: ENXIO 21069 * EIO 21070 * EAGAIN 21071 * EINTR 21072 */ 21073 21074 static int 21075 sd_check_media(dev_t dev, enum dkio_state state) 21076 { 21077 struct sd_lun *un = NULL; 21078 enum dkio_state prev_state; 21079 opaque_t token = NULL; 21080 int rval = 0; 21081 21082 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21083 return (ENXIO); 21084 } 21085 21086 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 21087 21088 mutex_enter(SD_MUTEX(un)); 21089 21090 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 21091 "state=%x, mediastate=%x\n", state, un->un_mediastate); 21092 21093 prev_state = un->un_mediastate; 21094 21095 /* is there anything to do? */ 21096 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 21097 /* 21098 * submit the request to the scsi_watch service; 21099 * sd_media_watch_cb() does the real work 21100 */ 21101 mutex_exit(SD_MUTEX(un)); 21102 21103 /* 21104 * This change handles the case where a scsi watch request is 21105 * added to a device that is powered down. To accomplish this 21106 * we power up the device before adding the scsi watch request, 21107 * since the scsi watch sends a TUR directly to the device 21108 * which the device cannot handle if it is powered down.
21109 */ 21110 if (sd_pm_entry(un) != DDI_SUCCESS) { 21111 mutex_enter(SD_MUTEX(un)); 21112 goto done; 21113 } 21114 21115 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 21116 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 21117 (caddr_t)dev); 21118 21119 sd_pm_exit(un); 21120 21121 mutex_enter(SD_MUTEX(un)); 21122 if (token == NULL) { 21123 rval = EAGAIN; 21124 goto done; 21125 } 21126 21127 /* 21128 * This is a special case IOCTL that doesn't return 21129 * until the media state changes. Routine sdpower 21130 * knows about and handles this so don't count it 21131 * as an active cmd in the driver, which would 21132 * keep the device busy as far as the pm framework 21133 * is concerned. If the count isn't decremented the 21134 * device can't be powered down. 21135 */ 21136 un->un_ncmds_in_driver--; 21137 ASSERT(un->un_ncmds_in_driver >= 0); 21138 21139 /* 21140 * if a prior request had been made, this will be the same 21141 * token, as scsi_watch was designed that way. 21142 */ 21143 un->un_swr_token = token; 21144 un->un_specified_mediastate = state; 21145 21146 /* 21147 * Now wait for the media change. We should not be signalled 21148 * until mediastate differs from state, but it is 21149 * still better to re-test the condition, since there is a 21150 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 21151 */ 21152 SD_TRACE(SD_LOG_COMMON, un, 21153 "sd_check_media: waiting for media state change\n"); 21154 while (un->un_mediastate == state) { 21155 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 21156 SD_TRACE(SD_LOG_COMMON, un, 21157 "sd_check_media: waiting for media state " 21158 "was interrupted\n"); 21159 un->un_ncmds_in_driver++; 21160 rval = EINTR; 21161 goto done; 21162 } 21163 SD_TRACE(SD_LOG_COMMON, un, 21164 "sd_check_media: received signal, state=%x\n", 21165 un->un_mediastate); 21166 } 21167 /* 21168 * Increment the counter to indicate the device once again 21169 * has an active outstanding cmd. 21170 */ 21171 un->un_ncmds_in_driver++; 21172 } 21173 21174 /* invalidate geometry */ 21175 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 21176 sr_ejected(un); 21177 } 21178 21179 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 21180 uint64_t capacity; 21181 uint_t lbasize; 21182 21183 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 21184 mutex_exit(SD_MUTEX(un)); 21185 /* 21186 * Since the following routines use SD_PATH_DIRECT, we must 21187 * call PM directly before the upcoming disk accesses. This 21188 * may cause the disk to be powered up and spun up.
21189 */ 21190 21191 if (sd_pm_entry(un) == DDI_SUCCESS) { 21192 rval = sd_send_scsi_READ_CAPACITY(un, 21193 &capacity, 21194 &lbasize, SD_PATH_DIRECT); 21195 if (rval != 0) { 21196 sd_pm_exit(un); 21197 mutex_enter(SD_MUTEX(un)); 21198 goto done; 21199 } 21200 } else { 21201 rval = EIO; 21202 mutex_enter(SD_MUTEX(un)); 21203 goto done; 21204 } 21205 mutex_enter(SD_MUTEX(un)); 21206 21207 sd_update_block_info(un, lbasize, capacity); 21208 21209 /* 21210 * Check whether the media in the device is writable 21211 */ 21212 if (ISCD(un)) 21213 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 21214 21215 mutex_exit(SD_MUTEX(un)); 21216 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 21217 if ((cmlb_validate(un->un_cmlbhandle, 0, 21218 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 21219 sd_set_pstats(un); 21220 SD_TRACE(SD_LOG_IO_PARTITION, un, 21221 "sd_check_media: un:0x%p pstats created and " 21222 "set\n", un); 21223 } 21224 21225 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21226 SD_PATH_DIRECT); 21227 sd_pm_exit(un); 21228 21229 mutex_enter(SD_MUTEX(un)); 21230 } 21231 done: 21232 un->un_f_watcht_stopped = FALSE; 21233 if (un->un_swr_token) { 21234 /* 21235 * Use of this local token and the mutex ensures that we avoid 21236 * some race conditions associated with terminating the 21237 * scsi watch. 21238 */ 21239 token = un->un_swr_token; 21240 un->un_swr_token = (opaque_t)NULL; 21241 mutex_exit(SD_MUTEX(un)); 21242 (void) scsi_watch_request_terminate(token, 21243 SCSI_WATCH_TERMINATE_WAIT); 21244 mutex_enter(SD_MUTEX(un)); 21245 } 21246 21247 /* 21248 * Update the capacity kstat value, if no media was previously 21249 * present (capacity kstat is 0) and media has been inserted 21250 * (un_f_blockcount_is_valid == TRUE) 21251 */ 21252 if (un->un_errstats) { 21253 struct sd_errstats *stp = NULL; 21254 21255 stp = (struct sd_errstats *)un->un_errstats->ks_data; 21256 if ((stp->sd_capacity.value.ui64 == 0) && 21257 (un->un_f_blockcount_is_valid == TRUE)) { 21258 stp->sd_capacity.value.ui64 = 21259 (uint64_t)((uint64_t)un->un_blockcount * 21260 un->un_sys_blocksize); 21261 } 21262 } 21263 mutex_exit(SD_MUTEX(un)); 21264 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 21265 return (rval); 21266 } 21267 21268 21269 /* 21270 * Function: sd_delayed_cv_broadcast 21271 * 21272 * Description: Delayed cv_broadcast to allow the target to recover from media 21273 * insertion. 21274 * 21275 * Arguments: arg - driver soft state (unit) structure 21276 */ 21277 21278 static void 21279 sd_delayed_cv_broadcast(void *arg) 21280 { 21281 struct sd_lun *un = arg; 21282 21283 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 21284 21285 mutex_enter(SD_MUTEX(un)); 21286 un->un_dcvb_timeid = NULL; 21287 cv_broadcast(&un->un_state_cv); 21288 mutex_exit(SD_MUTEX(un)); 21289 } 21290 21291 21292 /* 21293 * Function: sd_media_watch_cb 21294 * 21295 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 21296 * routine processes the TUR sense data and updates the driver 21297 * state if a transition has occurred. The user thread 21298 * (sd_check_media) is then signalled.
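 *
 *		For quick reference, the sense data is mapped to media
 *		states below roughly as follows (see the
 *		sense-key/ASC/ASCQ tests in the function body):
 *
 *		UNIT ATTENTION, ASC 0x28 (not ready to ready)	-> DKIO_INSERTED
 *		NOT READY, ASC 0x3a (medium not present)	-> DKIO_EJECTED
 *		NOT READY, ASC 0x04, ASCQ 0x02/0x07/0x08	-> DKIO_INSERTED
 *		NO SENSE, 00/00/00				-> state unchanged
 *		GOOD status with pkt_reason == CMD_CMPLT	-> DKIO_INSERTED
 *		pkt_reason == CMD_DEV_GONE			-> DKIO_DEV_GONE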
21299 * 21300 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21301 * among multiple watches that share this callback function 21302 * resultp - scsi watch facility result packet containing scsi 21303 * packet, status byte and sense data 21304 * 21305 * Return Code: 0 for success, -1 for failure 21306 */ 21307 21308 static int 21309 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21310 { 21311 struct sd_lun *un; 21312 struct scsi_status *statusp = resultp->statusp; 21313 uint8_t *sensep = (uint8_t *)resultp->sensep; 21314 enum dkio_state state = DKIO_NONE; 21315 dev_t dev = (dev_t)arg; 21316 uchar_t actual_sense_length; 21317 uint8_t skey, asc, ascq; 21318 21319 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21320 return (-1); 21321 } 21322 actual_sense_length = resultp->actual_sense_length; 21323 21324 mutex_enter(SD_MUTEX(un)); 21325 SD_TRACE(SD_LOG_COMMON, un, 21326 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 21327 *((char *)statusp), (void *)sensep, actual_sense_length); 21328 21329 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 21330 un->un_mediastate = DKIO_DEV_GONE; 21331 cv_broadcast(&un->un_state_cv); 21332 mutex_exit(SD_MUTEX(un)); 21333 21334 return (0); 21335 } 21336 21337 /* 21338 * If there was a check condition, sensep points to valid sense data. 21339 * If the status was not a check condition but a reservation or busy 21340 * status, the new state is DKIO_NONE. 21341 */ 21342 if (sensep != NULL) { 21343 skey = scsi_sense_key(sensep); 21344 asc = scsi_sense_asc(sensep); 21345 ascq = scsi_sense_ascq(sensep); 21346 21347 SD_INFO(SD_LOG_COMMON, un, 21348 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 21349 skey, asc, ascq); 21350 /* This routine only uses up to 13 bytes of sense data. */ 21351 if (actual_sense_length >= 13) { 21352 if (skey == KEY_UNIT_ATTENTION) { 21353 if (asc == 0x28) { 21354 state = DKIO_INSERTED; 21355 } 21356 } else if (skey == KEY_NOT_READY) { 21357 /* 21358 * Sense data 02/04/02 means that the host 21359 * should send a start command. Explicitly 21360 * leave the media state as is 21361 * (inserted), since the media is inserted 21362 * and the host has stopped the device for 21363 * PM reasons. The next true read/write 21364 * to this media will bring the 21365 * device to the proper state for 21366 * media access. 21367 */ 21368 if (asc == 0x3a) { 21369 state = DKIO_EJECTED; 21370 } else { 21371 /* 21372 * If the drive is busy with an 21373 * operation or long write, keep the 21374 * media in an inserted state. 21375 */ 21376 21377 if ((asc == 0x04) && 21378 ((ascq == 0x02) || 21379 (ascq == 0x07) || 21380 (ascq == 0x08))) { 21381 state = DKIO_INSERTED; 21382 } 21383 } 21384 } else if (skey == KEY_NO_SENSE) { 21385 if ((asc == 0x00) && (ascq == 0x00)) { 21386 /* 21387 * Sense Data 00/00/00 does not provide 21388 * any information about the state of 21389 * the media. Ignore it.
21390 */ 21391 mutex_exit(SD_MUTEX(un)); 21392 return (0); 21393 } 21394 } 21395 } 21396 } else if ((*((char *)statusp) == STATUS_GOOD) && 21397 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 21398 state = DKIO_INSERTED; 21399 } 21400 21401 SD_TRACE(SD_LOG_COMMON, un, 21402 "sd_media_watch_cb: state=%x, specified=%x\n", 21403 state, un->un_specified_mediastate); 21404 21405 /* 21406 * now signal the waiting thread if this is *not* the specified state; 21407 * delay the signal if the state is DKIO_INSERTED to allow the target 21408 * to recover 21409 */ 21410 if (state != un->un_specified_mediastate) { 21411 un->un_mediastate = state; 21412 if (state == DKIO_INSERTED) { 21413 /* 21414 * delay the signal to give the drive a chance 21415 * to do what it apparently needs to do 21416 */ 21417 SD_TRACE(SD_LOG_COMMON, un, 21418 "sd_media_watch_cb: delayed cv_broadcast\n"); 21419 if (un->un_dcvb_timeid == NULL) { 21420 un->un_dcvb_timeid = 21421 timeout(sd_delayed_cv_broadcast, un, 21422 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 21423 } 21424 } else { 21425 SD_TRACE(SD_LOG_COMMON, un, 21426 "sd_media_watch_cb: immediate cv_broadcast\n"); 21427 cv_broadcast(&un->un_state_cv); 21428 } 21429 } 21430 mutex_exit(SD_MUTEX(un)); 21431 return (0); 21432 } 21433 21434 21435 /* 21436 * Function: sd_dkio_get_temp 21437 * 21438 * Description: This routine is the driver entry point for handling ioctl 21439 * requests to get the disk temperature. 21440 * 21441 * Arguments: dev - the device number 21442 * arg - pointer to user provided dk_temperature structure. 21443 * flag - this argument is a pass through to ddi_copyxxx() 21444 * directly from the mode argument of ioctl(). 21445 * 21446 * Return Code: 0 21447 * EFAULT 21448 * ENXIO 21449 * EAGAIN 21450 */ 21451 21452 static int 21453 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 21454 { 21455 struct sd_lun *un = NULL; 21456 struct dk_temperature *dktemp = NULL; 21457 uchar_t *temperature_page; 21458 int rval = 0; 21459 int path_flag = SD_PATH_STANDARD; 21460 21461 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21462 return (ENXIO); 21463 } 21464 21465 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 21466 21467 /* copyin the disk temp argument to get the user flags */ 21468 if (ddi_copyin((void *)arg, dktemp, 21469 sizeof (struct dk_temperature), flag) != 0) { 21470 rval = EFAULT; 21471 goto done; 21472 } 21473 21474 /* Initialize the temperature to invalid. */ 21475 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21476 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21477 21478 /* 21479 * Note: Investigate removing the "bypass pm" semantic. 21480 * Can we just bypass PM always? 21481 */ 21482 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 21483 path_flag = SD_PATH_DIRECT; 21484 ASSERT(!mutex_owned(&un->un_pm_mutex)); 21485 mutex_enter(&un->un_pm_mutex); 21486 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 21487 /* 21488 * If DKT_BYPASS_PM is set, and the drive happens to be 21489 * in low power mode, we cannot wake it up; we need to 21490 * return EAGAIN. 21491 */ 21492 mutex_exit(&un->un_pm_mutex); 21493 rval = EAGAIN; 21494 goto done; 21495 } else { 21496 /* 21497 * Indicate to PM the device is busy. This is required 21498 * to avoid a race - i.e. the ioctl is issuing a 21499 * command and the pm framework brings down the device 21500 * to low power mode (possible power cut-off on some 21501 * platforms).
21502 */ 21503 mutex_exit(&un->un_pm_mutex); 21504 if (sd_pm_entry(un) != DDI_SUCCESS) { 21505 rval = EAGAIN; 21506 goto done; 21507 } 21508 } 21509 } 21510 21511 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21512 21513 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21514 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21515 goto done2; 21516 } 21517 21518 /* 21519 * For the current temperature verify that the parameter length is 0x02 21520 * and the parameter code is 0x00 21521 */ 21522 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21523 (temperature_page[5] == 0x00)) { 21524 if (temperature_page[9] == 0xFF) { 21525 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21526 } else { 21527 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21528 } 21529 } 21530 21531 /* 21532 * For the reference temperature verify that the parameter 21533 * length is 0x02 and the parameter code is 0x01 21534 */ 21535 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21536 (temperature_page[11] == 0x01)) { 21537 if (temperature_page[15] == 0xFF) { 21538 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21539 } else { 21540 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21541 } 21542 } 21543 21544 /* Do the copyout regardless of the temperature command's status. */ 21545 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21546 flag) != 0) { 21547 rval = EFAULT; 21548 } 21549 21550 done2: 21551 if (path_flag == SD_PATH_DIRECT) { 21552 sd_pm_exit(un); 21553 } 21554 21555 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21556 done: 21557 if (dktemp != NULL) { 21558 kmem_free(dktemp, sizeof (struct dk_temperature)); 21559 } 21560 21561 return (rval); 21562 } 21563 21564 21565 /* 21566 * Function: sd_log_page_supported 21567 * 21568 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21569 * supported log pages. 21570 * 21571 * Arguments: un - softstate pointer for this target 21572 * log_page - the log page number to look for in the list 21573 * 21574 * Return Code: -1 - on error (log sense is optional and may not be supported). 21575 * 0 - log page not found. 21576 * 1 - log page found. 21577 */ 21578 21579 static int 21580 sd_log_page_supported(struct sd_lun *un, int log_page) 21581 { 21582 uchar_t *log_page_data; 21583 int i; 21584 int match = 0; 21585 int log_size; 21586 21587 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21588 21589 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21590 SD_PATH_DIRECT) != 0) { 21591 SD_ERROR(SD_LOG_COMMON, un, 21592 "sd_log_page_supported: failed log page retrieval\n"); 21593 kmem_free(log_page_data, 0xFF); 21594 return (-1); 21595 } 21596 log_size = log_page_data[3]; 21597 21598 /* 21599 * The list of supported log pages starts at the fourth byte. Check 21600 * until we run out of log pages or a match is found. 21601 */ 21602 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21603 if (log_page_data[i] == log_page) { 21604 match++; 21605 } 21606 } 21607 kmem_free(log_page_data, 0xFF); 21608 return (match); 21609 } 21610 21611 21612 /* 21613 * Function: sd_mhdioc_failfast 21614 * 21615 * Description: This routine is the driver entry point for handling ioctl 21616 * requests to enable/disable the multihost failfast option. 21617 * (MHIOCENFAILFAST) 21618 * 21619 * Arguments: dev - the device number 21620 * arg - user specified probing interval. 21621 * flag - this argument is a pass through to ddi_copyxxx() 21622 * directly from the mode argument of ioctl().
21623 * 21624 * Return Code: 0 21625 * EFAULT 21626 * ENXIO 21627 */ 21628 21629 static int 21630 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21631 { 21632 struct sd_lun *un = NULL; 21633 int mh_time; 21634 int rval = 0; 21635 21636 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21637 return (ENXIO); 21638 } 21639 21640 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21641 return (EFAULT); 21642 21643 if (mh_time) { 21644 mutex_enter(SD_MUTEX(un)); 21645 un->un_resvd_status |= SD_FAILFAST; 21646 mutex_exit(SD_MUTEX(un)); 21647 /* 21648 * If mh_time is INT_MAX, then this ioctl is being used for 21649 * SCSI-3 PGR purposes, and we don't need to spawn a watch 21650 * thread. 21651 */ 21652 if (mh_time != INT_MAX) { 21653 rval = sd_check_mhd(dev, mh_time); 21654 } 21655 } else { 21656 (void) sd_check_mhd(dev, 0); 21657 mutex_enter(SD_MUTEX(un)); 21658 un->un_resvd_status &= ~SD_FAILFAST; 21659 mutex_exit(SD_MUTEX(un)); 21660 } 21661 return (rval); 21662 } 21663 21664 /* 21665 * Function: sd_mhdioc_takeown 21666 * 21667 * Description: This routine is the driver entry point for handling ioctl 21668 * requests to forcefully acquire exclusive access rights to the 21669 * multihost disk (MHIOCTKOWN). 21670 * 21671 * Arguments: dev - the device number 21672 * arg - user provided structure specifying the delay 21673 * parameters in milliseconds 21674 * flag - this argument is a pass through to ddi_copyxxx() 21675 * directly from the mode argument of ioctl(). 21676 * 21677 * Return Code: 0 21678 * EFAULT 21679 * ENXIO 21680 */ 21681 21682 static int 21683 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21684 { 21685 struct sd_lun *un = NULL; 21686 struct mhioctkown *tkown = NULL; 21687 int rval = 0; 21688 21689 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21690 return (ENXIO); 21691 } 21692 21693 if (arg != NULL) { 21694 tkown = (struct mhioctkown *) 21695 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21696 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21697 if (rval != 0) { 21698 rval = EFAULT; 21699 goto error; 21700 } 21701 } 21702 21703 rval = sd_take_ownership(dev, tkown); 21704 mutex_enter(SD_MUTEX(un)); 21705 if (rval == 0) { 21706 un->un_resvd_status |= SD_RESERVE; 21707 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21708 sd_reinstate_resv_delay = 21709 tkown->reinstate_resv_delay * 1000; 21710 } else { 21711 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21712 } 21713 /* 21714 * Give the scsi_watch routine's interval, as set by 21715 * the MHIOCENFAILFAST ioctl, precedence here. 21716 */ 21717 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21718 mutex_exit(SD_MUTEX(un)); 21719 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21720 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21721 "sd_mhdioc_takeown : %d\n", 21722 sd_reinstate_resv_delay); 21723 } else { 21724 mutex_exit(SD_MUTEX(un)); 21725 } 21726 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21727 sd_mhd_reset_notify_cb, (caddr_t)un); 21728 } else { 21729 un->un_resvd_status &= ~SD_RESERVE; 21730 mutex_exit(SD_MUTEX(un)); 21731 } 21732 21733 error: 21734 if (tkown != NULL) { 21735 kmem_free(tkown, sizeof (struct mhioctkown)); 21736 } 21737 return (rval); 21738 } 21739 21740 21741 /* 21742 * Function: sd_mhdioc_release 21743 * 21744 * Description: This routine is the driver entry point for handling ioctl 21745 * requests to release exclusive access rights to the multihost 21746 * disk (MHIOCRELEASE).
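 *
 *		Illustrative lifecycle sketch (hypothetical user-level
 *		cluster agent; the fd, probe interval, and zeroed
 *		delays are assumptions; struct mhioctkown is per
 *		sys/mhd.h):
 *
 *			struct mhioctkown t;
 *			int mh_time = 1000;	probe interval, msecs
 *			bzero(&t, sizeof (t));	zero delays: use defaults
 *			if (ioctl(fd, MHIOCTKOWN, &t) == 0) {
 *				(void) ioctl(fd, MHIOCENFAILFAST, &mh_time);
 *				... disk is exclusively owned; do i/o ...
 *				(void) ioctl(fd, MHIOCRELEASE, 0);
 *			}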
21747 * 21748 * Arguments: dev - the device number 21749 * 21750 * Return Code: 0 21751 * ENXIO 21752 */ 21753 21754 static int 21755 sd_mhdioc_release(dev_t dev) 21756 { 21757 struct sd_lun *un = NULL; 21758 timeout_id_t resvd_timeid_save; 21759 int resvd_status_save; 21760 int rval = 0; 21761 21762 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21763 return (ENXIO); 21764 } 21765 21766 mutex_enter(SD_MUTEX(un)); 21767 resvd_status_save = un->un_resvd_status; 21768 un->un_resvd_status &= 21769 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21770 if (un->un_resvd_timeid) { 21771 resvd_timeid_save = un->un_resvd_timeid; 21772 un->un_resvd_timeid = NULL; 21773 mutex_exit(SD_MUTEX(un)); 21774 (void) untimeout(resvd_timeid_save); 21775 } else { 21776 mutex_exit(SD_MUTEX(un)); 21777 } 21778 21779 /* 21780 * Destroy any pending timeout thread that may be attempting to 21781 * reinstate the reservation on this device. 21782 */ 21783 sd_rmv_resv_reclaim_req(dev); 21784 21785 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21786 mutex_enter(SD_MUTEX(un)); 21787 if ((un->un_mhd_token) && 21788 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21789 mutex_exit(SD_MUTEX(un)); 21790 (void) sd_check_mhd(dev, 0); 21791 } else { 21792 mutex_exit(SD_MUTEX(un)); 21793 } 21794 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21795 sd_mhd_reset_notify_cb, (caddr_t)un); 21796 } else { 21797 /* 21798 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21799 */ 21800 mutex_enter(SD_MUTEX(un)); 21801 un->un_resvd_status = resvd_status_save; 21802 mutex_exit(SD_MUTEX(un)); 21803 } 21804 return (rval); 21805 } 21806 21807 21808 /* 21809 * Function: sd_mhdioc_register_devid 21810 * 21811 * Description: This routine is the driver entry point for handling ioctl 21812 * requests to register the device id (MHIOCREREGISTERDEVID). 21813 * 21814 * Note: The implementation for this ioctl has been updated to 21815 * be consistent with the original PSARC case (1999/357) 21816 * (4375899, 4241671, 4220005) 21817 * 21818 * Arguments: dev - the device number 21819 * 21820 * Return Code: 0 21821 * ENXIO 21822 */ 21823 21824 static int 21825 sd_mhdioc_register_devid(dev_t dev) 21826 { 21827 struct sd_lun *un = NULL; 21828 int rval = 0; 21829 21830 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21831 return (ENXIO); 21832 } 21833 21834 ASSERT(!mutex_owned(SD_MUTEX(un))); 21835 21836 mutex_enter(SD_MUTEX(un)); 21837 21838 /* If a devid already exists, de-register it */ 21839 if (un->un_devid != NULL) { 21840 ddi_devid_unregister(SD_DEVINFO(un)); 21841 /* 21842 * After unregistering the devid, we need to free the devid memory 21843 */ 21844 ddi_devid_free(un->un_devid); 21845 un->un_devid = NULL; 21846 } 21847 21848 /* Check for reservation conflict */ 21849 mutex_exit(SD_MUTEX(un)); 21850 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21851 mutex_enter(SD_MUTEX(un)); 21852 21853 switch (rval) { 21854 case 0: 21855 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21856 break; 21857 case EACCES: 21858 break; 21859 default: 21860 rval = EIO; 21861 } 21862 21863 mutex_exit(SD_MUTEX(un)); 21864 return (rval); 21865 } 21866 21867 21868 /* 21869 * Function: sd_mhdioc_inkeys 21870 * 21871 * Description: This routine is the driver entry point for handling ioctl 21872 * requests to issue the SCSI-3 Persistent In Read Keys command 21873 * to the device (MHIOCGRP_INKEYS).
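 *
 *		Illustrative two-pass sketch (hypothetical user-level
 *		caller; assumes <sys/mhd.h>, <stdlib.h>, <strings.h>
 *		and <stropts.h>, and the fd and error handling are
 *		assumptions):
 *
 *			mhioc_inkeys_t ik;
 *			mhioc_key_list_t kl;
 *			bzero(&kl, sizeof (kl));	listsize 0: count only
 *			ik.li = &kl;
 *			if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0 &&
 *			    kl.listlen != 0) {
 *				kl.list = calloc(kl.listlen,
 *				    sizeof (mhioc_resv_key_t));
 *				kl.listsize = kl.listlen;
 *				(void) ioctl(fd, MHIOCGRP_INKEYS, &ik);
 *			}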
21874 * 21875 * Arguments: dev - the device number 21876 * arg - user provided in_keys structure 21877 * flag - this argument is a pass through to ddi_copyxxx() 21878 * directly from the mode argument of ioctl(). 21879 * 21880 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21881 * ENXIO 21882 * EFAULT 21883 */ 21884 21885 static int 21886 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21887 { 21888 struct sd_lun *un; 21889 mhioc_inkeys_t inkeys; 21890 int rval = 0; 21891 21892 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21893 return (ENXIO); 21894 } 21895 21896 #ifdef _MULTI_DATAMODEL 21897 switch (ddi_model_convert_from(flag & FMODELS)) { 21898 case DDI_MODEL_ILP32: { 21899 struct mhioc_inkeys32 inkeys32; 21900 21901 if (ddi_copyin(arg, &inkeys32, 21902 sizeof (struct mhioc_inkeys32), flag) != 0) { 21903 return (EFAULT); 21904 } 21905 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21906 if ((rval = sd_persistent_reservation_in_read_keys(un, 21907 &inkeys, flag)) != 0) { 21908 return (rval); 21909 } 21910 inkeys32.generation = inkeys.generation; 21911 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21912 flag) != 0) { 21913 return (EFAULT); 21914 } 21915 break; 21916 } 21917 case DDI_MODEL_NONE: 21918 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21919 flag) != 0) { 21920 return (EFAULT); 21921 } 21922 if ((rval = sd_persistent_reservation_in_read_keys(un, 21923 &inkeys, flag)) != 0) { 21924 return (rval); 21925 } 21926 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21927 flag) != 0) { 21928 return (EFAULT); 21929 } 21930 break; 21931 } 21932 21933 #else /* ! _MULTI_DATAMODEL */ 21934 21935 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21936 return (EFAULT); 21937 } 21938 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21939 if (rval != 0) { 21940 return (rval); 21941 } 21942 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21943 return (EFAULT); 21944 } 21945 21946 #endif /* _MULTI_DATAMODEL */ 21947 21948 return (rval); 21949 } 21950 21951 21952 /* 21953 * Function: sd_mhdioc_inresv 21954 * 21955 * Description: This routine is the driver entry point for handling ioctl 21956 * requests to issue the SCSI-3 Persistent In Read Reservations 21957 * command to the device (MHIOCGRP_INRESV). 21958 * 21959 * Arguments: dev - the device number 21960 * arg - user provided in_resv structure 21961 * flag - this argument is a pass through to ddi_copyxxx() 21962 * directly from the mode argument of ioctl().
21963 * 21964 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21965 * ENXIO 21966 * EFAULT 21967 */ 21968 21969 static int 21970 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21971 { 21972 struct sd_lun *un; 21973 mhioc_inresvs_t inresvs; 21974 int rval = 0; 21975 21976 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21977 return (ENXIO); 21978 } 21979 21980 #ifdef _MULTI_DATAMODEL 21981 21982 switch (ddi_model_convert_from(flag & FMODELS)) { 21983 case DDI_MODEL_ILP32: { 21984 struct mhioc_inresvs32 inresvs32; 21985 21986 if (ddi_copyin(arg, &inresvs32, 21987 sizeof (struct mhioc_inresvs32), flag) != 0) { 21988 return (EFAULT); 21989 } 21990 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21991 if ((rval = sd_persistent_reservation_in_read_resv(un, 21992 &inresvs, flag)) != 0) { 21993 return (rval); 21994 } 21995 inresvs32.generation = inresvs.generation; 21996 if (ddi_copyout(&inresvs32, arg, 21997 sizeof (struct mhioc_inresvs32), flag) != 0) { 21998 return (EFAULT); 21999 } 22000 break; 22001 } 22002 case DDI_MODEL_NONE: 22003 if (ddi_copyin(arg, &inresvs, 22004 sizeof (mhioc_inresvs_t), flag) != 0) { 22005 return (EFAULT); 22006 } 22007 if ((rval = sd_persistent_reservation_in_read_resv(un, 22008 &inresvs, flag)) != 0) { 22009 return (rval); 22010 } 22011 if (ddi_copyout(&inresvs, arg, 22012 sizeof (mhioc_inresvs_t), flag) != 0) { 22013 return (EFAULT); 22014 } 22015 break; 22016 } 22017 22018 #else /* ! _MULTI_DATAMODEL */ 22019 22020 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 22021 return (EFAULT); 22022 } 22023 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 22024 if (rval != 0) { 22025 return (rval); 22026 } 22027 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 22028 return (EFAULT); 22029 } 22030 22031 #endif /* ! _MULTI_DATAMODEL */ 22032 22033 return (rval); 22034 } 22035 22036 22037 /* 22038 * The following routines support the clustering functionality described below 22039 * and implement lost reservation reclaim functionality. 22040 * 22041 * Clustering 22042 * ---------- 22043 * The clustering code uses two different, independent forms of SCSI 22044 * reservation: traditional SCSI-2 Reserve/Release, and the newer SCSI-3 22045 * Persistent Group Reservations. For any particular disk, it will use either 22046 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 22047 * 22048 * SCSI-2 22049 * The cluster software takes ownership of a multi-hosted disk by issuing the 22050 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 22051 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 22052 * cluster host, just after taking ownership of the disk with the MHIOCTKOWN 22053 * ioctl, then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" 22054 * in the driver. The meaning of failfast is that if the driver (on this host) 22055 * ever encounters the scsi error return code RESERVATION_CONFLICT from the 22056 * device, it should immediately panic the host. The motivation for this ioctl 22057 * is that if this host does encounter reservation conflict, the underlying 22058 * cause is that some other host of the cluster has decided that this host is 22059 * no longer in the cluster and has seized control of the disks for itself. Since this host is no longer in the cluster, it ought to panic itself.
 * The MHIOCENFAILFAST ioctl does two things:
 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
 *	    error to panic the host
 *	(b) it sets up a periodic timer to test whether this host still has
 *	    "access" (in that no other host has reserved the device): if the
 *	    periodic timer gets RESERVATION_CONFLICT, the host is panicked.
 *	    The purpose of that periodic timer is to handle scenarios where
 *	    the host is otherwise temporarily quiescent, temporarily doing no
 *	    real i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another
 * host, by issuing a SCSI Bus Device Reset. It will then issue a SCSI
 * Reserve for the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT).
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices
 * that have LOST RESERVATIONS when the scsi watch facility calls back
 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */

/*
 * Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 * Arguments:	dev - the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share the callback function
 *		interval - the number of milliseconds specifying the watch
 *			interval for issuing TEST UNIT READY commands. If
 *			set to 0 the watch should be terminated. If the
 *			interval is set to 0 and if the device is required
 *			to hold reservation while disabling failfast, the
 *			watch is restarted with an interval of
 *			reinstate_resv_delay.
 *
 * Return Code:	0 - Successful submit/terminate of scsi watch request
 *		ENXIO - Indicates an invalid device was specified
 *		EAGAIN - Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation, but perhaps we should be checking
			 * the failfast case.
			 */
			return (0);
		}
		/*
		 * If the device is required to hold reservation while
		 * disabling failfast, we need to restart the scsi_watch
		 * routine with an interval of reinstate_resv_delay.
		 */
		if (un->un_resvd_status & SD_RESERVE) {
			interval = sd_reinstate_resv_delay / 1000;
		} else {
			/* no failfast so bail */
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * adjust minimum time interval to 1 second,
	 * and convert from msecs to usecs
	 */
	if (interval > 0 && interval < 1000) {
		interval = 1000;
	}
	interval *= 1000;

	/*
	 * submit the request to the scsi_watch service
	 */
	token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
	    SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
	if (token == NULL) {
		return (EAGAIN);
	}

	/*
	 * save token for termination later on
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_mhd_token = token;
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_mhd_watch_cb()
 *
 * Description: This function is the callback function used by the scsi watch
 *		facility. The scsi watch facility sends the "Test Unit Ready"
 *		and processes the status. If applicable (i.e. a "Unit
 *		Attention" status and automatic "Request Sense" not used) the
 *		scsi watch facility will send a "Request Sense" and retrieve
 *		the sense data to be passed to this callback function. In
 *		either case (the automatic "Request Sense" or the facility
 *		submitting one), this callback is passed the status and sense
 *		data.
 *
 * Arguments:	arg - the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share this callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			packet, status byte and sense data
 *
 * Return Code:	0 - continue the watch task
 *		non-zero - terminate the watch task
 */

static int
sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun		*un;
	struct scsi_status	*statusp;
	uint8_t			*sensep;
	struct scsi_pkt		*pkt;
	uchar_t			actual_sense_length;
	dev_t			dev = (dev_t)arg;

	ASSERT(resultp != NULL);
	statusp			= resultp->statusp;
	sensep			= (uint8_t *)resultp->sensep;
	pkt			= resultp->pkt;
	actual_sense_length	= resultp->actual_sense_length;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_MHD, un,
	    "sd_mhd_watch_cb: reason '%s', status '%s'\n",
	    scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));

	/* Begin processing of the status and/or sense data */
	if (pkt->pkt_reason != CMD_CMPLT) {
		/* Handle the incomplete packet */
		sd_mhd_watch_incomplete(un, pkt);
		return (0);
	} else if (*((unsigned char *)statusp) != STATUS_GOOD) {
		if (*((unsigned char *)statusp)
		    == STATUS_RESERVATION_CONFLICT) {
			/*
			 * Handle a reservation conflict by panicking if
			 * configured for failfast or by logging the conflict
			 * and updating the reservation status
			 */
			mutex_enter(SD_MUTEX(un));
			if ((un->un_resvd_status & SD_FAILFAST) &&
			    (sd_failfast_enable)) {
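				/*
				 * Failfast is armed: a reservation conflict
				 * means another host has taken over this
				 * disk, so panic immediately (see the
				 * clustering notes above sd_check_mhd()).
				 */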
sd_panic_for_res_conflict(un); 22247 /*NOTREACHED*/ 22248 } 22249 SD_INFO(SD_LOG_IOCTL_MHD, un, 22250 "sd_mhd_watch_cb: Reservation Conflict\n"); 22251 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 22252 mutex_exit(SD_MUTEX(un)); 22253 } 22254 } 22255 22256 if (sensep != NULL) { 22257 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 22258 mutex_enter(SD_MUTEX(un)); 22259 if ((scsi_sense_asc(sensep) == 22260 SD_SCSI_RESET_SENSE_CODE) && 22261 (un->un_resvd_status & SD_RESERVE)) { 22262 /* 22263 * The additional sense code indicates a power 22264 * on or bus device reset has occurred; update 22265 * the reservation status. 22266 */ 22267 un->un_resvd_status |= 22268 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22269 SD_INFO(SD_LOG_IOCTL_MHD, un, 22270 "sd_mhd_watch_cb: Lost Reservation\n"); 22271 } 22272 } else { 22273 return (0); 22274 } 22275 } else { 22276 mutex_enter(SD_MUTEX(un)); 22277 } 22278 22279 if ((un->un_resvd_status & SD_RESERVE) && 22280 (un->un_resvd_status & SD_LOST_RESERVE)) { 22281 if (un->un_resvd_status & SD_WANT_RESERVE) { 22282 /* 22283 * A reset occurred in between the last probe and this 22284 * one so if a timeout is pending cancel it. 22285 */ 22286 if (un->un_resvd_timeid) { 22287 timeout_id_t temp_id = un->un_resvd_timeid; 22288 un->un_resvd_timeid = NULL; 22289 mutex_exit(SD_MUTEX(un)); 22290 (void) untimeout(temp_id); 22291 mutex_enter(SD_MUTEX(un)); 22292 } 22293 un->un_resvd_status &= ~SD_WANT_RESERVE; 22294 } 22295 if (un->un_resvd_timeid == 0) { 22296 /* Schedule a timeout to handle the lost reservation */ 22297 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 22298 (void *)dev, 22299 drv_usectohz(sd_reinstate_resv_delay)); 22300 } 22301 } 22302 mutex_exit(SD_MUTEX(un)); 22303 return (0); 22304 } 22305 22306 22307 /* 22308 * Function: sd_mhd_watch_incomplete() 22309 * 22310 * Description: This function is used to find out why a scsi pkt sent by the 22311 * scsi watch facility was not completed. Under some scenarios this 22312 * routine will return. Otherwise it will send a bus reset to see 22313 * if the drive is still online. 22314 * 22315 * Arguments: un - driver soft state (unit) structure 22316 * pkt - incomplete scsi pkt 22317 */ 22318 22319 static void 22320 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22321 { 22322 int be_chatty; 22323 int perr; 22324 22325 ASSERT(pkt != NULL); 22326 ASSERT(un != NULL); 22327 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22328 perr = (pkt->pkt_statistics & STAT_PERR); 22329 22330 mutex_enter(SD_MUTEX(un)); 22331 if (un->un_state == SD_STATE_DUMPING) { 22332 mutex_exit(SD_MUTEX(un)); 22333 return; 22334 } 22335 22336 switch (pkt->pkt_reason) { 22337 case CMD_UNX_BUS_FREE: 22338 /* 22339 * If we had a parity error that caused the target to drop BSY*, 22340 * don't be chatty about it. 22341 */ 22342 if (perr && be_chatty) { 22343 be_chatty = 0; 22344 } 22345 break; 22346 case CMD_TAG_REJECT: 22347 /* 22348 * The SCSI-2 spec states that a tag reject will be sent by the 22349 * target if tagged queuing is not supported. A tag reject may 22350 * also be sent during certain initialization periods or to 22351 * control internal resources. For the latter case the target 22352 * may also return Queue Full. 22353 * 22354 * If this driver receives a tag reject from a target that is 22355 * going through an init period or controlling internal 22356 * resources tagged queuing will be disabled. 
This is a less than optimal behavior, but the driver is unable to
		 * determine the target state and assumes tagged queueing is
		 * not supported.
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state; fall through
		 * and reset the target and/or bus, unless selection did not
		 * complete (indicated by STATE_GOT_BUS), in which case we
		 * don't want to go through a target/bus reset.
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; Update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt_reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of a command status byte, for use in
 *		logging.
 *
 * Arguments:	status - the status byte
 *
 * Return Code:	char * - string containing status description.
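 *
 *		(For example, sd_sname(STATUS_CHECK) returns "check
 *		condition"; unrecognized values fall through to
 *		"<unknown status>".)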
 */

static char *
sd_sname(uchar_t status)
{
	switch (status & STATUS_MASK) {
	case STATUS_GOOD:
		return ("good status");
	case STATUS_CHECK:
		return ("check condition");
	case STATUS_MET:
		return ("condition met");
	case STATUS_BUSY:
		return ("busy");
	case STATUS_INTERMEDIATE:
		return ("intermediate");
	case STATUS_INTERMEDIATE_MET:
		return ("intermediate - condition met");
	case STATUS_RESERVATION_CONFLICT:
		return ("reservation_conflict");
	case STATUS_TERMINATED:
		return ("command terminated");
	case STATUS_QFULL:
		return ("queue full");
	default:
		return ("<unknown status>");
	}
}


/*
 * Function: sd_mhd_resvd_recover()
 *
 * Description: This function adds a reservation entry to the
 *		sd_resv_reclaim_request list and signals the reservation
 *		reclaim thread that there is work pending. If the reservation
 *		reclaim thread has not been previously created this function
 *		will kick it off.
 *
 * Arguments:	arg - the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share this callback function
 *
 * Context: This routine is called by timeout() and is run in interrupt
 *		context. It must not sleep or call other functions which may
 *		sleep.
 */

static void
sd_mhd_resvd_recover(void *arg)
{
	dev_t			dev = (dev_t)arg;
	struct sd_lun		*un;
	struct sd_thr_request	*sd_treq = NULL;
	struct sd_thr_request	*sd_cur = NULL;
	struct sd_thr_request	*sd_prev = NULL;
	int			already_there = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return;
	}

	mutex_enter(SD_MUTEX(un));
	un->un_resvd_timeid = NULL;
	if (un->un_resvd_status & SD_WANT_RESERVE) {
		/*
		 * There was a reset so don't issue the reserve; allow the
		 * sd_mhd_watch_cb callback function to notice this and
		 * reschedule the timeout for reservation.
		 */
		mutex_exit(SD_MUTEX(un));
		return;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * Add this device to the sd_resv_reclaim_request list and the
	 * sd_resv_reclaim_thread should take care of the rest.
	 *
	 * Note: We can't sleep in this context so if the memory allocation
	 * fails allow the sd_mhd_watch_cb callback function to notice this
	 * and reschedule the timeout for reservation.
 (4378460)
	 */
	sd_treq = (struct sd_thr_request *)
	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
	if (sd_treq == NULL) {
		return;
	}

	sd_treq->sd_thr_req_next = NULL;
	sd_treq->dev = dev;
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		sd_tr.srq_thr_req_head = sd_treq;
	} else {
		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
			if (sd_cur->dev == dev) {
				/*
				 * already in queue so don't log
				 * another request for the device
				 */
				already_there = 1;
				break;
			}
			sd_prev = sd_cur;
		}
		if (!already_there) {
			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
			    "logging request for %lx\n", dev);
			sd_prev->sd_thr_req_next = sd_treq;
		} else {
			kmem_free(sd_treq, sizeof (struct sd_thr_request));
		}
	}

	/*
	 * Create a kernel thread to do the reservation reclaim and free up
	 * this thread. We cannot block this thread while we go away to do
	 * the reservation reclaim.
	 */
	if (sd_tr.srq_resv_reclaim_thread == NULL)
		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
		    sd_resv_reclaim_thread, NULL,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);

	/* Tell the reservation reclaim thread that it has work to do */
	cv_signal(&sd_tr.srq_resv_reclaim_cv);
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}

/*
 * Function: sd_resv_reclaim_thread()
 *
 * Description: This function implements the reservation reclaim operations.
 *
 * Arguments:	none; the thread takes its work from the global
 *		sd_tr.srq_thr_req_head request list.
 */

static void
sd_resv_reclaim_thread()
{
	struct sd_lun		*un;
	struct sd_thr_request	*sd_mhreq;

	/* Wait for work */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		cv_wait(&sd_tr.srq_resv_reclaim_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	}

	/* Loop while we have work */
	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
		un = ddi_get_soft_state(sd_state,
		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
		if (un == NULL) {
			/*
			 * softstate structure is NULL so just
			 * dequeue the request and continue
			 */
			sd_tr.srq_thr_req_head =
			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
			kmem_free(sd_tr.srq_thr_cur_req,
			    sizeof (struct sd_thr_request));
			continue;
		}

		/* dequeue the request */
		sd_mhreq = sd_tr.srq_thr_cur_req;
		sd_tr.srq_thr_req_head =
		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);

		/*
		 * Reclaim reservation only if SD_RESERVE is still set. There
		 * may have been a call to MHIOCRELEASE before we got here.
		 */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			/*
			 * Note: The SD_LOST_RESERVE flag is cleared before
			 * reclaiming the reservation.
If this is done after the 22648 * call to sd_reserve_release a reservation loss in the 22649 * window between pkt completion of reserve cmd and 22650 * mutex_enter below may not be recognized 22651 */ 22652 un->un_resvd_status &= ~SD_LOST_RESERVE; 22653 mutex_exit(SD_MUTEX(un)); 22654 22655 if (sd_reserve_release(sd_mhreq->dev, 22656 SD_RESERVE) == 0) { 22657 mutex_enter(SD_MUTEX(un)); 22658 un->un_resvd_status |= SD_RESERVE; 22659 mutex_exit(SD_MUTEX(un)); 22660 SD_INFO(SD_LOG_IOCTL_MHD, un, 22661 "sd_resv_reclaim_thread: " 22662 "Reservation Recovered\n"); 22663 } else { 22664 mutex_enter(SD_MUTEX(un)); 22665 un->un_resvd_status |= SD_LOST_RESERVE; 22666 mutex_exit(SD_MUTEX(un)); 22667 SD_INFO(SD_LOG_IOCTL_MHD, un, 22668 "sd_resv_reclaim_thread: Failed " 22669 "Reservation Recovery\n"); 22670 } 22671 } else { 22672 mutex_exit(SD_MUTEX(un)); 22673 } 22674 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22675 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22676 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22677 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22678 /* 22679 * wakeup the destroy thread if anyone is waiting on 22680 * us to complete. 22681 */ 22682 cv_signal(&sd_tr.srq_inprocess_cv); 22683 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22684 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22685 } 22686 22687 /* 22688 * cleanup the sd_tr structure now that this thread will not exist 22689 */ 22690 ASSERT(sd_tr.srq_thr_req_head == NULL); 22691 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22692 sd_tr.srq_resv_reclaim_thread = NULL; 22693 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22694 thread_exit(); 22695 } 22696 22697 22698 /* 22699 * Function: sd_rmv_resv_reclaim_req() 22700 * 22701 * Description: This function removes any pending reservation reclaim requests 22702 * for the specified device. 22703 * 22704 * Arguments: dev - the device 'dev_t' 22705 */ 22706 22707 static void 22708 sd_rmv_resv_reclaim_req(dev_t dev) 22709 { 22710 struct sd_thr_request *sd_mhreq; 22711 struct sd_thr_request *sd_prev; 22712 22713 /* Remove a reservation reclaim request from the list */ 22714 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22715 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22716 /* 22717 * We are attempting to reinstate reservation for 22718 * this device. We wait for sd_reserve_release() 22719 * to return before we return. 22720 */ 22721 cv_wait(&sd_tr.srq_inprocess_cv, 22722 &sd_tr.srq_resv_reclaim_mutex); 22723 } else { 22724 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22725 if (sd_mhreq && sd_mhreq->dev == dev) { 22726 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22727 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22728 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22729 return; 22730 } 22731 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22732 if (sd_mhreq && sd_mhreq->dev == dev) { 22733 break; 22734 } 22735 sd_prev = sd_mhreq; 22736 } 22737 if (sd_mhreq != NULL) { 22738 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22739 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22740 } 22741 } 22742 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22743 } 22744 22745 22746 /* 22747 * Function: sd_mhd_reset_notify_cb() 22748 * 22749 * Description: This is a call back function for scsi_reset_notify. This 22750 * function updates the softstate reserved status and logs the 22751 * reset. The driver scsi watch facility callback function 22752 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22753 * will reclaim the reservation. 
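 *		This callback is registered with the transport via
 *		scsi_reset_notify(), so the driver is told about resets,
 *		including those it did not issue itself.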
 *
 * Arguments:	arg - driver soft state (unit) structure
 */

static void
sd_mhd_reset_notify_cb(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	mutex_enter(SD_MUTEX(un));
	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
	}
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_take_ownership()
 *
 * Description: This routine implements an algorithm to achieve a stable
 *		reservation on disks which don't implement priority reserve,
 *		and makes sure that other hosts lose any re-reservation
 *		attempts. This algorithm consists of a loop that keeps
 *		issuing the RESERVE for some period of time
 *		(min_ownership_delay, default 6 seconds). During that loop,
 *		it looks to see if there has been a bus device reset or bus
 *		reset (both of which cause an existing reservation to be
 *		lost). If the reservation is lost, it issues RESERVE until a
 *		period of min_ownership_delay with no resets has gone by, or
 *		until max_ownership_delay has expired. This loop ensures that
 *		the host really did manage to reserve the device, in spite of
 *		resets. The looping for min_ownership_delay (default six
 *		seconds) is important to early generation clustering
 *		products, Solstice HA 1.x and Sun Cluster 2.x. Those products
 *		use an MHIOCENFAILFAST periodic timer of two seconds. By
 *		having MHIOCTKOWN issue Reserves in a loop for six seconds,
 *		and having MHIOCENFAILFAST poll every two seconds, the idea
 *		is that by the time the MHIOCTKOWN ioctl returns, the other
 *		host (if any) will have already noticed, via the
 *		MHIOCENFAILFAST polling, that it no longer "owns" the disk
 *		and will have panicked itself. Thus, the host issuing the
 *		MHIOCTKOWN is assured (with timing dependencies) that by the
 *		time it actually starts to use the disk for real work, the
 *		old owner is no longer accessing it.
 *
 *		min_ownership_delay is the minimum amount of time for which
 *		the disk must be reserved continuously, devoid of resets,
 *		before the MHIOCTKOWN ioctl will return success.
 *
 *		max_ownership_delay indicates the amount of time by which the
 *		take ownership should succeed or timeout with an error.
 *
 * Arguments:	dev - the device 'dev_t'
 *		*p - struct containing timing info.
 *
 * Return Code: 0 for success or error code
 */

static int
sd_take_ownership(dev_t dev, struct mhioctkown *p)
{
	struct sd_lun	*un;
	int		rval;
	int		err;
	int		reservation_count = 0;
	int		min_ownership_delay = 6000000; /* in usec */
	int		max_ownership_delay = 30000000; /* in usec */
	clock_t		start_time;	/* starting time of this algorithm */
	clock_t		end_time;	/* time limit for giving up */
	clock_t		ownership_time;	/* time limit for stable ownership */
	clock_t		current_time;
	clock_t		previous_current_time;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Attempt a device reservation. A priority reservation is requested.
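	 * (If the device is already reserved by another host,
	 * sd_reserve_release() responds to SD_PRIORITY_RESERVE by resetting
	 * the lun/target/bus and reissuing the reserve; see that routine
	 * below.)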
22834 */ 22835 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22836 != SD_SUCCESS) { 22837 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22838 "sd_take_ownership: return(1)=%d\n", rval); 22839 return (rval); 22840 } 22841 22842 /* Update the softstate reserved status to indicate the reservation */ 22843 mutex_enter(SD_MUTEX(un)); 22844 un->un_resvd_status |= SD_RESERVE; 22845 un->un_resvd_status &= 22846 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22847 mutex_exit(SD_MUTEX(un)); 22848 22849 if (p != NULL) { 22850 if (p->min_ownership_delay != 0) { 22851 min_ownership_delay = p->min_ownership_delay * 1000; 22852 } 22853 if (p->max_ownership_delay != 0) { 22854 max_ownership_delay = p->max_ownership_delay * 1000; 22855 } 22856 } 22857 SD_INFO(SD_LOG_IOCTL_MHD, un, 22858 "sd_take_ownership: min, max delays: %d, %d\n", 22859 min_ownership_delay, max_ownership_delay); 22860 22861 start_time = ddi_get_lbolt(); 22862 current_time = start_time; 22863 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22864 end_time = start_time + drv_usectohz(max_ownership_delay); 22865 22866 while (current_time - end_time < 0) { 22867 delay(drv_usectohz(500000)); 22868 22869 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22870 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22871 mutex_enter(SD_MUTEX(un)); 22872 rval = (un->un_resvd_status & 22873 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22874 mutex_exit(SD_MUTEX(un)); 22875 break; 22876 } 22877 } 22878 previous_current_time = current_time; 22879 current_time = ddi_get_lbolt(); 22880 mutex_enter(SD_MUTEX(un)); 22881 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22882 ownership_time = ddi_get_lbolt() + 22883 drv_usectohz(min_ownership_delay); 22884 reservation_count = 0; 22885 } else { 22886 reservation_count++; 22887 } 22888 un->un_resvd_status |= SD_RESERVE; 22889 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22890 mutex_exit(SD_MUTEX(un)); 22891 22892 SD_INFO(SD_LOG_IOCTL_MHD, un, 22893 "sd_take_ownership: ticks for loop iteration=%ld, " 22894 "reservation=%s\n", (current_time - previous_current_time), 22895 reservation_count ? 
"ok" : "reclaimed"); 22896 22897 if (current_time - ownership_time >= 0 && 22898 reservation_count >= 4) { 22899 rval = 0; /* Achieved a stable ownership */ 22900 break; 22901 } 22902 if (current_time - end_time >= 0) { 22903 rval = EACCES; /* No ownership in max possible time */ 22904 break; 22905 } 22906 } 22907 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22908 "sd_take_ownership: return(2)=%d\n", rval); 22909 return (rval); 22910 } 22911 22912 22913 /* 22914 * Function: sd_reserve_release() 22915 * 22916 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22917 * PRIORITY RESERVE commands based on a user specified command type 22918 * 22919 * Arguments: dev - the device 'dev_t' 22920 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22921 * SD_RESERVE, SD_RELEASE 22922 * 22923 * Return Code: 0 or Error Code 22924 */ 22925 22926 static int 22927 sd_reserve_release(dev_t dev, int cmd) 22928 { 22929 struct uscsi_cmd *com = NULL; 22930 struct sd_lun *un = NULL; 22931 char cdb[CDB_GROUP0]; 22932 int rval; 22933 22934 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22935 (cmd == SD_PRIORITY_RESERVE)); 22936 22937 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22938 return (ENXIO); 22939 } 22940 22941 /* instantiate and initialize the command and cdb */ 22942 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22943 bzero(cdb, CDB_GROUP0); 22944 com->uscsi_flags = USCSI_SILENT; 22945 com->uscsi_timeout = un->un_reserve_release_time; 22946 com->uscsi_cdblen = CDB_GROUP0; 22947 com->uscsi_cdb = cdb; 22948 if (cmd == SD_RELEASE) { 22949 cdb[0] = SCMD_RELEASE; 22950 } else { 22951 cdb[0] = SCMD_RESERVE; 22952 } 22953 22954 /* Send the command. */ 22955 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22956 SD_PATH_STANDARD); 22957 22958 /* 22959 * "break" a reservation that is held by another host, by issuing a 22960 * reset if priority reserve is desired, and we could not get the 22961 * device. 22962 */ 22963 if ((cmd == SD_PRIORITY_RESERVE) && 22964 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22965 /* 22966 * First try to reset the LUN. If we cannot, then try a target 22967 * reset, followed by a bus reset if the target reset fails. 22968 */ 22969 int reset_retval = 0; 22970 if (un->un_f_lun_reset_enabled == TRUE) { 22971 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22972 } 22973 if (reset_retval == 0) { 22974 /* The LUN reset either failed or was not issued */ 22975 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22976 } 22977 if ((reset_retval == 0) && 22978 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22979 rval = EIO; 22980 kmem_free(com, sizeof (*com)); 22981 return (rval); 22982 } 22983 22984 bzero(com, sizeof (struct uscsi_cmd)); 22985 com->uscsi_flags = USCSI_SILENT; 22986 com->uscsi_cdb = cdb; 22987 com->uscsi_cdblen = CDB_GROUP0; 22988 com->uscsi_timeout = 5; 22989 22990 /* 22991 * Reissue the last reserve command, this time without request 22992 * sense. Assume that it is just a regular reserve command. 22993 */ 22994 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22995 SD_PATH_STANDARD); 22996 } 22997 22998 /* Return an error if still getting a reservation conflict. 
 */
	if ((rval != 0) &&
	    (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}


#define	SD_NDUMP_RETRIES	12
/*
 * System Crash Dump routine
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset;    /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	    /* rmw - blkno for target */
	size_t		tgt_byte_count;	    /* rmw - # of bytes to xfer */
	size_t		tgt_nblk;	    /* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
	ssize_t		dma_resid;
	daddr_t		oblkno;
	diskaddr_t	nblks = 0;
	diskaddr_t	start_block;

	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	/* Validate the blocks to dump against the partition size. */

	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

	if ((blkno + nblk) > nblks) {
		SD_TRACE(SD_LOG_DUMP, un,
		    "sddump: dump range larger than partition: "
		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
		    blkno, nblk, nblks);
		return (EINVAL);
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * use pm framework to power on HBA 1st
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);

		/*
		 * Dump no longer uses sdpower to power on a device; it's
		 * in-line here so it can be done in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail. */
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_ddi_pm_resume(un);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * good state, so no bus reset is required.
	 * Clear sense data via Request Sense cmd.
	 * In sddump we don't care about allow_bus_device_reset anymore.
	 */

	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {

		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Attempt a bus reset for parallel scsi.
			 *
			 * Note: A bus reset is required because on some host
			 *	 systems (i.e. E420R) a bus device reset is
			 *	 insufficient to reset the state of the target.
			 *
			 * Note: Don't issue the reset for fibre-channel,
			 *	 because this tends to hang the bus (loop) for
			 *	 too long while everyone is logging out and in
			 *	 and the deadman timer for dumping will fire
			 *	 before the dump is complete.
			 */
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}

			/* Delay to give the device some recovery time. */
			drv_usecwait(10000);

			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Convert the partition-relative block number to a
	 * disk physical block number.
	 */
	blkno += start_block;

	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


	/*
	 * Check if the device has a non-512 block size.
	 */
	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			doing_rmw = TRUE;
			/*
			 * Calculate the block number and number of blocks
			 * in terms of the media block size.
			 */
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			/*
			 * Invoke the routine which is going to do read part
			 * of read-modify-write.
			 * Note that this routine returns a pointer to
			 * a valid bp in wr_bp.
			 */
			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
			    &wr_bp);
			if (err) {
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
			/*
			 * Offset is being calculated as -
			 * (original block # * system block size) -
			 * (new block # * target block size)
			 */
			io_start_offset =
			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));

			ASSERT((io_start_offset >= 0) &&
			    (io_start_offset < un->un_tgt_blocksize));
			/*
			 * Do the modify portion of read modify write.
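			 *
			 * (For example, with 512-byte system blocks and a
			 * 2048-byte target blocksize, blkno 7 yields
			 * tgt_blkno = 3584 / 2048 = 1 and io_start_offset =
			 * 7 * 512 - 1 * 2048 = 1536, so the dump data is
			 * copied 1536 bytes into the block just read.)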
23212 */ 23213 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 23214 (size_t)nblk * un->un_sys_blocksize); 23215 } else { 23216 doing_rmw = FALSE; 23217 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23218 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 23219 } 23220 23221 /* Convert blkno and nblk to target blocks */ 23222 blkno = tgt_blkno; 23223 nblk = tgt_nblk; 23224 } else { 23225 wr_bp = &wr_buf; 23226 bzero(wr_bp, sizeof (struct buf)); 23227 wr_bp->b_flags = B_BUSY; 23228 wr_bp->b_un.b_addr = addr; 23229 wr_bp->b_bcount = nblk << DEV_BSHIFT; 23230 wr_bp->b_resid = 0; 23231 } 23232 23233 mutex_exit(SD_MUTEX(un)); 23234 23235 /* 23236 * Obtain a SCSI packet for the write command. 23237 * It should be safe to call the allocator here without 23238 * worrying about being locked for DVMA mapping because 23239 * the address we're passed is already a DVMA mapping 23240 * 23241 * We are also not going to worry about semaphore ownership 23242 * in the dump buffer. Dumping is single threaded at present. 23243 */ 23244 23245 wr_pktp = NULL; 23246 23247 dma_resid = wr_bp->b_bcount; 23248 oblkno = blkno; 23249 23250 while (dma_resid != 0) { 23251 23252 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23253 wr_bp->b_flags &= ~B_ERROR; 23254 23255 if (un->un_partial_dma_supported == 1) { 23256 blkno = oblkno + 23257 ((wr_bp->b_bcount - dma_resid) / 23258 un->un_tgt_blocksize); 23259 nblk = dma_resid / un->un_tgt_blocksize; 23260 23261 if (wr_pktp) { 23262 /* 23263 * Partial DMA transfers after initial transfer 23264 */ 23265 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 23266 blkno, nblk); 23267 } else { 23268 /* Initial transfer */ 23269 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23270 un->un_pkt_flags, NULL_FUNC, NULL, 23271 blkno, nblk); 23272 } 23273 } else { 23274 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23275 0, NULL_FUNC, NULL, blkno, nblk); 23276 } 23277 23278 if (rval == 0) { 23279 /* We were given a SCSI packet, continue. 
*/ 23280 break; 23281 } 23282 23283 if (i == 0) { 23284 if (wr_bp->b_flags & B_ERROR) { 23285 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23286 "no resources for dumping; " 23287 "error code: 0x%x, retrying", 23288 geterror(wr_bp)); 23289 } else { 23290 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23291 "no resources for dumping; retrying"); 23292 } 23293 } else if (i != (SD_NDUMP_RETRIES - 1)) { 23294 if (wr_bp->b_flags & B_ERROR) { 23295 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23296 "no resources for dumping; error code: " 23297 "0x%x, retrying\n", geterror(wr_bp)); 23298 } 23299 } else { 23300 if (wr_bp->b_flags & B_ERROR) { 23301 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23302 "no resources for dumping; " 23303 "error code: 0x%x, retries failed, " 23304 "giving up.\n", geterror(wr_bp)); 23305 } else { 23306 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23307 "no resources for dumping; " 23308 "retries failed, giving up.\n"); 23309 } 23310 mutex_enter(SD_MUTEX(un)); 23311 Restore_state(un); 23312 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 23313 mutex_exit(SD_MUTEX(un)); 23314 scsi_free_consistent_buf(wr_bp); 23315 } else { 23316 mutex_exit(SD_MUTEX(un)); 23317 } 23318 return (EIO); 23319 } 23320 drv_usecwait(10000); 23321 } 23322 23323 if (un->un_partial_dma_supported == 1) { 23324 /* 23325 * save the resid from PARTIAL_DMA 23326 */ 23327 dma_resid = wr_pktp->pkt_resid; 23328 if (dma_resid != 0) 23329 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23330 wr_pktp->pkt_resid = 0; 23331 } else { 23332 dma_resid = 0; 23333 } 23334 23335 /* SunBug 1222170 */ 23336 wr_pktp->pkt_flags = FLAG_NOINTR; 23337 23338 err = EIO; 23339 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23340 23341 /* 23342 * Scsi_poll returns 0 (success) if the command completes and 23343 * the status block is STATUS_GOOD. We should only check 23344 * errors if this condition is not true. Even then we should 23345 * send our own request sense packet only if we have a check 23346 * condition and auto request sense has not been performed by 23347 * the hba. 23348 */ 23349 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23350 23351 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23352 (wr_pktp->pkt_resid == 0)) { 23353 err = SD_SUCCESS; 23354 break; 23355 } 23356 23357 /* 23358 * Check CMD_DEV_GONE 1st, give up if device is gone. 
23359 */ 23360 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23361 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23362 "Error while dumping state...Device is gone\n"); 23363 break; 23364 } 23365 23366 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23367 SD_INFO(SD_LOG_DUMP, un, 23368 "sddump: write failed with CHECK, try # %d\n", i); 23369 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23370 (void) sd_send_polled_RQS(un); 23371 } 23372 23373 continue; 23374 } 23375 23376 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23377 int reset_retval = 0; 23378 23379 SD_INFO(SD_LOG_DUMP, un, 23380 "sddump: write failed with BUSY, try # %d\n", i); 23381 23382 if (un->un_f_lun_reset_enabled == TRUE) { 23383 reset_retval = scsi_reset(SD_ADDRESS(un), 23384 RESET_LUN); 23385 } 23386 if (reset_retval == 0) { 23387 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23388 } 23389 (void) sd_send_polled_RQS(un); 23390 23391 } else { 23392 SD_INFO(SD_LOG_DUMP, un, 23393 "sddump: write failed with 0x%x, try # %d\n", 23394 SD_GET_PKT_STATUS(wr_pktp), i); 23395 mutex_enter(SD_MUTEX(un)); 23396 sd_reset_target(un, wr_pktp); 23397 mutex_exit(SD_MUTEX(un)); 23398 } 23399 23400 /* 23401 * If we are not getting anywhere with lun/target resets, 23402 * let's reset the bus. 23403 */ 23404 if (i == SD_NDUMP_RETRIES/2) { 23405 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23406 (void) sd_send_polled_RQS(un); 23407 } 23408 } 23409 } 23410 23411 scsi_destroy_pkt(wr_pktp); 23412 mutex_enter(SD_MUTEX(un)); 23413 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23414 mutex_exit(SD_MUTEX(un)); 23415 scsi_free_consistent_buf(wr_bp); 23416 } else { 23417 mutex_exit(SD_MUTEX(un)); 23418 } 23419 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23420 return (err); 23421 } 23422 23423 /* 23424 * Function: sd_scsi_poll() 23425 * 23426 * Description: This is a wrapper for the scsi_poll call. 23427 * 23428 * Arguments: sd_lun - The unit structure 23429 * scsi_pkt - The scsi packet being sent to the device. 23430 * 23431 * Return Code: 0 - Command completed successfully with good status 23432 * -1 - Command failed. This could indicate a check condition 23433 * or other status value requiring recovery action. 23434 * 23435 * NOTE: This code is only called off sddump(). 23436 */ 23437 23438 static int 23439 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23440 { 23441 int status; 23442 23443 ASSERT(un != NULL); 23444 ASSERT(!mutex_owned(SD_MUTEX(un))); 23445 ASSERT(pktp != NULL); 23446 23447 status = SD_SUCCESS; 23448 23449 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23450 pktp->pkt_flags |= un->un_tagflags; 23451 pktp->pkt_flags &= ~FLAG_NODISCON; 23452 } 23453 23454 status = sd_ddi_scsi_poll(pktp); 23455 /* 23456 * Scsi_poll returns 0 (success) if the command completes and the 23457 * status block is STATUS_GOOD. We should only check errors if this 23458 * condition is not true. Even then we should send our own request 23459 * sense packet only if we have a check condition and auto 23460 * request sense has not been performed by the hba. 23461 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23462 */ 23463 if ((status != SD_SUCCESS) && 23464 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23465 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23466 (pktp->pkt_reason != CMD_DEV_GONE)) 23467 (void) sd_send_polled_RQS(un); 23468 23469 return (status); 23470 } 23471 23472 /* 23473 * Function: sd_send_polled_RQS() 23474 * 23475 * Description: This sends the request sense command to a device. 
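 *		The pre-allocated request sense packet (un_rqs_pktp) and
 *		buffer (un_rqs_bp) are used, and the packet is marked
 *		FLAG_NOINTR so it can be polled via sd_ddi_scsi_poll().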
23476 * 23477 * Arguments: sd_lun - The unit structure 23478 * 23479 * Return Code: 0 - Command completed successfully with good status 23480 * -1 - Command failed. 23481 * 23482 */ 23483 23484 static int 23485 sd_send_polled_RQS(struct sd_lun *un) 23486 { 23487 int ret_val; 23488 struct scsi_pkt *rqs_pktp; 23489 struct buf *rqs_bp; 23490 23491 ASSERT(un != NULL); 23492 ASSERT(!mutex_owned(SD_MUTEX(un))); 23493 23494 ret_val = SD_SUCCESS; 23495 23496 rqs_pktp = un->un_rqs_pktp; 23497 rqs_bp = un->un_rqs_bp; 23498 23499 mutex_enter(SD_MUTEX(un)); 23500 23501 if (un->un_sense_isbusy) { 23502 ret_val = SD_FAILURE; 23503 mutex_exit(SD_MUTEX(un)); 23504 return (ret_val); 23505 } 23506 23507 /* 23508 * If the request sense buffer (and packet) is not in use, 23509 * let's set the un_sense_isbusy and send our packet 23510 */ 23511 un->un_sense_isbusy = 1; 23512 rqs_pktp->pkt_resid = 0; 23513 rqs_pktp->pkt_reason = 0; 23514 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23515 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23516 23517 mutex_exit(SD_MUTEX(un)); 23518 23519 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23520 " 0x%p\n", rqs_bp->b_un.b_addr); 23521 23522 /* 23523 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23524 * axle - it has a call into us! 23525 */ 23526 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23527 SD_INFO(SD_LOG_COMMON, un, 23528 "sd_send_polled_RQS: RQS failed\n"); 23529 } 23530 23531 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23532 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23533 23534 mutex_enter(SD_MUTEX(un)); 23535 un->un_sense_isbusy = 0; 23536 mutex_exit(SD_MUTEX(un)); 23537 23538 return (ret_val); 23539 } 23540 23541 /* 23542 * Defines needed for localized version of the scsi_poll routine. 23543 */ 23544 #define CSEC 10000 /* usecs */ 23545 #define SEC_TO_CSEC (1000000/CSEC) 23546 23547 /* 23548 * Function: sd_ddi_scsi_poll() 23549 * 23550 * Description: Localized version of the scsi_poll routine. The purpose is to 23551 * send a scsi_pkt to a device as a polled command. This version 23552 * is to ensure more robust handling of transport errors. 23553 * Specifically this routine cures not ready, coming ready 23554 * transition for power up and reset of sonoma's. This can take 23555 * up to 45 seconds for power-on and 20 seconds for reset of a 23556 * sonoma lun. 23557 * 23558 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23559 * 23560 * Return Code: 0 - Command completed successfully with good status 23561 * -1 - Command failed. 23562 * 23563 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 23564 * be fixed (removing this code), we need to determine how to handle the 23565 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 23566 * 23567 * NOTE: This code is only called off sddump(). 23568 */ 23569 static int 23570 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23571 { 23572 int rval = -1; 23573 int savef; 23574 long savet; 23575 void (*savec)(); 23576 int timeout; 23577 int busy_count; 23578 int poll_delay; 23579 int rc; 23580 uint8_t *sensep; 23581 struct scsi_arq_status *arqstat; 23582 extern int do_polled_io; 23583 23584 ASSERT(pkt->pkt_scbp); 23585 23586 /* 23587 * save old flags.. 
23588 */ 23589 savef = pkt->pkt_flags; 23590 savec = pkt->pkt_comp; 23591 savet = pkt->pkt_time; 23592 23593 pkt->pkt_flags |= FLAG_NOINTR; 23594 23595 /* 23596 * XXX there is nothing in the SCSA spec that states that we should not 23597 * do a callback for polled cmds; however, removing this will break sd 23598 * and probably other target drivers 23599 */ 23600 pkt->pkt_comp = NULL; 23601 23602 /* 23603 * we don't like a polled command without timeout. 23604 * 60 seconds seems long enough. 23605 */ 23606 if (pkt->pkt_time == 0) 23607 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23608 23609 /* 23610 * Send polled cmd. 23611 * 23612 * We do some error recovery for various errors. Tran_busy, 23613 * queue full, and non-dispatched commands are retried every 10 msec. 23614 * as they are typically transient failures. Busy status and Not 23615 * Ready are retried every second as this status takes a while to 23616 * change. 23617 */ 23618 timeout = pkt->pkt_time * SEC_TO_CSEC; 23619 23620 for (busy_count = 0; busy_count < timeout; busy_count++) { 23621 /* 23622 * Initialize pkt status variables. 23623 */ 23624 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23625 23626 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23627 if (rc != TRAN_BUSY) { 23628 /* Transport failed - give up. */ 23629 break; 23630 } else { 23631 /* Transport busy - try again. */ 23632 poll_delay = 1 * CSEC; /* 10 msec. */ 23633 } 23634 } else { 23635 /* 23636 * Transport accepted - check pkt status. 23637 */ 23638 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23639 if ((pkt->pkt_reason == CMD_CMPLT) && 23640 (rc == STATUS_CHECK) && 23641 (pkt->pkt_state & STATE_ARQ_DONE)) { 23642 arqstat = 23643 (struct scsi_arq_status *)(pkt->pkt_scbp); 23644 sensep = (uint8_t *)&arqstat->sts_sensedata; 23645 } else { 23646 sensep = NULL; 23647 } 23648 23649 if ((pkt->pkt_reason == CMD_CMPLT) && 23650 (rc == STATUS_GOOD)) { 23651 /* No error - we're done */ 23652 rval = 0; 23653 break; 23654 23655 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23656 /* Lost connection - give up */ 23657 break; 23658 23659 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23660 (pkt->pkt_state == 0)) { 23661 /* Pkt not dispatched - try again. */ 23662 poll_delay = 1 * CSEC; /* 10 msec. */ 23663 23664 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23665 (rc == STATUS_QFULL)) { 23666 /* Queue full - try again. */ 23667 poll_delay = 1 * CSEC; /* 10 msec. */ 23668 23669 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23670 (rc == STATUS_BUSY)) { 23671 /* Busy - try again. */ 23672 poll_delay = 100 * CSEC; /* 1 sec. */ 23673 busy_count += (SEC_TO_CSEC - 1); 23674 23675 } else if ((sensep != NULL) && 23676 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 23677 /* 23678 * Unit Attention - try again. 23679 * Pretend it took 1 sec. 23680 * NOTE: 'continue' avoids poll_delay 23681 */ 23682 busy_count += (SEC_TO_CSEC - 1); 23683 continue; 23684 23685 } else if ((sensep != NULL) && 23686 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23687 (scsi_sense_asc(sensep) == 0x04) && 23688 (scsi_sense_ascq(sensep) == 0x01)) { 23689 /* 23690 * Not ready -> ready - try again. 23691 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 23692 * ...same as STATUS_BUSY 23693 */ 23694 poll_delay = 100 * CSEC; /* 1 sec. */ 23695 busy_count += (SEC_TO_CSEC - 1); 23696 23697 } else { 23698 /* BAD status - give up. 
*/ 23699 break; 23700 } 23701 } 23702 23703 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 23704 !do_polled_io) { 23705 delay(drv_usectohz(poll_delay)); 23706 } else { 23707 /* we busy wait during cpr_dump or interrupt threads */ 23708 drv_usecwait(poll_delay); 23709 } 23710 } 23711 23712 pkt->pkt_flags = savef; 23713 pkt->pkt_comp = savec; 23714 pkt->pkt_time = savet; 23715 23716 /* return on error */ 23717 if (rval) 23718 return (rval); 23719 23720 /* 23721 * This is not a performance critical code path. 23722 * 23723 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 23724 * issues associated with looking at DMA memory prior to 23725 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 23726 */ 23727 scsi_sync_pkt(pkt); 23728 return (0); 23729 } 23730 23731 23732 23733 /* 23734 * Function: sd_persistent_reservation_in_read_keys 23735 * 23736 * Description: This routine is the driver entry point for handling CD-ROM 23737 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23738 * by sending the SCSI-3 PRIN commands to the device. 23739 * Processes the read keys command response by copying the 23740 * reservation key information into the user provided buffer. 23741 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23742 * 23743 * Arguments: un - Pointer to soft state struct for the target. 23744 * usrp - user provided pointer to multihost Persistent In Read 23745 * Keys structure (mhioc_inkeys_t) 23746 * flag - this argument is a pass through to ddi_copyxxx() 23747 * directly from the mode argument of ioctl(). 23748 * 23749 * Return Code: 0 - Success 23750 * EACCES 23751 * ENOTSUP 23752 * errno return code from sd_send_scsi_cmd() 23753 * 23754 * Context: Can sleep. Does not return until command is completed. 23755 */ 23756 23757 static int 23758 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23759 mhioc_inkeys_t *usrp, int flag) 23760 { 23761 #ifdef _MULTI_DATAMODEL 23762 struct mhioc_key_list32 li32; 23763 #endif 23764 sd_prin_readkeys_t *in; 23765 mhioc_inkeys_t *ptr; 23766 mhioc_key_list_t li; 23767 uchar_t *data_bufp; 23768 int data_len; 23769 int rval; 23770 size_t copysz; 23771 23772 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23773 return (EINVAL); 23774 } 23775 bzero(&li, sizeof (mhioc_key_list_t)); 23776 23777 /* 23778 * Get the listsize from user 23779 */ 23780 #ifdef _MULTI_DATAMODEL 23781 23782 switch (ddi_model_convert_from(flag & FMODELS)) { 23783 case DDI_MODEL_ILP32: 23784 copysz = sizeof (struct mhioc_key_list32); 23785 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23786 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23787 "sd_persistent_reservation_in_read_keys: " 23788 "failed ddi_copyin: mhioc_key_list32_t\n"); 23789 rval = EFAULT; 23790 goto done; 23791 } 23792 li.listsize = li32.listsize; 23793 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23794 break; 23795 23796 case DDI_MODEL_NONE: 23797 copysz = sizeof (mhioc_key_list_t); 23798 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23799 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23800 "sd_persistent_reservation_in_read_keys: " 23801 "failed ddi_copyin: mhioc_key_list_t\n"); 23802 rval = EFAULT; 23803 goto done; 23804 } 23805 break; 23806 } 23807 23808 #else /* ! 
_MULTI_DATAMODEL */ 23809 copysz = sizeof (mhioc_key_list_t); 23810 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23811 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23812 "sd_persistent_reservation_in_read_keys: " 23813 "failed ddi_copyin: mhioc_key_list_t\n"); 23814 rval = EFAULT; 23815 goto done; 23816 } 23817 #endif 23818 23819 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23820 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23821 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23822 23823 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23824 data_len, data_bufp)) != 0) { 23825 goto done; 23826 } 23827 in = (sd_prin_readkeys_t *)data_bufp; 23828 ptr->generation = BE_32(in->generation); 23829 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23830 23831 /* 23832 * Return the min(listsize, listlen) keys 23833 */ 23834 #ifdef _MULTI_DATAMODEL 23835 23836 switch (ddi_model_convert_from(flag & FMODELS)) { 23837 case DDI_MODEL_ILP32: 23838 li32.listlen = li.listlen; 23839 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23840 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23841 "sd_persistent_reservation_in_read_keys: " 23842 "failed ddi_copyout: mhioc_key_list32_t\n"); 23843 rval = EFAULT; 23844 goto done; 23845 } 23846 break; 23847 23848 case DDI_MODEL_NONE: 23849 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23850 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23851 "sd_persistent_reservation_in_read_keys: " 23852 "failed ddi_copyout: mhioc_key_list_t\n"); 23853 rval = EFAULT; 23854 goto done; 23855 } 23856 break; 23857 } 23858 23859 #else /* ! _MULTI_DATAMODEL */ 23860 23861 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23862 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23863 "sd_persistent_reservation_in_read_keys: " 23864 "failed ddi_copyout: mhioc_key_list_t\n"); 23865 rval = EFAULT; 23866 goto done; 23867 } 23868 23869 #endif /* _MULTI_DATAMODEL */ 23870 23871 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23872 li.listsize * MHIOC_RESV_KEY_SIZE); 23873 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23874 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23875 "sd_persistent_reservation_in_read_keys: " 23876 "failed ddi_copyout: keylist\n"); 23877 rval = EFAULT; 23878 } 23879 done: 23880 kmem_free(data_bufp, data_len); 23881 return (rval); 23882 } 23883 23884 23885 /* 23886 * Function: sd_persistent_reservation_in_read_resv 23887 * 23888 * Description: This routine is the driver entry point for handling CD-ROM 23889 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23890 * by sending the SCSI-3 PRIN commands to the device. 23891 * Process the read persistent reservations command response by 23892 * copying the reservation information into the user provided 23893 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 23894 * 23895 * Arguments: un - Pointer to soft state struct for the target. 23896 * usrp - user provided pointer to multihost Persistent In Read 23897 * Keys structure (mhioc_inkeys_t) 23898 * flag - this argument is a pass through to ddi_copyxxx() 23899 * directly from the mode argument of ioctl(). 23900 * 23901 * Return Code: 0 - Success 23902 * EACCES 23903 * ENOTSUP 23904 * errno return code from sd_send_scsi_cmd() 23905 * 23906 * Context: Can sleep. Does not return until command is completed. 
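 *
 *		(For illustration only: a userland sketch of the
 *		corresponding MHIOCGRP_INRESV ioctl, assuming a hypothetical
 *		device path and a fixed-size descriptor array.
 *
 *		mhioc_resv_desc_t descs[4];
 *		mhioc_resv_desc_list_t dl =
 *		    { .listsize = 4, .listlen = 0, .list = descs };
 *		mhioc_inresvs_t req = { .generation = 0, .li = &dl };
 *		int fd = open("/dev/rdsk/c1t0d0s2", O_RDWR);
 *		(void) ioctl(fd, MHIOCGRP_INRESV, &req);
 *
 *		On success dl.listlen is the number of reservations on the
 *		device, and min(listsize, listlen) descriptors have been
 *		copied into descs.)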
23907 */ 23908 23909 static int 23910 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23911 mhioc_inresvs_t *usrp, int flag) 23912 { 23913 #ifdef _MULTI_DATAMODEL 23914 struct mhioc_resv_desc_list32 resvlist32; 23915 #endif 23916 sd_prin_readresv_t *in; 23917 mhioc_inresvs_t *ptr; 23918 sd_readresv_desc_t *readresv_ptr; 23919 mhioc_resv_desc_list_t resvlist; 23920 mhioc_resv_desc_t resvdesc; 23921 uchar_t *data_bufp = NULL; 23922 int data_len = 0; 23923 int rval; 23924 int i; 23925 size_t copysz; 23926 mhioc_resv_desc_t *bufp; 23927 23928 if ((ptr = usrp) == NULL) { 23929 return (EINVAL); 23930 } 23931 23932 /* 23933 * Get the listsize from user 23934 */ 23935 #ifdef _MULTI_DATAMODEL 23936 switch (ddi_model_convert_from(flag & FMODELS)) { 23937 case DDI_MODEL_ILP32: 23938 copysz = sizeof (struct mhioc_resv_desc_list32); 23939 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23940 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23941 "sd_persistent_reservation_in_read_resv: " 23942 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23943 rval = EFAULT; 23944 goto done; 23945 } 23946 resvlist.listsize = resvlist32.listsize; 23947 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23948 break; 23949 23950 case DDI_MODEL_NONE: 23951 copysz = sizeof (mhioc_resv_desc_list_t); 23952 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23953 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23954 "sd_persistent_reservation_in_read_resv: " 23955 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23956 rval = EFAULT; 23957 goto done; 23958 } 23959 break; 23960 } 23961 #else /* ! _MULTI_DATAMODEL */ 23962 copysz = sizeof (mhioc_resv_desc_list_t); 23963 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23964 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23965 "sd_persistent_reservation_in_read_resv: " 23966 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23967 rval = EFAULT; 23968 goto done; 23969 } 23970 #endif /* ! _MULTI_DATAMODEL */ 23971 23972 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23973 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23974 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23975 23976 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23977 data_len, data_bufp)) != 0) { 23978 goto done; 23979 } 23980 in = (sd_prin_readresv_t *)data_bufp; 23981 ptr->generation = BE_32(in->generation); 23982 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23983 23984 /* 23985 * Return the min(listsize, listlen) keys 23986 */ 23987 #ifdef _MULTI_DATAMODEL 23988 23989 switch (ddi_model_convert_from(flag & FMODELS)) { 23990 case DDI_MODEL_ILP32: 23991 resvlist32.listlen = resvlist.listlen; 23992 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23993 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23994 "sd_persistent_reservation_in_read_resv: " 23995 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23996 rval = EFAULT; 23997 goto done; 23998 } 23999 break; 24000 24001 case DDI_MODEL_NONE: 24002 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 24003 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24004 "sd_persistent_reservation_in_read_resv: " 24005 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24006 rval = EFAULT; 24007 goto done; 24008 } 24009 break; 24010 } 24011 24012 #else /* ! _MULTI_DATAMODEL */ 24013 24014 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 24015 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24016 "sd_persistent_reservation_in_read_resv: " 24017 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24018 rval = EFAULT; 24019 goto done; 24020 } 24021 24022 #endif /* !
_MULTI_DATAMODEL */ 24023 24024 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 24025 bufp = resvlist.list; 24026 copysz = sizeof (mhioc_resv_desc_t); 24027 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 24028 i++, readresv_ptr++, bufp++) { 24029 24030 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 24031 MHIOC_RESV_KEY_SIZE); 24032 resvdesc.type = readresv_ptr->type; 24033 resvdesc.scope = readresv_ptr->scope; 24034 resvdesc.scope_specific_addr = 24035 BE_32(readresv_ptr->scope_specific_addr); 24036 24037 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 24038 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24039 "sd_persistent_reservation_in_read_resv: " 24040 "failed ddi_copyout: resvlist\n"); 24041 rval = EFAULT; 24042 goto done; 24043 } 24044 } 24045 done: 24046 if (data_bufp != NULL) kmem_free(data_bufp, data_len); 24047 return (rval); 24048 } 24049 24050 24051 /* 24052 * Function: sr_change_blkmode() 24053 * 24054 * Description: This routine is the driver entry point for handling CD-ROM 24055 * block mode ioctl requests. Support for returning and changing 24056 * the current block size in use by the device is implemented. The 24057 * LBA size is changed via a MODE SELECT Block Descriptor. 24058 * 24059 * This routine issues a mode sense with an allocation length of 24060 * 12 bytes for the mode page header and a single block descriptor. 24061 * 24062 * Arguments: dev - the device 'dev_t' 24063 * cmd - the request type; one of CDROMGBLKMODE (get) or 24064 * CDROMSBLKMODE (set) 24065 * data - current block size or requested block size 24066 * flag - this argument is a pass through to ddi_copyxxx() directly 24067 * from the mode argument of ioctl(). 24068 * 24069 * Return Code: the code returned by sd_send_scsi_cmd() 24070 * EINVAL if invalid arguments are provided 24071 * EFAULT if ddi_copyxxx() fails 24072 * ENXIO if fail ddi_get_soft_state 24073 * EIO if invalid mode sense block descriptor length 24074 * 24075 */ 24076 24077 static int 24078 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 24079 { 24080 struct sd_lun *un = NULL; 24081 struct mode_header *sense_mhp, *select_mhp; 24082 struct block_descriptor *sense_desc, *select_desc; 24083 int current_bsize; 24084 int rval = EINVAL; 24085 uchar_t *sense = NULL; 24086 uchar_t *select = NULL; 24087 24088 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 24089 24090 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24091 return (ENXIO); 24092 } 24093 24094 /* 24095 * The block length is changed via the Mode Select block descriptor; the 24096 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 24097 * required as part of this routine. Therefore the mode sense allocation 24098 * length is specified to be the length of a mode page header and a 24099 * block descriptor.
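 *
 * Layout sketch (illustration only; the 4- and 8-byte sizes are the
 * conventional SCSI mode parameter sizes assumed here): the sense
 * and select buffers below are laid out as
 *	bytes 0-3	struct mode_header	(MODE_HEADER_LENGTH)
 *	bytes 4-11	struct block_descriptor	(MODE_BLK_DESC_LENGTH)
 * which is why BUFLEN_CHG_BLK_MODE covers exactly one header plus
 * one block descriptor and no mode page data.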
24100 */ 24101 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 24102 24103 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24104 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 24105 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24106 "sr_change_blkmode: Mode Sense Failed\n"); 24107 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24108 return (rval); 24109 } 24110 24111 /* Check the block descriptor len to handle only 1 block descriptor */ 24112 sense_mhp = (struct mode_header *)sense; 24113 if ((sense_mhp->bdesc_length == 0) || 24114 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 24115 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24116 "sr_change_blkmode: Mode Sense returned invalid block" 24117 " descriptor length\n"); 24118 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24119 return (EIO); 24120 } 24121 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 24122 current_bsize = ((sense_desc->blksize_hi << 16) | 24123 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 24124 24125 /* Process command */ 24126 switch (cmd) { 24127 case CDROMGBLKMODE: 24128 /* Return the block size obtained during the mode sense */ 24129 if (ddi_copyout(&current_bsize, (void *)data, 24130 sizeof (int), flag) != 0) 24131 rval = EFAULT; 24132 break; 24133 case CDROMSBLKMODE: 24134 /* Validate the requested block size */ 24135 switch (data) { 24136 case CDROM_BLK_512: 24137 case CDROM_BLK_1024: 24138 case CDROM_BLK_2048: 24139 case CDROM_BLK_2056: 24140 case CDROM_BLK_2336: 24141 case CDROM_BLK_2340: 24142 case CDROM_BLK_2352: 24143 case CDROM_BLK_2368: 24144 case CDROM_BLK_2448: 24145 case CDROM_BLK_2646: 24146 case CDROM_BLK_2647: 24147 break; 24148 default: 24149 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24150 "sr_change_blkmode: " 24151 "Block Size '%ld' Not Supported\n", data); 24152 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24153 return (EINVAL); 24154 } 24155 24156 /* 24157 * The current block size matches the requested block size so 24158 * there is no need to send the mode select to change the size 24159 */ 24160 if (current_bsize == data) { 24161 break; 24162 } 24163 24164 /* Build the select data for the requested block size */ 24165 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 24166 select_mhp = (struct mode_header *)select; 24167 select_desc = 24168 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 24169 /* 24170 * The LBA size is changed via the block descriptor, so the 24171 * descriptor is built according to the user data 24172 */ 24173 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 24174 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 24175 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 24176 select_desc->blksize_lo = (char)((data) & 0x000000ff); 24177 24178 /* Send the mode select for the requested block size */ 24179 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24180 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24181 SD_PATH_STANDARD)) != 0) { 24182 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24183 "sr_change_blkmode: Mode Select Failed\n"); 24184 /* 24185 * The mode select failed for the requested block size, 24186 * so reset the data for the original block size and 24187 * send it to the target. The error is indicated by the 24188 * return value for the failed mode select.
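 *
 * Packing sketch (illustration only): a requested size of 2048
 * bytes (0x000800) was split across the descriptor above as
 *	blksize_hi = 0x00, blksize_mid = 0x08, blksize_lo = 0x00,
 * and the recovery path below simply copies the three sense
 * bytes back to undo it.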
24189 */ 24190 select_desc->blksize_hi = sense_desc->blksize_hi; 24191 select_desc->blksize_mid = sense_desc->blksize_mid; 24192 select_desc->blksize_lo = sense_desc->blksize_lo; 24193 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24194 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24195 SD_PATH_STANDARD); 24196 } else { 24197 ASSERT(!mutex_owned(SD_MUTEX(un))); 24198 mutex_enter(SD_MUTEX(un)); 24199 sd_update_block_info(un, (uint32_t)data, 0); 24200 mutex_exit(SD_MUTEX(un)); 24201 } 24202 break; 24203 default: 24204 /* should not reach here, but check anyway */ 24205 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24206 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 24207 rval = EINVAL; 24208 break; 24209 } 24210 24211 if (select) { 24212 kmem_free(select, BUFLEN_CHG_BLK_MODE); 24213 } 24214 if (sense) { 24215 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24216 } 24217 return (rval); 24218 } 24219 24220 24221 /* 24222 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 24223 * implement driver support for getting and setting the CD speed. The command 24224 * set used will be based on the device type. If the device has not been 24225 * identified as MMC the Toshiba vendor specific mode page will be used. If 24226 * the device is MMC but does not support the Real Time Streaming feature 24227 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 24228 * be used to read the speed. 24229 */ 24230 24231 /* 24232 * Function: sr_change_speed() 24233 * 24234 * Description: This routine is the driver entry point for handling CD-ROM 24235 * drive speed ioctl requests for devices supporting the Toshiba 24236 * vendor specific drive speed mode page. Support for returning 24237 * and changing the current drive speed in use by the device is 24238 * implemented. 24239 * 24240 * Arguments: dev - the device 'dev_t' 24241 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24242 * CDROMSDRVSPEED (set) 24243 * data - current drive speed or requested drive speed 24244 * flag - this argument is a pass through to ddi_copyxxx() directly 24245 * from the mode argument of ioctl(). 24246 * 24247 * Return Code: the code returned by sd_send_scsi_cmd() 24248 * EINVAL if invalid arguments are provided 24249 * EFAULT if ddi_copyxxx() fails 24250 * ENXIO if fail ddi_get_soft_state 24251 * EIO if invalid mode sense block descriptor length 24252 */ 24253 24254 static int 24255 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24256 { 24257 struct sd_lun *un = NULL; 24258 struct mode_header *sense_mhp, *select_mhp; 24259 struct mode_speed *sense_page, *select_page; 24260 int current_speed; 24261 int rval = EINVAL; 24262 int bd_len; 24263 uchar_t *sense = NULL; 24264 uchar_t *select = NULL; 24265 24266 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24267 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24268 return (ENXIO); 24269 } 24270 24271 /* 24272 * Note: The drive speed is being modified here according to a Toshiba 24273 * vendor specific mode page (0x31). 
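 *
 * Parsing sketch (offsets follow the code below): the speed byte
 * lives in page 0x31 immediately after the mode header and any
 * block descriptor the device returned, i.e.
 *	sense_page = sense + MODE_HEADER_LENGTH + bdesc_length;
 *	current_speed = sense_page->speed;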
24274 */ 24275 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24276 24277 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24278 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 24279 SD_PATH_STANDARD)) != 0) { 24280 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24281 "sr_change_speed: Mode Sense Failed\n"); 24282 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24283 return (rval); 24284 } 24285 sense_mhp = (struct mode_header *)sense; 24286 24287 /* Check the block descriptor len to handle only 1 block descriptor */ 24288 bd_len = sense_mhp->bdesc_length; 24289 if (bd_len > MODE_BLK_DESC_LENGTH) { 24290 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24291 "sr_change_speed: Mode Sense returned invalid block " 24292 "descriptor length\n"); 24293 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24294 return (EIO); 24295 } 24296 24297 sense_page = (struct mode_speed *) 24298 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 24299 current_speed = sense_page->speed; 24300 24301 /* Process command */ 24302 switch (cmd) { 24303 case CDROMGDRVSPEED: 24304 /* Return the drive speed obtained during the mode sense */ 24305 if (current_speed == 0x2) { 24306 current_speed = CDROM_TWELVE_SPEED; 24307 } 24308 if (ddi_copyout(&current_speed, (void *)data, 24309 sizeof (int), flag) != 0) { 24310 rval = EFAULT; 24311 } 24312 break; 24313 case CDROMSDRVSPEED: 24314 /* Validate the requested drive speed */ 24315 switch ((uchar_t)data) { 24316 case CDROM_TWELVE_SPEED: 24317 data = 0x2; 24318 /*FALLTHROUGH*/ 24319 case CDROM_NORMAL_SPEED: 24320 case CDROM_DOUBLE_SPEED: 24321 case CDROM_QUAD_SPEED: 24322 case CDROM_MAXIMUM_SPEED: 24323 break; 24324 default: 24325 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24326 "sr_change_speed: " 24327 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 24328 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24329 return (EINVAL); 24330 } 24331 24332 /* 24333 * The current drive speed matches the requested drive speed so 24334 * there is no need to send the mode select to change the speed 24335 */ 24336 if (current_speed == data) { 24337 break; 24338 } 24339 24340 /* Build the select data for the requested drive speed */ 24341 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24342 select_mhp = (struct mode_header *)select; 24343 select_mhp->bdesc_length = 0; 24344 select_page = 24345 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 24348 select_page->mode_page.code = CDROM_MODE_SPEED; 24349 select_page->mode_page.length = 2; 24350 select_page->speed = (uchar_t)data; 24351 24352 /* Send the mode select for the requested drive speed */ 24353 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24354 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24355 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 24356 /* 24357 * The mode select failed for the requested drive speed, 24358 * so reset the data for the original drive speed and 24359 * send it to the target. The error is indicated by the 24360 * return value for the failed mode select.
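 *
 * Mapping note (derived from the cases above): the standard
 * CDROM_NORMAL/DOUBLE/QUAD/MAXIMUM_SPEED codes are written to the
 * page verbatim, while CDROM_TWELVE_SPEED is translated to the
 * Toshiba-specific value 0x2 on the way in and translated back in
 * the CDROMGDRVSPEED case.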
24361 */ 24362 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24363 "sr_change_speed: Mode Select Failed\n"); 24364 select_page->speed = sense_page->speed; 24365 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24366 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24367 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 24368 } 24369 break; 24370 default: 24371 /* should not reach here, but check anyway */ 24372 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24373 "sr_change_speed: Command '%x' Not Supported\n", cmd); 24374 rval = EINVAL; 24375 break; 24376 } 24377 24378 if (select) { 24379 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 24380 } 24381 if (sense) { 24382 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24383 } 24384 24385 return (rval); 24386 } 24387 24388 24389 /* 24390 * Function: sr_atapi_change_speed() 24391 * 24392 * Description: This routine is the driver entry point for handling CD-ROM 24393 * drive speed ioctl requests for MMC devices that do not support 24394 * the Real Time Streaming feature (0x107). 24395 * 24396 * Note: This routine will use the SET SPEED command which may not 24397 * be supported by all devices. 24398 * 24399 * Arguments: dev - the device 'dev_t' 24400 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24401 * CDROMSDRVSPEED (set) 24402 * data - current drive speed or requested drive speed 24403 * flag - this argument is a pass through to ddi_copyxxx() directly 24404 * from the mode argument of ioctl(). 24405 * 24406 * Return Code: the code returned by sd_send_scsi_cmd() 24407 * EINVAL if invalid arguments are provided 24408 * EFAULT if ddi_copyxxx() fails 24409 * ENXIO if fail ddi_get_soft_state 24410 * EIO if invalid mode sense block descriptor length 24411 */ 24412 24413 static int 24414 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24415 { 24416 struct sd_lun *un; 24417 struct uscsi_cmd *com = NULL; 24418 struct mode_header_grp2 *sense_mhp; 24419 uchar_t *sense_page; 24420 uchar_t *sense = NULL; 24421 char cdb[CDB_GROUP5]; 24422 int bd_len; 24423 int current_speed = 0; 24424 int max_speed = 0; 24425 int rval; 24426 24427 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24428 24429 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24430 return (ENXIO); 24431 } 24432 24433 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 24434 24435 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 24436 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 24437 SD_PATH_STANDARD)) != 0) { 24438 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24439 "sr_atapi_change_speed: Mode Sense Failed\n"); 24440 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24441 return (rval); 24442 } 24443 24444 /* Check the block descriptor len to handle only 1 block descriptor */ 24445 sense_mhp = (struct mode_header_grp2 *)sense; 24446 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 24447 if (bd_len > MODE_BLK_DESC_LENGTH) { 24448 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24449 "sr_atapi_change_speed: Mode Sense returned invalid " 24450 "block descriptor length\n"); 24451 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24452 return (EIO); 24453 } 24454 24455 /* Calculate the current and maximum drive speeds */ 24456 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 24457 current_speed = (sense_page[14] << 8) | sense_page[15]; 24458 max_speed = (sense_page[8] << 8) | sense_page[9]; 24459 24460 /* Process the command */ 24461 switch (cmd) { 24462 case CDROMGDRVSPEED: 24463 current_speed /= SD_SPEED_1X; 24464 if
(ddi_copyout(&current_speed, (void *)data, 24465 sizeof (int), flag) != 0) 24466 rval = EFAULT; 24467 break; 24468 case CDROMSDRVSPEED: 24469 /* Convert the speed code to KB/sec */ 24470 switch ((uchar_t)data) { 24471 case CDROM_NORMAL_SPEED: 24472 current_speed = SD_SPEED_1X; 24473 break; 24474 case CDROM_DOUBLE_SPEED: 24475 current_speed = 2 * SD_SPEED_1X; 24476 break; 24477 case CDROM_QUAD_SPEED: 24478 current_speed = 4 * SD_SPEED_1X; 24479 break; 24480 case CDROM_TWELVE_SPEED: 24481 current_speed = 12 * SD_SPEED_1X; 24482 break; 24483 case CDROM_MAXIMUM_SPEED: 24484 current_speed = 0xffff; 24485 break; 24486 default: 24487 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24488 "sr_atapi_change_speed: invalid drive speed %d\n", 24489 (uchar_t)data); 24490 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24491 return (EINVAL); 24492 } 24493 24494 /* Check the request against the drive's max speed. */ 24495 if (current_speed != 0xffff) { 24496 if (current_speed > max_speed) { 24497 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24498 return (EINVAL); 24499 } 24500 } 24501 24502 /* 24503 * Build and send the SET SPEED command 24504 * 24505 * Note: The SET SPEED (0xBB) command used in this routine is 24506 * obsolete per the SCSI MMC spec but still supported in the 24507 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 24508 * therefore the command is still implemented in this routine. 24509 */ 24510 bzero(cdb, sizeof (cdb)); 24511 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 24512 cdb[2] = (uchar_t)(current_speed >> 8); 24513 cdb[3] = (uchar_t)current_speed; 24514 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24515 com->uscsi_cdb = (caddr_t)cdb; 24516 com->uscsi_cdblen = CDB_GROUP5; 24517 com->uscsi_bufaddr = NULL; 24518 com->uscsi_buflen = 0; 24519 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24520 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 24521 break; 24522 default: 24523 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24524 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 24525 rval = EINVAL; 24526 } 24527 24528 if (sense) { 24529 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24530 } 24531 if (com) { 24532 kmem_free(com, sizeof (*com)); 24533 } 24534 return (rval); 24535 } 24536 24537 24538 /* 24539 * Function: sr_pause_resume() 24540 * 24541 * Description: This routine is the driver entry point for handling CD-ROM 24542 * pause/resume ioctl requests. This only affects the audio play 24543 * operation. 24544 * 24545 * Arguments: dev - the device 'dev_t' 24546 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 24547 * for setting the resume bit of the cdb.
24548 * 24549 * Return Code: the code returned by sd_send_scsi_cmd() 24550 * EINVAL if invalid mode specified 24551 * 24552 */ 24553 24554 static int 24555 sr_pause_resume(dev_t dev, int cmd) 24556 { 24557 struct sd_lun *un; 24558 struct uscsi_cmd *com; 24559 char cdb[CDB_GROUP1]; 24560 int rval; 24561 24562 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24563 return (ENXIO); 24564 } 24565 24566 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24567 bzero(cdb, CDB_GROUP1); 24568 cdb[0] = SCMD_PAUSE_RESUME; 24569 switch (cmd) { 24570 case CDROMRESUME: 24571 cdb[8] = 1; 24572 break; 24573 case CDROMPAUSE: 24574 cdb[8] = 0; 24575 break; 24576 default: 24577 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 24578 " Command '%x' Not Supported\n", cmd); 24579 rval = EINVAL; 24580 goto done; 24581 } 24582 24583 com->uscsi_cdb = cdb; 24584 com->uscsi_cdblen = CDB_GROUP1; 24585 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24586 24587 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24588 SD_PATH_STANDARD); 24589 24590 done: 24591 kmem_free(com, sizeof (*com)); 24592 return (rval); 24593 } 24594 24595 24596 /* 24597 * Function: sr_play_msf() 24598 * 24599 * Description: This routine is the driver entry point for handling CD-ROM 24600 * ioctl requests to output the audio signals at the specified 24601 * starting address and continue the audio play until the specified 24602 * ending address (CDROMPLAYMSF). The address is in Minute Second 24603 * Frame (MSF) format. 24604 * 24605 * Arguments: dev - the device 'dev_t' 24606 * data - pointer to user provided audio msf structure, 24607 * specifying start/end addresses. 24608 * flag - this argument is a pass through to ddi_copyxxx() 24609 * directly from the mode argument of ioctl().
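 *
 * Addressing note (reference only; this is the standard Red Book
 * relationship, not computed by this routine): an MSF address maps
 * to a logical block address as
 *	LBA = ((minute * 60 + second) * 75) + frame - 150
 * so, for example, MSF 02:00:00 corresponds to LBA 8850.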
24610 * 24611 * Return Code: the code returned by sd_send_scsi_cmd() 24612 * EFAULT if ddi_copyxxx() fails 24613 * ENXIO if fail ddi_get_soft_state 24614 * EINVAL if data pointer is NULL 24615 */ 24616 24617 static int 24618 sr_play_msf(dev_t dev, caddr_t data, int flag) 24619 { 24620 struct sd_lun *un; 24621 struct uscsi_cmd *com; 24622 struct cdrom_msf msf_struct; 24623 struct cdrom_msf *msf = &msf_struct; 24624 char cdb[CDB_GROUP1]; 24625 int rval; 24626 24627 if (data == NULL) { 24628 return (EINVAL); 24629 } 24630 24631 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24632 return (ENXIO); 24633 } 24634 24635 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24636 return (EFAULT); 24637 } 24638 24639 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24640 bzero(cdb, CDB_GROUP1); 24641 cdb[0] = SCMD_PLAYAUDIO_MSF; 24642 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24643 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24644 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24645 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24646 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24647 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24648 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24649 } else { 24650 cdb[3] = msf->cdmsf_min0; 24651 cdb[4] = msf->cdmsf_sec0; 24652 cdb[5] = msf->cdmsf_frame0; 24653 cdb[6] = msf->cdmsf_min1; 24654 cdb[7] = msf->cdmsf_sec1; 24655 cdb[8] = msf->cdmsf_frame1; 24656 } 24657 com->uscsi_cdb = cdb; 24658 com->uscsi_cdblen = CDB_GROUP1; 24659 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24660 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24661 SD_PATH_STANDARD); 24662 kmem_free(com, sizeof (*com)); 24663 return (rval); 24664 } 24665 24666 24667 /* 24668 * Function: sr_play_trkind() 24669 * 24670 * Description: This routine is the driver entry point for handling CD-ROM 24671 * ioctl requests to output the audio signals at the specified 24672 * starting address and continue the audio play until the specified 24673 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24674 * format. 24675 * 24676 * Arguments: dev - the device 'dev_t' 24677 * data - pointer to user provided audio track/index structure, 24678 * specifying start/end addresses. 24679 * flag - this argument is a pass through to ddi_copyxxx() 24680 * directly from the mode argument of ioctl(). 
24681 * 24682 * Return Code: the code returned by sd_send_scsi_cmd() 24683 * EFAULT if ddi_copyxxx() fails 24684 * ENXIO if fail ddi_get_soft_state 24685 * EINVAL if data pointer is NULL 24686 */ 24687 24688 static int 24689 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24690 { 24691 struct cdrom_ti ti_struct; 24692 struct cdrom_ti *ti = &ti_struct; 24693 struct uscsi_cmd *com = NULL; 24694 char cdb[CDB_GROUP1]; 24695 int rval; 24696 24697 if (data == NULL) { 24698 return (EINVAL); 24699 } 24700 24701 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24702 return (EFAULT); 24703 } 24704 24705 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24706 bzero(cdb, CDB_GROUP1); 24707 cdb[0] = SCMD_PLAYAUDIO_TI; 24708 cdb[4] = ti->cdti_trk0; 24709 cdb[5] = ti->cdti_ind0; 24710 cdb[7] = ti->cdti_trk1; 24711 cdb[8] = ti->cdti_ind1; 24712 com->uscsi_cdb = cdb; 24713 com->uscsi_cdblen = CDB_GROUP1; 24714 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24715 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24716 SD_PATH_STANDARD); 24717 kmem_free(com, sizeof (*com)); 24718 return (rval); 24719 } 24720 24721 24722 /* 24723 * Function: sr_read_all_subcodes() 24724 * 24725 * Description: This routine is the driver entry point for handling CD-ROM 24726 * ioctl requests to return raw subcode data while the target is 24727 * playing audio (CDROMSUBCODE). 24728 * 24729 * Arguments: dev - the device 'dev_t' 24730 * data - pointer to user provided cdrom subcode structure, 24731 * specifying the transfer length and address. 24732 * flag - this argument is a pass through to ddi_copyxxx() 24733 * directly from the mode argument of ioctl(). 24734 * 24735 * Return Code: the code returned by sd_send_scsi_cmd() 24736 * EFAULT if ddi_copyxxx() fails 24737 * ENXIO if fail ddi_get_soft_state 24738 * EINVAL if data pointer is NULL 24739 */ 24740 24741 static int 24742 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24743 { 24744 struct sd_lun *un = NULL; 24745 struct uscsi_cmd *com = NULL; 24746 struct cdrom_subcode *subcode = NULL; 24747 int rval; 24748 size_t buflen; 24749 char cdb[CDB_GROUP5]; 24750 24751 #ifdef _MULTI_DATAMODEL 24752 /* To support ILP32 applications in an LP64 world */ 24753 struct cdrom_subcode32 cdrom_subcode32; 24754 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24755 #endif 24756 if (data == NULL) { 24757 return (EINVAL); 24758 } 24759 24760 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24761 return (ENXIO); 24762 } 24763 24764 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24765 24766 #ifdef _MULTI_DATAMODEL 24767 switch (ddi_model_convert_from(flag & FMODELS)) { 24768 case DDI_MODEL_ILP32: 24769 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24770 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24771 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24772 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24773 return (EFAULT); 24774 } 24775 /* Convert the ILP32 uscsi data from the application to LP64 */ 24776 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24777 break; 24778 case DDI_MODEL_NONE: 24779 if (ddi_copyin(data, subcode, 24780 sizeof (struct cdrom_subcode), flag)) { 24781 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24782 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24783 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24784 return (EFAULT); 24785 } 24786 break; 24787 } 24788 #else /* ! 
_MULTI_DATAMODEL */ 24789 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 24790 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24791 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24792 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24793 return (EFAULT); 24794 } 24795 #endif /* _MULTI_DATAMODEL */ 24796 24797 /* 24798 * Since MMC-2 expects max 3 bytes for length, check if the 24799 * length input is greater than 3 bytes 24800 */ 24801 if ((subcode->cdsc_length & 0xFF000000) != 0) { 24802 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24803 "sr_read_all_subcodes: " 24804 "cdrom transfer length too large: %d (limit %d)\n", 24805 subcode->cdsc_length, 0xFFFFFF); 24806 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24807 return (EINVAL); 24808 } 24809 24810 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 24811 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24812 bzero(cdb, CDB_GROUP5); 24813 24814 if (un->un_f_mmc_cap == TRUE) { 24815 cdb[0] = (char)SCMD_READ_CD; 24816 cdb[2] = (char)0xff; 24817 cdb[3] = (char)0xff; 24818 cdb[4] = (char)0xff; 24819 cdb[5] = (char)0xff; 24820 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 24821 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 24822 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 24823 cdb[10] = 1; 24824 } else { 24825 /* 24826 * Note: A vendor specific command (0xDF) is being used here to 24827 * request a read of all subcodes. 24828 */ 24829 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 24830 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 24831 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 24832 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 24833 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 24834 } 24835 com->uscsi_cdb = cdb; 24836 com->uscsi_cdblen = CDB_GROUP5; 24837 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 24838 com->uscsi_buflen = buflen; 24839 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24840 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24841 SD_PATH_STANDARD); 24842 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24843 kmem_free(com, sizeof (*com)); 24844 return (rval); 24845 } 24846 24847 24848 /* 24849 * Function: sr_read_subchannel() 24850 * 24851 * Description: This routine is the driver entry point for handling CD-ROM 24852 * ioctl requests to return the Q sub-channel data of the CD 24853 * current position block (CDROMSUBCHNL). The data includes the 24854 * track number, index number, absolute CD-ROM address (LBA or MSF 24855 * format per the user), track relative CD-ROM address (LBA or MSF 24856 * format per the user), control data and audio status. 24857 * 24858 * Arguments: dev - the device 'dev_t' 24859 * data - pointer to user provided cdrom sub-channel structure 24860 * flag - this argument is a pass through to ddi_copyxxx() 24861 * directly from the mode argument of ioctl().
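 *
 * Response layout sketch (offsets as parsed by the code below):
 *	buffer[1]	audio status
 *	buffer[5]	ADR (high nibble) and CTRL (low nibble)
 *	buffer[6]	track number, buffer[7] index number
 *	buffer[8-11]	absolute address (LBA or MSF per cdsc_format)
 *	buffer[12-15]	track-relative address in the same format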
24862 * 24863 * Return Code: the code returned by sd_send_scsi_cmd() 24864 * EFAULT if ddi_copyxxx() fails 24865 * ENXIO if fail ddi_get_soft_state 24866 * EINVAL if data pointer is NULL 24867 */ 24868 24869 static int 24870 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24871 { 24872 struct sd_lun *un; 24873 struct uscsi_cmd *com; 24874 struct cdrom_subchnl subchanel; 24875 struct cdrom_subchnl *subchnl = &subchanel; 24876 char cdb[CDB_GROUP1]; 24877 caddr_t buffer; 24878 int rval; 24879 24880 if (data == NULL) { 24881 return (EINVAL); 24882 } 24883 24884 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24885 (un->un_state == SD_STATE_OFFLINE)) { 24886 return (ENXIO); 24887 } 24888 24889 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24890 return (EFAULT); 24891 } 24892 24893 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24894 bzero(cdb, CDB_GROUP1); 24895 cdb[0] = SCMD_READ_SUBCHANNEL; 24896 /* Set the MSF bit based on the user requested address format */ 24897 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24898 /* 24899 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24900 * returned 24901 */ 24902 cdb[2] = 0x40; 24903 /* 24904 * Set byte 3 to specify the return data format. A value of 0x01 24905 * indicates that the CD-ROM current position should be returned. 24906 */ 24907 cdb[3] = 0x01; 24908 cdb[8] = 0x10; 24909 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24910 com->uscsi_cdb = cdb; 24911 com->uscsi_cdblen = CDB_GROUP1; 24912 com->uscsi_bufaddr = buffer; 24913 com->uscsi_buflen = 16; 24914 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24915 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24916 SD_PATH_STANDARD); 24917 if (rval != 0) { 24918 kmem_free(buffer, 16); 24919 kmem_free(com, sizeof (*com)); 24920 return (rval); 24921 } 24922 24923 /* Process the returned Q sub-channel data */ 24924 subchnl->cdsc_audiostatus = buffer[1]; 24925 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24926 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24927 subchnl->cdsc_trk = buffer[6]; 24928 subchnl->cdsc_ind = buffer[7]; 24929 if (subchnl->cdsc_format & CDROM_LBA) { 24930 subchnl->cdsc_absaddr.lba = 24931 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24932 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24933 subchnl->cdsc_reladdr.lba = 24934 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24935 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24936 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24937 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24938 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24939 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24940 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24941 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24942 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24943 } else { 24944 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24945 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24946 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24947 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24948 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24949 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24950 } 24951 kmem_free(buffer, 16); 24952 kmem_free(com, sizeof (*com)); 24953 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24954 != 0) { 24955 return (EFAULT); 24956 } 24957 return (rval); 24958 } 24959 24960 24961 /* 24962 * Function: sr_read_tocentry() 24963 * 
24964 * Description: This routine is the driver entry point for handling CD-ROM 24965 * ioctl requests to read from the Table of Contents (TOC) 24966 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 24967 * fields, the starting address (LBA or MSF format per the user) 24968 * and the data mode if the user specified track is a data track. 24969 * 24970 * Note: The READ HEADER (0x44) command used in this routine is 24971 * obsolete per the SCSI MMC spec but still supported in the 24972 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 24973 * therefore the command is still implemented in this routine. 24974 * 24975 * Arguments: dev - the device 'dev_t' 24976 * data - pointer to user provided toc entry structure, 24977 * specifying the track # and the address format 24978 * (LBA or MSF). 24979 * flag - this argument is a pass through to ddi_copyxxx() 24980 * directly from the mode argument of ioctl(). 24981 * 24982 * Return Code: the code returned by sd_send_scsi_cmd() 24983 * EFAULT if ddi_copyxxx() fails 24984 * ENXIO if fail ddi_get_soft_state 24985 * EINVAL if data pointer is NULL 24986 */ 24987 24988 static int 24989 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 24990 { 24991 struct sd_lun *un = NULL; 24992 struct uscsi_cmd *com; 24993 struct cdrom_tocentry toc_entry; 24994 struct cdrom_tocentry *entry = &toc_entry; 24995 caddr_t buffer; 24996 int rval; 24997 char cdb[CDB_GROUP1]; 24998 24999 if (data == NULL) { 25000 return (EINVAL); 25001 } 25002 25003 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25004 (un->un_state == SD_STATE_OFFLINE)) { 25005 return (ENXIO); 25006 } 25007 25008 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 25009 return (EFAULT); 25010 } 25011 25012 /* Validate the requested track and address format */ 25013 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 25014 return (EINVAL); 25015 } 25016 25017 if (entry->cdte_track == 0) { 25018 return (EINVAL); 25019 } 25020 25021 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 25022 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25023 bzero(cdb, CDB_GROUP1); 25024 25025 cdb[0] = SCMD_READ_TOC; 25026 /* Set the MSF bit based on the user requested address format */ 25027 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 25028 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 25029 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 25030 } else { 25031 cdb[6] = entry->cdte_track; 25032 } 25033 25034 /* 25035 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
25036 * (4 byte TOC response header + 8 byte track descriptor) 25037 */ 25038 cdb[8] = 12; 25039 com->uscsi_cdb = cdb; 25040 com->uscsi_cdblen = CDB_GROUP1; 25041 com->uscsi_bufaddr = buffer; 25042 com->uscsi_buflen = 0x0C; 25043 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 25044 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25045 SD_PATH_STANDARD); 25046 if (rval != 0) { 25047 kmem_free(buffer, 12); 25048 kmem_free(com, sizeof (*com)); 25049 return (rval); 25050 } 25051 25052 /* Process the toc entry */ 25053 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 25054 entry->cdte_ctrl = (buffer[5] & 0x0F); 25055 if (entry->cdte_format & CDROM_LBA) { 25056 entry->cdte_addr.lba = 25057 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 25058 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 25059 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 25060 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 25061 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 25062 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 25063 /* 25064 * Send a READ TOC command using the LBA address format to get 25065 * the LBA for the track requested so it can be used in the 25066 * READ HEADER request 25067 * 25068 * Note: The MSF bit of the READ HEADER command specifies the 25069 * output format. The block address specified in that command 25070 * must be in LBA format. 25071 */ 25072 cdb[1] = 0; 25073 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25074 SD_PATH_STANDARD); 25075 if (rval != 0) { 25076 kmem_free(buffer, 12); 25077 kmem_free(com, sizeof (*com)); 25078 return (rval); 25079 } 25080 } else { 25081 entry->cdte_addr.msf.minute = buffer[9]; 25082 entry->cdte_addr.msf.second = buffer[10]; 25083 entry->cdte_addr.msf.frame = buffer[11]; 25084 /* 25085 * Send a READ TOC command using the LBA address format to get 25086 * the LBA for the track requested so it can be used in the 25087 * READ HEADER request 25088 * 25089 * Note: The MSF bit of the READ HEADER command specifies the 25090 * output format. The block address specified in that command 25091 * must be in LBA format. 25092 */ 25093 cdb[1] = 0; 25094 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25095 SD_PATH_STANDARD); 25096 if (rval != 0) { 25097 kmem_free(buffer, 12); 25098 kmem_free(com, sizeof (*com)); 25099 return (rval); 25100 } 25101 } 25102 25103 /* 25104 * Build and send the READ HEADER command to determine the data mode of 25105 * the user specified track. 25106 */ 25107 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 25108 (entry->cdte_track != CDROM_LEADOUT)) { 25109 bzero(cdb, CDB_GROUP1); 25110 cdb[0] = SCMD_READ_HEADER; 25111 cdb[2] = buffer[8]; 25112 cdb[3] = buffer[9]; 25113 cdb[4] = buffer[10]; 25114 cdb[5] = buffer[11]; 25115 cdb[8] = 0x08; 25116 com->uscsi_buflen = 0x08; 25117 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25118 SD_PATH_STANDARD); 25119 if (rval == 0) { 25120 entry->cdte_datamode = buffer[0]; 25121 } else { 25122 /* 25123 * The READ HEADER command failed; since it is 25124 * obsoleted in one spec, it's better to return 25125 * -1 for an invalid track so that we can still 25126 * receive the rest of the TOC data.
25127 */ 25128 entry->cdte_datamode = (uchar_t)-1; 25129 } 25130 } else { 25131 entry->cdte_datamode = (uchar_t)-1; 25132 } 25133 25134 kmem_free(buffer, 12); 25135 kmem_free(com, sizeof (*com)); 25136 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 25137 return (EFAULT); 25138 25139 return (rval); 25140 } 25141 25142 25143 /* 25144 * Function: sr_read_tochdr() 25145 * 25146 * Description: This routine is the driver entry point for handling CD-ROM 25147 * ioctl requests to read the Table of Contents (TOC) header 25148 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 25149 * and ending track numbers. 25150 * 25151 * Arguments: dev - the device 'dev_t' 25152 * data - pointer to user provided toc header structure, 25153 * specifying the starting and ending track numbers. 25154 * flag - this argument is a pass through to ddi_copyxxx() 25155 * directly from the mode argument of ioctl(). 25156 * 25157 * Return Code: the code returned by sd_send_scsi_cmd() 25158 * EFAULT if ddi_copyxxx() fails 25159 * ENXIO if fail ddi_get_soft_state 25160 * EINVAL if data pointer is NULL 25161 */ 25162 25163 static int 25164 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 25165 { 25166 struct sd_lun *un; 25167 struct uscsi_cmd *com; 25168 struct cdrom_tochdr toc_header; 25169 struct cdrom_tochdr *hdr = &toc_header; 25170 char cdb[CDB_GROUP1]; 25171 int rval; 25172 caddr_t buffer; 25173 25174 if (data == NULL) { 25175 return (EINVAL); 25176 } 25177 25178 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25179 (un->un_state == SD_STATE_OFFLINE)) { 25180 return (ENXIO); 25181 } 25182 25183 buffer = kmem_zalloc(4, KM_SLEEP); 25184 bzero(cdb, CDB_GROUP1); 25185 cdb[0] = SCMD_READ_TOC; 25186 /* 25187 * Specifying a track number of 0x00 in the READ TOC command indicates 25188 * that the TOC header should be returned 25189 */ 25190 cdb[6] = 0x00; 25191 /* 25192 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 25193 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 25194 */ 25195 cdb[8] = 0x04; 25196 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25197 com->uscsi_cdb = cdb; 25198 com->uscsi_cdblen = CDB_GROUP1; 25199 com->uscsi_bufaddr = buffer; 25200 com->uscsi_buflen = 0x04; 25201 com->uscsi_timeout = 300; 25202 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25203 25204 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25205 SD_PATH_STANDARD); 25206 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 25207 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 25208 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 25209 } else { 25210 hdr->cdth_trk0 = buffer[2]; 25211 hdr->cdth_trk1 = buffer[3]; 25212 } 25213 kmem_free(buffer, 4); 25214 kmem_free(com, sizeof (*com)); 25215 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 25216 return (EFAULT); 25217 } 25218 return (rval); 25219 } 25220 25221 25222 /* 25223 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 25224 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 25225 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 25226 * digital audio and extended architecture digital audio. These modes are 25227 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 25228 * MMC specs.
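 *
 * Sector payload sizes for these formats (illustration only; the
 * same values appear as constants in the routines below):
 *	mode 1 user data	2048 bytes/block
 *	mode 2 user data	2336 bytes/block
 *	CD-DA audio		2352 bytes/block, up to 2448 with subcode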
25229 * 25230 * In addition to support for the various data formats these routines also 25231 * include support for devices that implement only the direct access READ 25232 * commands (0x08, 0x28), devices that implement the READ_CD commands 25233 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 25234 * READ CDXA commands (0xD8, 0xDB) 25235 */ 25236 25237 /* 25238 * Function: sr_read_mode1() 25239 * 25240 * Description: This routine is the driver entry point for handling CD-ROM 25241 * ioctl read mode1 requests (CDROMREADMODE1). 25242 * 25243 * Arguments: dev - the device 'dev_t' 25244 * data - pointer to user provided cd read structure specifying 25245 * the lba buffer address and length. 25246 * flag - this argument is a pass through to ddi_copyxxx() 25247 * directly from the mode argument of ioctl(). 25248 * 25249 * Return Code: the code returned by sd_send_scsi_cmd() 25250 * EFAULT if ddi_copyxxx() fails 25251 * ENXIO if fail ddi_get_soft_state 25252 * EINVAL if data pointer is NULL 25253 */ 25254 25255 static int 25256 sr_read_mode1(dev_t dev, caddr_t data, int flag) 25257 { 25258 struct sd_lun *un; 25259 struct cdrom_read mode1_struct; 25260 struct cdrom_read *mode1 = &mode1_struct; 25261 int rval; 25262 #ifdef _MULTI_DATAMODEL 25263 /* To support ILP32 applications in an LP64 world */ 25264 struct cdrom_read32 cdrom_read32; 25265 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25266 #endif /* _MULTI_DATAMODEL */ 25267 25268 if (data == NULL) { 25269 return (EINVAL); 25270 } 25271 25272 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25273 (un->un_state == SD_STATE_OFFLINE)) { 25274 return (ENXIO); 25275 } 25276 25277 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25278 "sd_read_mode1: entry: un:0x%p\n", un); 25279 25280 #ifdef _MULTI_DATAMODEL 25281 switch (ddi_model_convert_from(flag & FMODELS)) { 25282 case DDI_MODEL_ILP32: 25283 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25284 return (EFAULT); 25285 } 25286 /* Convert the ILP32 uscsi data from the application to LP64 */ 25287 cdrom_read32tocdrom_read(cdrd32, mode1); 25288 break; 25289 case DDI_MODEL_NONE: 25290 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25291 return (EFAULT); 25292 } 25293 } 25294 #else /* ! _MULTI_DATAMODEL */ 25295 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25296 return (EFAULT); 25297 } 25298 #endif /* _MULTI_DATAMODEL */ 25299 25300 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 25301 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 25302 25303 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25304 "sd_read_mode1: exit: un:0x%p\n", un); 25305 25306 return (rval); 25307 } 25308 25309 25310 /* 25311 * Function: sr_read_cd_mode2() 25312 * 25313 * Description: This routine is the driver entry point for handling CD-ROM 25314 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25315 * support the READ CD (0xBE) command or the 1st generation 25316 * READ CD (0xD4) command. 25317 * 25318 * Arguments: dev - the device 'dev_t' 25319 * data - pointer to user provided cd read structure specifying 25320 * the lba buffer address and length. 25321 * flag - this argument is a pass through to ddi_copyxxx() 25322 * directly from the mode argument of ioctl(). 
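 *
 * Transfer sketch (illustration only, mirroring the code below):
 * the user byte count is converted to whole 2336-byte blocks with
 *	nblocks = cdread_buflen / 2336;
 * so a 23360-byte buffer reads exactly 10 mode 2 sectors and any
 * remainder bytes are simply not transferred.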
25323 * 25324 * Return Code: the code returned by sd_send_scsi_cmd() 25325 * EFAULT if ddi_copyxxx() fails 25326 * ENXIO if fail ddi_get_soft_state 25327 * EINVAL if data pointer is NULL 25328 */ 25329 25330 static int 25331 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 25332 { 25333 struct sd_lun *un; 25334 struct uscsi_cmd *com; 25335 struct cdrom_read mode2_struct; 25336 struct cdrom_read *mode2 = &mode2_struct; 25337 uchar_t cdb[CDB_GROUP5]; 25338 int nblocks; 25339 int rval; 25340 #ifdef _MULTI_DATAMODEL 25341 /* To support ILP32 applications in an LP64 world */ 25342 struct cdrom_read32 cdrom_read32; 25343 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25344 #endif /* _MULTI_DATAMODEL */ 25345 25346 if (data == NULL) { 25347 return (EINVAL); 25348 } 25349 25350 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25351 (un->un_state == SD_STATE_OFFLINE)) { 25352 return (ENXIO); 25353 } 25354 25355 #ifdef _MULTI_DATAMODEL 25356 switch (ddi_model_convert_from(flag & FMODELS)) { 25357 case DDI_MODEL_ILP32: 25358 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25359 return (EFAULT); 25360 } 25361 /* Convert the ILP32 uscsi data from the application to LP64 */ 25362 cdrom_read32tocdrom_read(cdrd32, mode2); 25363 break; 25364 case DDI_MODEL_NONE: 25365 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25366 return (EFAULT); 25367 } 25368 break; 25369 } 25370 25371 #else /* ! _MULTI_DATAMODEL */ 25372 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25373 return (EFAULT); 25374 } 25375 #endif /* _MULTI_DATAMODEL */ 25376 25377 bzero(cdb, sizeof (cdb)); 25378 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 25379 /* Read command supported by 1st generation atapi drives */ 25380 cdb[0] = SCMD_READ_CDD4; 25381 } else { 25382 /* Universal CD Access Command */ 25383 cdb[0] = SCMD_READ_CD; 25384 } 25385 25386 /* 25387 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book 25388 */ 25389 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 25390 25391 /* set the start address */ 25392 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 25393 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 25394 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25395 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 25396 25397 /* set the transfer length */ 25398 nblocks = mode2->cdread_buflen / 2336; 25399 cdb[6] = (uchar_t)(nblocks >> 16); 25400 cdb[7] = (uchar_t)(nblocks >> 8); 25401 cdb[8] = (uchar_t)nblocks; 25402 25403 /* set the filter bits */ 25404 cdb[9] = CDROM_READ_CD_USERDATA; 25405 25406 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25407 com->uscsi_cdb = (caddr_t)cdb; 25408 com->uscsi_cdblen = sizeof (cdb); 25409 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25410 com->uscsi_buflen = mode2->cdread_buflen; 25411 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25412 25413 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25414 SD_PATH_STANDARD); 25415 kmem_free(com, sizeof (*com)); 25416 return (rval); 25417 } 25418 25419 25420 /* 25421 * Function: sr_read_mode2() 25422 * 25423 * Description: This routine is the driver entry point for handling CD-ROM 25424 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25425 * do not support the READ CD (0xBE) command. 25426 * 25427 * Arguments: dev - the device 'dev_t' 25428 * data - pointer to user provided cd read structure specifying 25429 * the lba buffer address and length. 25430 * flag - this argument is a pass through to ddi_copyxxx() 25431 * directly from the mode argument of ioctl().
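 *
 * Address sketch (illustration only, per the driver's own comment
 * in the code below): cdread_lba arrives in 512-byte units while
 * the READ issued below addresses 2-Kbyte blocks, hence the
 * "cdread_lba >>= 2" adjustment; for example, user LBA 100 becomes
 * device block 25.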
25432 * 25433 * Return Code: the code returned by sd_send_scsi_cmd() 25434 * EFAULT if ddi_copyxxx() fails 25435 * ENXIO if fail ddi_get_soft_state 25436 * EINVAL if data pointer is NULL 25437 * EIO if fail to reset block size 25438 * EAGAIN if commands are in progress in the driver 25439 */ 25440 25441 static int 25442 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25443 { 25444 struct sd_lun *un; 25445 struct cdrom_read mode2_struct; 25446 struct cdrom_read *mode2 = &mode2_struct; 25447 int rval; 25448 uint32_t restore_blksize; 25449 struct uscsi_cmd *com; 25450 uchar_t cdb[CDB_GROUP0]; 25451 int nblocks; 25452 25453 #ifdef _MULTI_DATAMODEL 25454 /* To support ILP32 applications in an LP64 world */ 25455 struct cdrom_read32 cdrom_read32; 25456 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25457 #endif /* _MULTI_DATAMODEL */ 25458 25459 if (data == NULL) { 25460 return (EINVAL); 25461 } 25462 25463 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25464 (un->un_state == SD_STATE_OFFLINE)) { 25465 return (ENXIO); 25466 } 25467 25468 /* 25469 * Because this routine will update the device and driver block size 25470 * being used we want to make sure there are no commands in progress. 25471 * If commands are in progress the user will have to try again. 25472 * 25473 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25474 * in sdioctl to protect commands from sdioctl through to the top of 25475 * sd_uscsi_strategy. See sdioctl for details. 25476 */ 25477 mutex_enter(SD_MUTEX(un)); 25478 if (un->un_ncmds_in_driver != 1) { 25479 mutex_exit(SD_MUTEX(un)); 25480 return (EAGAIN); 25481 } 25482 mutex_exit(SD_MUTEX(un)); 25483 25484 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25485 "sd_read_mode2: entry: un:0x%p\n", un); 25486 25487 #ifdef _MULTI_DATAMODEL 25488 switch (ddi_model_convert_from(flag & FMODELS)) { 25489 case DDI_MODEL_ILP32: 25490 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25491 return (EFAULT); 25492 } 25493 /* Convert the ILP32 uscsi data from the application to LP64 */ 25494 cdrom_read32tocdrom_read(cdrd32, mode2); 25495 break; 25496 case DDI_MODEL_NONE: 25497 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25498 return (EFAULT); 25499 } 25500 break; 25501 } 25502 #else /* ! 
_MULTI_DATAMODEL */ 25503 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 25504 return (EFAULT); 25505 } 25506 #endif /* _MULTI_DATAMODEL */ 25507 25508 /* Store the current target block size for restoration later */ 25509 restore_blksize = un->un_tgt_blocksize; 25510 25511 /* Change the device and soft state target block size to 2336 */ 25512 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 25513 rval = EIO; 25514 goto done; 25515 } 25516 25517 25518 bzero(cdb, sizeof (cdb)); 25519 25520 /* set READ operation */ 25521 cdb[0] = SCMD_READ; 25522 25523 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 25524 mode2->cdread_lba >>= 2; 25525 25526 /* set the start address */ 25527 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 25528 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25529 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 25530 25531 /* set the transfer length */ 25532 nblocks = mode2->cdread_buflen / 2336; 25533 cdb[4] = (uchar_t)nblocks & 0xFF; 25534 25535 /* build command */ 25536 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25537 com->uscsi_cdb = (caddr_t)cdb; 25538 com->uscsi_cdblen = sizeof (cdb); 25539 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25540 com->uscsi_buflen = mode2->cdread_buflen; 25541 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25542 25543 /* 25544 * Issue SCSI command with user space address for read buffer. 25545 * 25546 * This sends the command through the main channel in the driver. 25547 * 25548 * Since this is accessed via an IOCTL call, we go through the 25549 * standard path, so that if the device was powered down, then 25550 * it would be 'awakened' to handle the command. 25551 */ 25552 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25553 SD_PATH_STANDARD); 25554 25555 kmem_free(com, sizeof (*com)); 25556 25557 /* Restore the device and soft state target block size */ 25558 if (sr_sector_mode(dev, restore_blksize) != 0) { 25559 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25560 "can't do switch back to mode 1\n"); 25561 /* 25562 * If the READ command succeeded we still need to report 25563 * an error because we failed to reset the block size 25564 */ 25565 if (rval == 0) { 25566 rval = EIO; 25567 } 25568 } 25569 25570 done: 25571 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25572 "sd_read_mode2: exit: un:0x%p\n", un); 25573 25574 return (rval); 25575 } 25576 25577 25578 /* 25579 * Function: sr_sector_mode() 25580 * 25581 * Description: This utility function is used by sr_read_mode2 to set the target 25582 * block size based on the user specified size. This is a legacy 25583 * implementation based upon a vendor specific mode page. 25584 * 25585 * Arguments: dev - the device 'dev_t' 25586 * blksize - flag indicating if block size is being set to 2336 or 25587 * 512.
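 *
 * Select-buffer sketch (offsets as written by the code below; the
 * page internals are vendor specific and only partially decoded):
 *	select[3]	block descriptor length (0x08)
 *	select[10-11]	requested block size, big-endian
 *	select[12]	page code (0x81 with the saved-page bit cleared)
 *	select[13]	page length (0x06)
 *	select[14-15]	copied from the sense data; bit 0 of byte 14
 *			is set to select the 2336-byte mode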
25588 * 25589 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or 25590 * sd_send_scsi_MODE_SELECT() 25591 * ENXIO if fail ddi_get_soft_state 25592 * 25593 */ 25594 25595 static int 25596 sr_sector_mode(dev_t dev, uint32_t blksize) 25597 { 25598 struct sd_lun *un; 25599 uchar_t *sense; 25600 uchar_t *select; 25601 int rval; 25602 25603 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25604 (un->un_state == SD_STATE_OFFLINE)) { 25605 return (ENXIO); 25606 } 25607 25608 sense = kmem_zalloc(20, KM_SLEEP); 25609 25610 /* Note: This is a vendor specific mode page (0x81) */ 25611 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 25612 SD_PATH_STANDARD)) != 0) { 25613 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25614 "sr_sector_mode: Mode Sense failed\n"); 25615 kmem_free(sense, 20); 25616 return (rval); 25617 } 25618 select = kmem_zalloc(20, KM_SLEEP); 25619 select[3] = 0x08; 25620 select[10] = ((blksize >> 8) & 0xff); 25621 select[11] = (blksize & 0xff); 25622 select[12] = 0x01; 25623 select[13] = 0x06; 25624 select[14] = sense[14]; 25625 select[15] = sense[15]; 25626 if (blksize == SD_MODE2_BLKSIZE) { 25627 select[14] |= 0x01; 25628 } 25629 25630 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 25631 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 25632 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25633 "sr_sector_mode: Mode Select failed\n"); 25634 } else { 25635 /* 25636 * Only update the softstate block size if we successfully 25637 * changed the device block mode. 25638 */ 25639 mutex_enter(SD_MUTEX(un)); 25640 sd_update_block_info(un, blksize, 0); 25641 mutex_exit(SD_MUTEX(un)); 25642 } 25643 kmem_free(sense, 20); 25644 kmem_free(select, 20); 25645 return (rval); 25646 } 25647 25648 25649 /* 25650 * Function: sr_read_cdda() 25651 * 25652 * Description: This routine is the driver entry point for handling CD-ROM 25653 * ioctl requests to return CD-DA or subcode data (CDROMCDDA). If 25654 * the target supports CDDA, these requests are handled via a vendor 25655 * specific command (0xD8). If the target does not support CDDA, 25656 * these requests are handled via the READ CD command (0xBE). 25657 * 25658 * Arguments: dev - the device 'dev_t' 25659 * data - pointer to user provided CD-DA structure specifying 25660 * the track starting address, transfer length, and 25661 * subcode options. 25662 * flag - this argument is a pass through to ddi_copyxxx() 25663 * directly from the mode argument of ioctl().
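 *
 * The amount of data returned per block depends on the requested
 * subcode selection: 2352 bytes of raw CD-DA data, 2368 bytes
 * (2352 plus the 16-byte Q subcode), 2448 bytes (2352 plus all 96
 * bytes of raw P-W subcode), or 96 bytes of subcode data only.
 * These sizes correspond to the CDROM_BLK_* multipliers used below.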
25664 * 25665 * Return Code: the code returned by sd_send_scsi_cmd() 25666 * EFAULT if ddi_copyxxx() fails 25667 * ENXIO if fail ddi_get_soft_state 25668 * EINVAL if invalid arguments are provided 25669 * ENOTTY 25670 */ 25671 25672 static int 25673 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25674 { 25675 struct sd_lun *un; 25676 struct uscsi_cmd *com; 25677 struct cdrom_cdda *cdda; 25678 int rval; 25679 size_t buflen; 25680 char cdb[CDB_GROUP5]; 25681 25682 #ifdef _MULTI_DATAMODEL 25683 /* To support ILP32 applications in an LP64 world */ 25684 struct cdrom_cdda32 cdrom_cdda32; 25685 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25686 #endif /* _MULTI_DATAMODEL */ 25687 25688 if (data == NULL) { 25689 return (EINVAL); 25690 } 25691 25692 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25693 return (ENXIO); 25694 } 25695 25696 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25697 25698 #ifdef _MULTI_DATAMODEL 25699 switch (ddi_model_convert_from(flag & FMODELS)) { 25700 case DDI_MODEL_ILP32: 25701 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25702 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25703 "sr_read_cdda: ddi_copyin Failed\n"); 25704 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25705 return (EFAULT); 25706 } 25707 /* Convert the ILP32 uscsi data from the application to LP64 */ 25708 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25709 break; 25710 case DDI_MODEL_NONE: 25711 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25712 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25713 "sr_read_cdda: ddi_copyin Failed\n"); 25714 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25715 return (EFAULT); 25716 } 25717 break; 25718 } 25719 #else /* ! _MULTI_DATAMODEL */ 25720 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25721 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25722 "sr_read_cdda: ddi_copyin Failed\n"); 25723 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25724 return (EFAULT); 25725 } 25726 #endif /* _MULTI_DATAMODEL */ 25727 25728 /* 25729 * Since MMC-2 expects max 3 bytes for length, check if the 25730 * length input is greater than 3 bytes 25731 */ 25732 if ((cdda->cdda_length & 0xFF000000) != 0) { 25733 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25734 "cdrom transfer length too large: %d (limit %d)\n", 25735 cdda->cdda_length, 0xFFFFFF); 25736 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25737 return (EINVAL); 25738 } 25739 25740 switch (cdda->cdda_subcode) { 25741 case CDROM_DA_NO_SUBCODE: 25742 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25743 break; 25744 case CDROM_DA_SUBQ: 25745 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25746 break; 25747 case CDROM_DA_ALL_SUBCODE: 25748 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25749 break; 25750 case CDROM_DA_SUBCODE_ONLY: 25751 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25752 break; 25753 default: 25754 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25755 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25756 cdda->cdda_subcode); 25757 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25758 return (EINVAL); 25759 } 25760 25761 /* Build and send the command */ 25762 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25763 bzero(cdb, CDB_GROUP5); 25764 25765 if (un->un_f_cfg_cdda == TRUE) { 25766 cdb[0] = (char)SCMD_READ_CD; 25767 cdb[1] = 0x04; 25768 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25769 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25770 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25771 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25772 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25773 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25774 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25775 cdb[9] = 0x10; 25776 switch (cdda->cdda_subcode) { 25777 case CDROM_DA_NO_SUBCODE : 25778 cdb[10] = 0x0; 25779 break; 25780 case CDROM_DA_SUBQ : 25781 cdb[10] = 0x2; 25782 break; 25783 case CDROM_DA_ALL_SUBCODE : 25784 cdb[10] = 0x1; 25785 break; 25786 case CDROM_DA_SUBCODE_ONLY : 25787 /* FALLTHROUGH */ 25788 default : 25789 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25790 kmem_free(com, sizeof (*com)); 25791 return (ENOTTY); 25792 } 25793 } else { 25794 cdb[0] = (char)SCMD_READ_CDDA; 25795 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25796 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25797 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25798 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25799 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25800 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25801 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25802 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25803 cdb[10] = cdda->cdda_subcode; 25804 } 25805 25806 com->uscsi_cdb = cdb; 25807 com->uscsi_cdblen = CDB_GROUP5; 25808 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25809 com->uscsi_buflen = buflen; 25810 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25811 25812 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25813 SD_PATH_STANDARD); 25814 25815 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25816 kmem_free(com, sizeof (*com)); 25817 return (rval); 25818 } 25819 25820 25821 /* 25822 * Function: sr_read_cdxa() 25823 * 25824 * Description: This routine is the driver entry point for handling CD-ROM 25825 * ioctl requests to return CD-XA (Extended Architecture) data. 25826 * (CDROMCDXA). 25827 * 25828 * Arguments: dev - the device 'dev_t' 25829 * data - pointer to user provided CD-XA structure specifying 25830 * the data starting address, transfer length, and format 25831 * flag - this argument is a pass through to ddi_copyxxx() 25832 * directly from the mode argument of ioctl(). 25833 * 25834 * Return Code: the code returned by sd_send_scsi_cmd() 25835 * EFAULT if ddi_copyxxx() fails 25836 * ENXIO if fail ddi_get_soft_state 25837 * EINVAL if data pointer is NULL 25838 */ 25839 25840 static int 25841 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25842 { 25843 struct sd_lun *un; 25844 struct uscsi_cmd *com; 25845 struct cdrom_cdxa *cdxa; 25846 int rval; 25847 size_t buflen; 25848 char cdb[CDB_GROUP5]; 25849 uchar_t read_flags; 25850 25851 #ifdef _MULTI_DATAMODEL 25852 /* To support ILP32 applications in an LP64 world */ 25853 struct cdrom_cdxa32 cdrom_cdxa32; 25854 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25855 #endif /* _MULTI_DATAMODEL */ 25856 25857 if (data == NULL) { 25858 return (EINVAL); 25859 } 25860 25861 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25862 return (ENXIO); 25863 } 25864 25865 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25866 25867 #ifdef _MULTI_DATAMODEL 25868 switch (ddi_model_convert_from(flag & FMODELS)) { 25869 case DDI_MODEL_ILP32: 25870 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25871 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25872 return (EFAULT); 25873 } 25874 /* 25875 * Convert the ILP32 uscsi data from the 25876 * application to LP64 for internal use. 
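 * The ILP32 variant of the structure should differ only in the
 * width of the cdxa_data user buffer pointer, so the conversion
 * amounts to a field-by-field copy that widens that pointer.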
25877 */ 25878 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 25879 break; 25880 case DDI_MODEL_NONE: 25881 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 25882 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25883 return (EFAULT); 25884 } 25885 break; 25886 } 25887 #else /* ! _MULTI_DATAMODEL */ 25888 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 25889 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25890 return (EFAULT); 25891 } 25892 #endif /* _MULTI_DATAMODEL */ 25893 25894 /* 25895 * Since MMC-2 allows at most 3 bytes for the transfer length, 25896 * check if the length input is greater than 3 bytes. 25897 */ 25898 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 25899 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 25900 "cdrom transfer length too large: %d (limit %d)\n", 25901 cdxa->cdxa_length, 0xFFFFFF); 25902 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25903 return (EINVAL); 25904 } 25905 25906 switch (cdxa->cdxa_format) { 25907 case CDROM_XA_DATA: 25908 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 25909 read_flags = 0x10; 25910 break; 25911 case CDROM_XA_SECTOR_DATA: 25912 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 25913 read_flags = 0xf8; 25914 break; 25915 case CDROM_XA_DATA_W_ERROR: 25916 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 25917 read_flags = 0xfc; 25918 break; 25919 default: 25920 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25921 "sr_read_cdxa: Format '0x%x' Not Supported\n", 25922 cdxa->cdxa_format); 25923 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25924 return (EINVAL); 25925 } 25926 25927 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25928 bzero(cdb, CDB_GROUP5); 25929 if (un->un_f_mmc_cap == TRUE) { 25930 cdb[0] = (char)SCMD_READ_CD; 25931 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25932 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25933 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25934 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25935 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25936 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25937 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 25938 cdb[9] = (char)read_flags; 25939 } else { 25940 /* 25941 * Note: A vendor specific command (0xDB) is being used here to 25942 * request a read of all subcodes.
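 * Unlike the READ CD path above, which maps cdxa_format onto the
 * READ CD flag bits, the vendor command passes the user supplied
 * format code straight through in byte 10 of the CDB.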
25943 */ 25944 cdb[0] = (char)SCMD_READ_CDXA; 25945 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25946 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25947 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25948 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25949 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25950 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25951 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25952 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25953 cdb[10] = cdxa->cdxa_format; 25954 } 25955 com->uscsi_cdb = cdb; 25956 com->uscsi_cdblen = CDB_GROUP5; 25957 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25958 com->uscsi_buflen = buflen; 25959 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25960 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25961 SD_PATH_STANDARD); 25962 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25963 kmem_free(com, sizeof (*com)); 25964 return (rval); 25965 } 25966 25967 25968 /* 25969 * Function: sr_eject() 25970 * 25971 * Description: This routine is the driver entry point for handling CD-ROM 25972 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25973 * 25974 * Arguments: dev - the device 'dev_t' 25975 * 25976 * Return Code: the code returned by sd_send_scsi_cmd() 25977 */ 25978 25979 static int 25980 sr_eject(dev_t dev) 25981 { 25982 struct sd_lun *un; 25983 int rval; 25984 25985 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25986 (un->un_state == SD_STATE_OFFLINE)) { 25987 return (ENXIO); 25988 } 25989 25990 /* 25991 * To prevent race conditions with the eject 25992 * command, keep track of an eject command as 25993 * it progresses. If we are already handling 25994 * an eject command in the driver for the given 25995 * unit and another request to eject is received 25996 * immediately return EAGAIN so we don't lose 25997 * the command if the current eject command fails. 25998 */ 25999 mutex_enter(SD_MUTEX(un)); 26000 if (un->un_f_ejecting == TRUE) { 26001 mutex_exit(SD_MUTEX(un)); 26002 return (EAGAIN); 26003 } 26004 un->un_f_ejecting = TRUE; 26005 mutex_exit(SD_MUTEX(un)); 26006 26007 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 26008 SD_PATH_STANDARD)) != 0) { 26009 mutex_enter(SD_MUTEX(un)); 26010 un->un_f_ejecting = FALSE; 26011 mutex_exit(SD_MUTEX(un)); 26012 return (rval); 26013 } 26014 26015 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 26016 SD_PATH_STANDARD); 26017 26018 if (rval == 0) { 26019 mutex_enter(SD_MUTEX(un)); 26020 sr_ejected(un); 26021 un->un_mediastate = DKIO_EJECTED; 26022 un->un_f_ejecting = FALSE; 26023 cv_broadcast(&un->un_state_cv); 26024 mutex_exit(SD_MUTEX(un)); 26025 } else { 26026 mutex_enter(SD_MUTEX(un)); 26027 un->un_f_ejecting = FALSE; 26028 mutex_exit(SD_MUTEX(un)); 26029 } 26030 return (rval); 26031 } 26032 26033 26034 /* 26035 * Function: sr_ejected() 26036 * 26037 * Description: This routine updates the soft state structure to invalidate the 26038 * geometry information after the media has been ejected or a 26039 * media eject has been detected. 
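 * Note that SD_MUTEX must be held on entry (see the ASSERT below);
 * it is dropped and reacquired around the cmlb_invalidate() call.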
26040 * 26041 * Arguments: un - driver soft state (unit) structure 26042 */ 26043 26044 static void 26045 sr_ejected(struct sd_lun *un) 26046 { 26047 struct sd_errstats *stp; 26048 26049 ASSERT(un != NULL); 26050 ASSERT(mutex_owned(SD_MUTEX(un))); 26051 26052 un->un_f_blockcount_is_valid = FALSE; 26053 un->un_f_tgt_blocksize_is_valid = FALSE; 26054 mutex_exit(SD_MUTEX(un)); 26055 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 26056 mutex_enter(SD_MUTEX(un)); 26057 26058 if (un->un_errstats != NULL) { 26059 stp = (struct sd_errstats *)un->un_errstats->ks_data; 26060 stp->sd_capacity.value.ui64 = 0; 26061 } 26062 26063 /* remove "capacity-of-device" properties */ 26064 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 26065 "device-nblocks"); 26066 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 26067 "device-blksize"); 26068 } 26069 26070 26071 /* 26072 * Function: sr_check_wp() 26073 * 26074 * Description: This routine checks the write protection of removable 26075 * media disks and hotpluggable devices via the write protect bit of 26076 * the Mode Page Header device specific field. Some devices choke 26077 * on an unsupported mode page. To work around this issue, this 26078 * routine uses the 0x3f mode page (request 26079 * for all pages) for all device types. 26080 * 26081 * Arguments: dev - the device 'dev_t' 26082 * 26083 * Return Code: int indicating if the device is write protected (1) or not (0) 26084 * 26085 * Context: Kernel thread. 26086 * 26087 */ 26088 26089 static int 26090 sr_check_wp(dev_t dev) 26091 { 26092 struct sd_lun *un; 26093 uchar_t device_specific; 26094 uchar_t *sense; 26095 int hdrlen; 26096 int rval = FALSE; 26097 26098 /* 26099 * Note: The return codes for this routine should be reworked to 26100 * properly handle the case of a NULL softstate. 26101 */ 26102 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26103 return (FALSE); 26104 } 26105 26106 if (un->un_f_cfg_is_atapi == TRUE) { 26107 /* 26108 * The mode page contents are not required; set the allocation 26109 * length for the mode page header only 26110 */ 26111 hdrlen = MODE_HEADER_LENGTH_GRP2; 26112 sense = kmem_zalloc(hdrlen, KM_SLEEP); 26113 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 26114 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 26115 goto err_exit; 26116 device_specific = 26117 ((struct mode_header_grp2 *)sense)->device_specific; 26118 } else { 26119 hdrlen = MODE_HEADER_LENGTH; 26120 sense = kmem_zalloc(hdrlen, KM_SLEEP); 26121 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 26122 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 26123 goto err_exit; 26124 device_specific = 26125 ((struct mode_header *)sense)->device_specific; 26126 } 26127 26128 /* 26129 * The write protect bit lives in the device specific byte. If the 26130 * mode sense above failed (not all devices understand this query), 26131 * we return FALSE and treat such devices as not write protected. 26132 */ 26133 if (device_specific & WRITE_PROTECT) { 26134 rval = TRUE; 26135 } 26136 26137 err_exit: 26138 kmem_free(sense, hdrlen); 26139 return (rval); 26140 } 26141 26142 /* 26143 * Function: sr_volume_ctrl() 26144 * 26145 * Description: This routine is the driver entry point for handling CD-ROM 26146 * audio output volume ioctl requests (CDROMVOLCTRL). 26147 * 26148 * Arguments: dev - the device 'dev_t' 26149 * data - pointer to user audio volume control structure 26150 * flag - this argument is a pass through to ddi_copyxxx() 26151 * directly from the mode argument of ioctl().
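 *
 * The audio control page assembled below sets the immediate bit,
 * routes output port 0 to channel 0 and port 1 to channel 1 (the
 * 0x01 and 0x02 selection values), and applies the user supplied
 * volume levels; ports 2 and 3 are returned unchanged from the
 * mode sense data, as they are currently unsupported.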
26152 * 26153 * Return Code: the code returned by sd_send_scsi_cmd() 26154 * EFAULT if ddi_copyxxx() fails 26155 * ENXIO if fail ddi_get_soft_state 26156 * EINVAL if data pointer is NULL 26157 * 26158 */ 26159 26160 static int 26161 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 26162 { 26163 struct sd_lun *un; 26164 struct cdrom_volctrl volume; 26165 struct cdrom_volctrl *vol = &volume; 26166 uchar_t *sense_page; 26167 uchar_t *select_page; 26168 uchar_t *sense; 26169 uchar_t *select; 26170 int sense_buflen; 26171 int select_buflen; 26172 int rval; 26173 26174 if (data == NULL) { 26175 return (EINVAL); 26176 } 26177 26178 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26179 (un->un_state == SD_STATE_OFFLINE)) { 26180 return (ENXIO); 26181 } 26182 26183 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 26184 return (EFAULT); 26185 } 26186 26187 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26188 struct mode_header_grp2 *sense_mhp; 26189 struct mode_header_grp2 *select_mhp; 26190 int bd_len; 26191 26192 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 26193 select_buflen = MODE_HEADER_LENGTH_GRP2 + 26194 MODEPAGE_AUDIO_CTRL_LEN; 26195 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26196 select = kmem_zalloc(select_buflen, KM_SLEEP); 26197 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26198 sense_buflen, MODEPAGE_AUDIO_CTRL, 26199 SD_PATH_STANDARD)) != 0) { 26200 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 26201 "sr_volume_ctrl: Mode Sense Failed\n"); 26202 kmem_free(sense, sense_buflen); 26203 kmem_free(select, select_buflen); 26204 return (rval); 26205 } 26206 sense_mhp = (struct mode_header_grp2 *)sense; 26207 select_mhp = (struct mode_header_grp2 *)select; 26208 bd_len = (sense_mhp->bdesc_length_hi << 8) | 26209 sense_mhp->bdesc_length_lo; 26210 if (bd_len > MODE_BLK_DESC_LENGTH) { 26211 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26212 "sr_volume_ctrl: Mode Sense returned invalid " 26213 "block descriptor length\n"); 26214 kmem_free(sense, sense_buflen); 26215 kmem_free(select, select_buflen); 26216 return (EIO); 26217 } 26218 sense_page = (uchar_t *) 26219 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26220 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 26221 select_mhp->length_msb = 0; 26222 select_mhp->length_lsb = 0; 26223 select_mhp->bdesc_length_hi = 0; 26224 select_mhp->bdesc_length_lo = 0; 26225 } else { 26226 struct mode_header *sense_mhp, *select_mhp; 26227 26228 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26229 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26230 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26231 select = kmem_zalloc(select_buflen, KM_SLEEP); 26232 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26233 sense_buflen, MODEPAGE_AUDIO_CTRL, 26234 SD_PATH_STANDARD)) != 0) { 26235 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26236 "sr_volume_ctrl: Mode Sense Failed\n"); 26237 kmem_free(sense, sense_buflen); 26238 kmem_free(select, select_buflen); 26239 return (rval); 26240 } 26241 sense_mhp = (struct mode_header *)sense; 26242 select_mhp = (struct mode_header *)select; 26243 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26244 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26245 "sr_volume_ctrl: Mode Sense returned invalid " 26246 "block descriptor length\n"); 26247 kmem_free(sense, sense_buflen); 26248 kmem_free(select, select_buflen); 26249 return (EIO); 26250 } 26251 sense_page = (uchar_t *) 26252 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 26253 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 26254 select_mhp->length = 0; 26255 select_mhp->bdesc_length = 0; 26256 } 26257 /* 26258 * Note: An audio control data structure could be created and overlayed 26259 * on the following in place of the array indexing method implemented. 26260 */ 26261 26262 /* Build the select data for the user volume data */ 26263 select_page[0] = MODEPAGE_AUDIO_CTRL; 26264 select_page[1] = 0xE; 26265 /* Set the immediate bit */ 26266 select_page[2] = 0x04; 26267 /* Zero out reserved fields */ 26268 select_page[3] = 0x00; 26269 select_page[4] = 0x00; 26270 /* Return sense data for fields not to be modified */ 26271 select_page[5] = sense_page[5]; 26272 select_page[6] = sense_page[6]; 26273 select_page[7] = sense_page[7]; 26274 /* Set the user specified volume levels for channel 0 and 1 */ 26275 select_page[8] = 0x01; 26276 select_page[9] = vol->channel0; 26277 select_page[10] = 0x02; 26278 select_page[11] = vol->channel1; 26279 /* Channel 2 and 3 are currently unsupported so return the sense data */ 26280 select_page[12] = sense_page[12]; 26281 select_page[13] = sense_page[13]; 26282 select_page[14] = sense_page[14]; 26283 select_page[15] = sense_page[15]; 26284 26285 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26286 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 26287 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26288 } else { 26289 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26290 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26291 } 26292 26293 kmem_free(sense, sense_buflen); 26294 kmem_free(select, select_buflen); 26295 return (rval); 26296 } 26297 26298 26299 /* 26300 * Function: sr_read_sony_session_offset() 26301 * 26302 * Description: This routine is the driver entry point for handling CD-ROM 26303 * ioctl requests for session offset information. (CDROMREADOFFSET) 26304 * The address of the first track in the last session of a 26305 * multi-session CD-ROM is returned 26306 * 26307 * Note: This routine uses a vendor specific key value in the 26308 * command control field without implementing any vendor check here 26309 * or in the ioctl routine. 26310 * 26311 * Arguments: dev - the device 'dev_t' 26312 * data - pointer to an int to hold the requested address 26313 * flag - this argument is a pass through to ddi_copyxxx() 26314 * directly from the mode argument of ioctl(). 26315 * 26316 * Return Code: the code returned by sd_send_scsi_cmd() 26317 * EFAULT if ddi_copyxxx() fails 26318 * ENXIO if fail ddi_get_soft_state 26319 * EINVAL if data pointer is NULL 26320 */ 26321 26322 static int 26323 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 26324 { 26325 struct sd_lun *un; 26326 struct uscsi_cmd *com; 26327 caddr_t buffer; 26328 char cdb[CDB_GROUP1]; 26329 int session_offset = 0; 26330 int rval; 26331 26332 if (data == NULL) { 26333 return (EINVAL); 26334 } 26335 26336 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26337 (un->un_state == SD_STATE_OFFLINE)) { 26338 return (ENXIO); 26339 } 26340 26341 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26342 bzero(cdb, CDB_GROUP1); 26343 cdb[0] = SCMD_READ_TOC; 26344 /* 26345 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26346 * (4 byte TOC response header + 8 byte response data) 26347 */ 26348 cdb[8] = SONY_SESSION_OFFSET_LEN; 26349 /* Byte 9 is the control byte. 
A vendor specific value is used */ 26350 cdb[9] = SONY_SESSION_OFFSET_KEY; 26351 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26352 com->uscsi_cdb = cdb; 26353 com->uscsi_cdblen = CDB_GROUP1; 26354 com->uscsi_bufaddr = buffer; 26355 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 26356 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26357 26358 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26359 SD_PATH_STANDARD); 26360 if (rval != 0) { 26361 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 26362 kmem_free(com, sizeof (*com)); 26363 return (rval); 26364 } 26365 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 26366 session_offset = 26367 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26368 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26369 /* 26370 * The offset is returned in current lbasize blocks. Convert to 26371 * 2k blocks to return to the user. 26372 */ 26373 if (un->un_tgt_blocksize == CDROM_BLK_512) { 26374 session_offset >>= 2; 26375 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 26376 session_offset >>= 1; 26377 } 26378 } 26379 26380 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 26381 rval = EFAULT; 26382 } 26383 26384 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 26385 kmem_free(com, sizeof (*com)); 26386 return (rval); 26387 } 26388 26389 26390 /* 26391 * Function: sd_wm_cache_constructor() 26392 * 26393 * Description: Cache Constructor for the wmap cache for the read/modify/write 26394 * devices. 26395 * 26396 * Arguments: wm - A pointer to the sd_w_map to be initialized. 26397 * un - sd_lun structure for the device. 26398 * flags - the km flags passed to the constructor 26399 * 26400 * Return Code: 0 on success. 26401 * -1 on failure. 26402 */ 26403 26404 /*ARGSUSED*/ 26405 static int 26406 sd_wm_cache_constructor(void *wm, void *un, int flags) 26407 { 26408 bzero(wm, sizeof (struct sd_w_map)); 26409 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 26410 return (0); 26411 } 26412 26413 26414 /* 26415 * Function: sd_wm_cache_destructor() 26416 * 26417 * Description: Cache destructor for the wmap cache for the read/modify/write 26418 * devices. 26419 * 26420 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 26421 * un - sd_lun structure for the device. 26422 */ 26423 /*ARGSUSED*/ 26424 static void 26425 sd_wm_cache_destructor(void *wm, void *un) 26426 { 26427 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 26428 } 26429 26430 26431 /* 26432 * Function: sd_range_lock() 26433 * 26434 * Description: Lock the specified range of blocks to ensure 26435 * that a read-modify-write is atomic and that no other i/o writes 26436 * to the same location. The range is specified in terms 26437 * of start and end blocks. Block numbers are the actual 26438 * media block numbers and not system block numbers. 26439 * 26440 * Arguments: un - sd_lun structure for the device. 26441 * startb - The starting block number 26442 * endb - The end block number 26443 * typ - type of i/o - simple/read_modify_write 26444 * 26445 * Return Code: wm - pointer to the wmap structure. 26446 * 26447 * Context: This routine can sleep.
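 *
 * The lookup below is implemented as a small state machine:
 *
 *   SD_WM_CHK_LIST   -> SD_WM_LOCK_RANGE  range is available
 *   SD_WM_CHK_LIST   -> SD_WM_WAIT_MAP    range is busy
 *   SD_WM_WAIT_MAP   -> SD_WM_CHK_LIST    recheck after wakeup
 *   SD_WM_LOCK_RANGE -> SD_WM_CHK_LIST    mutex was dropped for a
 *                                         sleeping allocation
 *   SD_WM_LOCK_RANGE -> SD_WM_DONE        wmap allocated and linked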
26448 */ 26449 26450 static struct sd_w_map * 26451 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 26452 { 26453 struct sd_w_map *wmp = NULL; 26454 struct sd_w_map *sl_wmp = NULL; 26455 struct sd_w_map *tmp_wmp; 26456 wm_state state = SD_WM_CHK_LIST; 26457 26458 26459 ASSERT(un != NULL); 26460 ASSERT(!mutex_owned(SD_MUTEX(un))); 26461 26462 mutex_enter(SD_MUTEX(un)); 26463 26464 while (state != SD_WM_DONE) { 26465 26466 switch (state) { 26467 case SD_WM_CHK_LIST: 26468 /* 26469 * This is the starting state. Check the wmap list 26470 * to see if the range is currently available. 26471 */ 26472 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 26473 /* 26474 * If this is a simple write and no rmw 26475 * i/o is pending then try to lock the 26476 * range as the range should be available. 26477 */ 26478 state = SD_WM_LOCK_RANGE; 26479 } else { 26480 tmp_wmp = sd_get_range(un, startb, endb); 26481 if (tmp_wmp != NULL) { 26482 if ((wmp != NULL) && ONLIST(un, wmp)) { 26483 /* 26484 * Should not keep onlist wmps 26485 * while waiting; this macro 26486 * will also do wmp = NULL. 26487 */ 26488 FREE_ONLIST_WMAP(un, wmp); 26489 } 26490 /* 26491 * sl_wmp is the wmap on which the wait 26492 * is done. Since tmp_wmp points 26493 * to the in-use wmap, set sl_wmp to 26494 * tmp_wmp and change the state to sleep. 26495 */ 26496 sl_wmp = tmp_wmp; 26497 state = SD_WM_WAIT_MAP; 26498 } else { 26499 state = SD_WM_LOCK_RANGE; 26500 } 26501 26502 } 26503 break; 26504 26505 case SD_WM_LOCK_RANGE: 26506 ASSERT(un->un_wm_cache); 26507 /* 26508 * The range needs to be locked; try to get a wmap. 26509 * First attempt it with KM_NOSLEEP: we want to avoid a sleep 26510 * if possible, as we will have to release the sd mutex 26511 * if we have to sleep. 26512 */ 26513 if (wmp == NULL) 26514 wmp = kmem_cache_alloc(un->un_wm_cache, 26515 KM_NOSLEEP); 26516 if (wmp == NULL) { 26517 mutex_exit(SD_MUTEX(un)); 26518 _NOTE(DATA_READABLE_WITHOUT_LOCK 26519 (sd_lun::un_wm_cache)) 26520 wmp = kmem_cache_alloc(un->un_wm_cache, 26521 KM_SLEEP); 26522 mutex_enter(SD_MUTEX(un)); 26523 /* 26524 * We released the mutex, so recheck and go to the 26525 * check-list state. 26526 */ 26527 state = SD_WM_CHK_LIST; 26528 } else { 26529 /* 26530 * We exit out of the state machine since we 26531 * have the wmap. Do the housekeeping first: 26532 * place the wmap on the wmap list if it is not 26533 * on it already, and then set the state to done. 26534 */ 26535 wmp->wm_start = startb; 26536 wmp->wm_end = endb; 26537 wmp->wm_flags = typ | SD_WM_BUSY; 26538 if (typ & SD_WTYPE_RMW) { 26539 un->un_rmw_count++; 26540 } 26541 /* 26542 * If not already on the list, then link it. 26543 */ 26544 if (!ONLIST(un, wmp)) { 26545 wmp->wm_next = un->un_wm; 26546 wmp->wm_prev = NULL; 26547 if (wmp->wm_next) 26548 wmp->wm_next->wm_prev = wmp; 26549 un->un_wm = wmp; 26550 } 26551 state = SD_WM_DONE; 26552 } 26553 break; 26554 26555 case SD_WM_WAIT_MAP: 26556 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 26557 /* 26558 * Wait is done on sl_wmp, which is set in the 26559 * check_list state. 26560 */ 26561 sl_wmp->wm_wanted_count++; 26562 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 26563 sl_wmp->wm_wanted_count--; 26564 /* 26565 * We can reuse the memory from the completed sl_wmp 26566 * lock range for our new lock, but only if no one is 26567 * waiting for it.
26568 */ 26569 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 26570 if (sl_wmp->wm_wanted_count == 0) { 26571 if (wmp != NULL) 26572 CHK_N_FREEWMP(un, wmp); 26573 wmp = sl_wmp; 26574 } 26575 sl_wmp = NULL; 26576 /* 26577 * After waking up, need to recheck for availability of 26578 * range. 26579 */ 26580 state = SD_WM_CHK_LIST; 26581 break; 26582 26583 default: 26584 panic("sd_range_lock: " 26585 "Unknown state %d in sd_range_lock", state); 26586 /*NOTREACHED*/ 26587 } /* switch(state) */ 26588 26589 } /* while(state != SD_WM_DONE) */ 26590 26591 mutex_exit(SD_MUTEX(un)); 26592 26593 ASSERT(wmp != NULL); 26594 26595 return (wmp); 26596 } 26597 26598 26599 /* 26600 * Function: sd_get_range() 26601 * 26602 * Description: Find out if there is any I/O overlapping with this one. 26603 * Returns the write map of the first such I/O, NULL otherwise. 26604 * 26605 * Arguments: un - sd_lun structure for the device. 26606 * startb - The starting block number 26607 * endb - The end block number 26608 * 26609 * Return Code: wm - pointer to the wmap structure. 26610 */ 26611 26612 static struct sd_w_map * 26613 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 26614 { 26615 struct sd_w_map *wmp; 26616 26617 ASSERT(un != NULL); 26618 26619 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 26620 if (!(wmp->wm_flags & SD_WM_BUSY)) { 26621 continue; 26622 } 26623 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 26624 break; 26625 } 26626 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 26627 break; 26628 } 26629 } 26630 26631 return (wmp); 26632 } 26633 26634 26635 /* 26636 * Function: sd_free_inlist_wmap() 26637 * 26638 * Description: Unlink and free a write map struct. 26639 * 26640 * Arguments: un - sd_lun structure for the device. 26641 * wmp - sd_w_map which needs to be unlinked. 26642 */ 26643 26644 static void 26645 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 26646 { 26647 ASSERT(un != NULL); 26648 26649 if (un->un_wm == wmp) { 26650 un->un_wm = wmp->wm_next; 26651 } else { 26652 wmp->wm_prev->wm_next = wmp->wm_next; 26653 } 26654 26655 if (wmp->wm_next) { 26656 wmp->wm_next->wm_prev = wmp->wm_prev; 26657 } 26658 26659 wmp->wm_next = wmp->wm_prev = NULL; 26660 26661 kmem_cache_free(un->un_wm_cache, wmp); 26662 } 26663 26664 26665 /* 26666 * Function: sd_range_unlock() 26667 * 26668 * Description: Unlock the range locked by wm. 26669 * Free the write map if nobody else is waiting on it. 26670 * 26671 * Arguments: un - sd_lun structure for the device. 26672 * wm - sd_w_map which needs to be unlocked. 26673 */ 26674 26675 static void 26676 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 26677 { 26678 ASSERT(un != NULL); 26679 ASSERT(wm != NULL); 26680 ASSERT(!mutex_owned(SD_MUTEX(un))); 26681 26682 mutex_enter(SD_MUTEX(un)); 26683 26684 if (wm->wm_flags & SD_WTYPE_RMW) { 26685 un->un_rmw_count--; 26686 } 26687 26688 if (wm->wm_wanted_count) { 26689 wm->wm_flags = 0; 26690 /* 26691 * Broadcast that the wmap is available now. 26692 */ 26693 cv_broadcast(&wm->wm_avail); 26694 } else { 26695 /* 26696 * If no one is waiting on the map, it should be freed. 26697 */ 26698 sd_free_inlist_wmap(un, wm); 26699 } 26700 26701 mutex_exit(SD_MUTEX(un)); 26702 } 26703 26704 26705 /* 26706 * Function: sd_read_modify_write_task 26707 * 26708 * Description: Called from a taskq thread to initiate the write phase of 26709 * a read-modify-write request. This is used for targets where 26710 * un->un_sys_blocksize != un->un_tgt_blocksize.
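 * For example, with a un_tgt_blocksize of 2048 and a un_sys_blocksize
 * of 512, a 512-byte write is handled by first reading the enclosing
 * 2048-byte target block, merging the new 512 bytes into that buffer,
 * and then sending the resulting full-block write down the iostart
 * chain from here.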
26711 * 26712 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26713 * 26714 * Context: Called under taskq thread context. 26715 */ 26716 26717 static void 26718 sd_read_modify_write_task(void *arg) 26719 { 26720 struct sd_mapblocksize_info *bsp; 26721 struct buf *bp; 26722 struct sd_xbuf *xp; 26723 struct sd_lun *un; 26724 26725 bp = arg; /* The bp is given in arg */ 26726 ASSERT(bp != NULL); 26727 26728 /* Get the pointer to the layer-private data struct */ 26729 xp = SD_GET_XBUF(bp); 26730 ASSERT(xp != NULL); 26731 bsp = xp->xb_private; 26732 ASSERT(bsp != NULL); 26733 26734 un = SD_GET_UN(bp); 26735 ASSERT(un != NULL); 26736 ASSERT(!mutex_owned(SD_MUTEX(un))); 26737 26738 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26739 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26740 26741 /* 26742 * This is the write phase of a read-modify-write request, called 26743 * under the context of a taskq thread in response to the read 26744 * portion of the rmw request completing under interrupt 26745 * context. The write request must be sent from here down the iostart 26746 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26747 * we use the layer index saved in the layer-private data area. 26748 */ 26749 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26750 26751 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26752 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26753 } 26754 26755 26756 /* 26757 * Function: sddump_do_read_of_rmw() 26758 * 26759 * Description: This routine is called from sddump(). If sddump() is called 26760 * with an I/O which is not aligned on a device blocksize boundary, 26761 * then the write has to be converted to a read-modify-write. 26762 * Do the read part here in order to keep sddump simple. 26763 * Note that the sd_mutex is held across the call to this 26764 * routine. 26765 * 26766 * Arguments: un - sd_lun 26767 * blkno - block number in terms of media block size. 26768 * nblk - number of blocks. 26769 * bpp - pointer to pointer to the buf structure. On return 26770 * from this function, *bpp points to the valid buffer 26771 * to which the write has to be done. 26772 * 26773 * Return Code: 0 for success or errno-type return code 26774 */ 26775 26776 static int 26777 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 26778 struct buf **bpp) 26779 { 26780 int err; 26781 int i; 26782 int rval; 26783 struct buf *bp; 26784 struct scsi_pkt *pkt = NULL; 26785 uint32_t target_blocksize; 26786 26787 ASSERT(un != NULL); 26788 ASSERT(mutex_owned(SD_MUTEX(un))); 26789 26790 target_blocksize = un->un_tgt_blocksize; 26791 26792 mutex_exit(SD_MUTEX(un)); 26793 26794 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 26795 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 26796 if (bp == NULL) { 26797 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26798 "no resources for dumping; giving up"); 26799 err = ENOMEM; 26800 goto done; 26801 } 26802 26803 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 26804 blkno, nblk); 26805 if (rval != 0) { 26806 scsi_free_consistent_buf(bp); 26807 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26808 "no resources for dumping; giving up"); 26809 err = ENOMEM; 26810 goto done; 26811 } 26812 26813 pkt->pkt_flags |= FLAG_NOINTR; 26814 26815 err = EIO; 26816 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26817 26818 /* 26819 * Scsi_poll returns 0 (success) if the command completes and 26820 * the status block is STATUS_GOOD.
We should only check 26821 * errors if this condition is not true. Even then we should 26822 * send our own request sense packet only if we have a check 26823 * condition and auto request sense has not been performed by 26824 * the hba. 26825 */ 26826 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26827 26828 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26829 err = 0; 26830 break; 26831 } 26832 26833 /* 26834 * Check CMD_DEV_GONE 1st, give up if device is gone, 26835 * no need to read RQS data. 26836 */ 26837 if (pkt->pkt_reason == CMD_DEV_GONE) { 26838 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26839 "Error while dumping state with rmw..." 26840 "Device is gone\n"); 26841 break; 26842 } 26843 26844 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26845 SD_INFO(SD_LOG_DUMP, un, 26846 "sddump: read failed with CHECK, try # %d\n", i); 26847 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26848 (void) sd_send_polled_RQS(un); 26849 } 26850 26851 continue; 26852 } 26853 26854 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26855 int reset_retval = 0; 26856 26857 SD_INFO(SD_LOG_DUMP, un, 26858 "sddump: read failed with BUSY, try # %d\n", i); 26859 26860 if (un->un_f_lun_reset_enabled == TRUE) { 26861 reset_retval = scsi_reset(SD_ADDRESS(un), 26862 RESET_LUN); 26863 } 26864 if (reset_retval == 0) { 26865 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26866 } 26867 (void) sd_send_polled_RQS(un); 26868 26869 } else { 26870 SD_INFO(SD_LOG_DUMP, un, 26871 "sddump: read failed with 0x%x, try # %d\n", 26872 SD_GET_PKT_STATUS(pkt), i); 26873 mutex_enter(SD_MUTEX(un)); 26874 sd_reset_target(un, pkt); 26875 mutex_exit(SD_MUTEX(un)); 26876 } 26877 26878 /* 26879 * If we are not getting anywhere with lun/target resets, 26880 * let's reset the bus. 26881 */ 26882 if (i > SD_NDUMP_RETRIES/2) { 26883 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26884 (void) sd_send_polled_RQS(un); 26885 } 26886 26887 } 26888 scsi_destroy_pkt(pkt); 26889 26890 if (err != 0) { 26891 scsi_free_consistent_buf(bp); 26892 *bpp = NULL; 26893 } else { 26894 *bpp = bp; 26895 } 26896 26897 done: 26898 mutex_enter(SD_MUTEX(un)); 26899 return (err); 26900 } 26901 26902 26903 /* 26904 * Function: sd_failfast_flushq 26905 * 26906 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26907 * in b_flags and move them onto the failfast queue, then kick 26908 * off a thread to return all bp's on the failfast queue to 26909 * their owners with an error set. 26910 * 26911 * Arguments: un - pointer to the soft state struct for the instance. 26912 * 26913 * Context: may execute in interrupt context. 26914 */ 26915 26916 static void 26917 sd_failfast_flushq(struct sd_lun *un) 26918 { 26919 struct buf *bp; 26920 struct buf *next_waitq_bp; 26921 struct buf *prev_waitq_bp = NULL; 26922 26923 ASSERT(un != NULL); 26924 ASSERT(mutex_owned(SD_MUTEX(un))); 26925 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26926 ASSERT(un->un_failfast_bp == NULL); 26927 26928 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26929 "sd_failfast_flushq: entry: un:0x%p\n", un); 26930 26931 /* 26932 * Check if we should flush all bufs when entering failfast state, or 26933 * just those with B_FAILFAST set. 26934 */ 26935 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26936 /* 26937 * Move *all* bp's on the wait queue to the failfast flush 26938 * queue, including those that do NOT have B_FAILFAST set. 
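 * The whole wait queue is spliced onto the tail of the failfast
 * queue in one step; the per-buf loop below exists only to update
 * the wait queue kstats for each moved buf.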
26939 */ 26940 if (un->un_failfast_headp == NULL) { 26941 ASSERT(un->un_failfast_tailp == NULL); 26942 un->un_failfast_headp = un->un_waitq_headp; 26943 } else { 26944 ASSERT(un->un_failfast_tailp != NULL); 26945 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26946 } 26947 26948 un->un_failfast_tailp = un->un_waitq_tailp; 26949 26950 /* update kstat for each bp moved out of the waitq */ 26951 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26952 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26953 } 26954 26955 /* empty the waitq */ 26956 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26957 26958 } else { 26959 /* 26960 * Go thru the wait queue, pick off all entries with 26961 * B_FAILFAST set, and move these onto the failfast queue. 26962 */ 26963 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26964 /* 26965 * Save the pointer to the next bp on the wait queue, 26966 * so we get to it on the next iteration of this loop. 26967 */ 26968 next_waitq_bp = bp->av_forw; 26969 26970 /* 26971 * If this bp from the wait queue does NOT have 26972 * B_FAILFAST set, just move on to the next element 26973 * in the wait queue. Note, this is the only place 26974 * where it is correct to set prev_waitq_bp. 26975 */ 26976 if ((bp->b_flags & B_FAILFAST) == 0) { 26977 prev_waitq_bp = bp; 26978 continue; 26979 } 26980 26981 /* 26982 * Remove the bp from the wait queue. 26983 */ 26984 if (bp == un->un_waitq_headp) { 26985 /* The bp is the first element of the waitq. */ 26986 un->un_waitq_headp = next_waitq_bp; 26987 if (un->un_waitq_headp == NULL) { 26988 /* The wait queue is now empty */ 26989 un->un_waitq_tailp = NULL; 26990 } 26991 } else { 26992 /* 26993 * The bp is either somewhere in the middle 26994 * or at the end of the wait queue. 26995 */ 26996 ASSERT(un->un_waitq_headp != NULL); 26997 ASSERT(prev_waitq_bp != NULL); 26998 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 26999 == 0); 27000 if (bp == un->un_waitq_tailp) { 27001 /* bp is the last entry on the waitq. */ 27002 ASSERT(next_waitq_bp == NULL); 27003 un->un_waitq_tailp = prev_waitq_bp; 27004 } 27005 prev_waitq_bp->av_forw = next_waitq_bp; 27006 } 27007 bp->av_forw = NULL; 27008 27009 /* 27010 * update kstat since the bp is moved out of 27011 * the waitq 27012 */ 27013 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 27014 27015 /* 27016 * Now put the bp onto the failfast queue. 27017 */ 27018 if (un->un_failfast_headp == NULL) { 27019 /* failfast queue is currently empty */ 27020 ASSERT(un->un_failfast_tailp == NULL); 27021 un->un_failfast_headp = 27022 un->un_failfast_tailp = bp; 27023 } else { 27024 /* Add the bp to the end of the failfast q */ 27025 ASSERT(un->un_failfast_tailp != NULL); 27026 ASSERT(un->un_failfast_tailp->b_flags & 27027 B_FAILFAST); 27028 un->un_failfast_tailp->av_forw = bp; 27029 un->un_failfast_tailp = bp; 27030 } 27031 } 27032 } 27033 27034 /* 27035 * Now return all bp's on the failfast queue to their owners. 27036 */ 27037 while ((bp = un->un_failfast_headp) != NULL) { 27038 27039 un->un_failfast_headp = bp->av_forw; 27040 if (un->un_failfast_headp == NULL) { 27041 un->un_failfast_tailp = NULL; 27042 } 27043 27044 /* 27045 * We want to return the bp with a failure error code, but 27046 * we do not want a call to sd_start_cmds() to occur here, 27047 * so use sd_return_failed_command_no_restart() instead of 27048 * sd_return_failed_command(). 27049 */ 27050 sd_return_failed_command_no_restart(un, bp, EIO); 27051 } 27052 27053 /* Flush the xbuf queues if required. 
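Each queued xbuf is tested with sd_failfast_flushq_callback() below to decide whether it should be flushed.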
*/ 27054 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 27055 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 27056 } 27057 27058 SD_TRACE(SD_LOG_IO_FAILFAST, un, 27059 "sd_failfast_flushq: exit: un:0x%p\n", un); 27060 } 27061 27062 27063 /* 27064 * Function: sd_failfast_flushq_callback 27065 * 27066 * Description: Return TRUE if the given bp meets the criteria for failfast 27067 * flushing. Used with ddi_xbuf_flushq(9F). 27068 * 27069 * Arguments: bp - ptr to buf struct to be examined. 27070 * 27071 * Context: Any 27072 */ 27073 27074 static int 27075 sd_failfast_flushq_callback(struct buf *bp) 27076 { 27077 /* 27078 * Return TRUE if (1) we want to flush ALL bufs when the failfast 27079 * state is entered; OR (2) the given bp has B_FAILFAST set. 27080 */ 27081 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 27082 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 27083 } 27084 27085 27086 27087 /* 27088 * Function: sd_setup_next_xfer 27089 * 27090 * Description: Prepare the next I/O operation using DMA_PARTIAL. 27091 * 27092 */ 27093 27094 static int 27095 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 27096 struct scsi_pkt *pkt, struct sd_xbuf *xp) 27097 { 27098 ssize_t num_blks_not_xfered; 27099 daddr_t strt_blk_num; 27100 ssize_t bytes_not_xfered; 27101 int rval; 27102 27103 ASSERT(pkt->pkt_resid == 0); 27104 27105 /* 27106 * Calculate next block number and amount to be transferred. 27107 * 27108 * How much data has NOT been transferred to the HBA yet. 27109 */ 27110 bytes_not_xfered = xp->xb_dma_resid; 27111 27112 /* 27113 * Figure how many blocks have NOT been transferred to the HBA yet. 27114 */ 27115 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 27116 27117 /* 27118 * Set the starting block number to the end of what WAS transferred. 27119 */ 27120 strt_blk_num = xp->xb_blkno + 27121 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 27122 27123 /* 27124 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 27125 * will call scsi_init_pkt with NULL_FUNC so we do not have to release 27126 * the disk mutex here. 27127 */ 27128 rval = sd_setup_next_rw_pkt(un, pkt, bp, 27129 strt_blk_num, num_blks_not_xfered); 27130 27131 if (rval == 0) { 27132 27133 /* 27134 * Success. 27135 * 27136 * Adjust things if there are still more blocks to be 27137 * transferred. 27138 */ 27139 xp->xb_dma_resid = pkt->pkt_resid; 27140 pkt->pkt_resid = 0; 27141 27142 return (1); 27143 } 27144 27145 /* 27146 * There's really only one possible return value from 27147 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 27148 * returns NULL. 27149 */ 27150 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 27151 27152 bp->b_resid = bp->b_bcount; 27153 bp->b_flags |= B_ERROR; 27154 27155 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27156 "Error setting up next portion of DMA transfer\n"); 27157 27158 return (0); 27159 } 27160 27161 /* 27162 * Function: sd_panic_for_res_conflict 27163 * 27164 * Description: Call panic with a string formatted with "Reservation Conflict" 27165 * and a human readable identifier indicating the SD instance 27166 * that experienced the reservation conflict. 27167 * 27168 * Arguments: un - pointer to the soft state struct for the instance. 27169 * 27170 * Context: may execute in interrupt context.
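 * The resulting panic string has the form:
 *   Reservation Conflict
 *   Disk: /pci@0,0/pci1000,3060@3/sd@1,0
 * where the device path shown is only a made-up example.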
27171 */ 27172 27173 #define SD_RESV_CONFLICT_FMT_LEN 40 27174 void 27175 sd_panic_for_res_conflict(struct sd_lun *un) 27176 { 27177 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 27178 char path_str[MAXPATHLEN]; 27179 27180 (void) snprintf(panic_str, sizeof (panic_str), 27181 "Reservation Conflict\nDisk: %s", 27182 ddi_pathname(SD_DEVINFO(un), path_str)); 27183 27184 panic(panic_str); 27185 } 27186 27187 /* 27188 * Note: The following sd_faultinjection_ioctl() routines implement 27189 * driver support for handling fault injection for error analysis, 27190 * causing faults in multiple layers of the driver. 27191 * 27192 */ 27193 27194 #ifdef SD_FAULT_INJECTION 27195 static uint_t sd_fault_injection_on = 0; 27196 27197 /* 27198 * Function: sd_faultinjection_ioctl() 27199 * 27200 * Description: This routine is the driver entry point for handling 27201 * fault injection ioctls to inject errors into the 27202 * layer model. 27203 * 27204 * Arguments: cmd - the ioctl cmd received 27205 * arg - the user argument, also used to return data 27206 */ 27207 27208 static void 27209 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 27210 27211 uint_t i; 27212 uint_t rval; 27213 27214 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 27215 27216 mutex_enter(SD_MUTEX(un)); 27217 27218 switch (cmd) { 27219 case SDIOCRUN: 27220 /* Allow pushed faults to be injected */ 27221 SD_INFO(SD_LOG_SDTEST, un, 27222 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 27223 27224 sd_fault_injection_on = 1; 27225 27226 SD_INFO(SD_LOG_IOERR, un, 27227 "sd_faultinjection_ioctl: run finished\n"); 27228 break; 27229 27230 case SDIOCSTART: 27231 /* Start Injection Session */ 27232 SD_INFO(SD_LOG_SDTEST, un, 27233 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 27234 27235 sd_fault_injection_on = 0; 27236 un->sd_injection_mask = 0xFFFFFFFF; 27237 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27238 un->sd_fi_fifo_pkt[i] = NULL; 27239 un->sd_fi_fifo_xb[i] = NULL; 27240 un->sd_fi_fifo_un[i] = NULL; 27241 un->sd_fi_fifo_arq[i] = NULL; 27242 } 27243 un->sd_fi_fifo_start = 0; 27244 un->sd_fi_fifo_end = 0; 27245 27246 mutex_enter(&(un->un_fi_mutex)); 27247 un->sd_fi_log[0] = '\0'; 27248 un->sd_fi_buf_len = 0; 27249 mutex_exit(&(un->un_fi_mutex)); 27250 27251 SD_INFO(SD_LOG_IOERR, un, 27252 "sd_faultinjection_ioctl: start finished\n"); 27253 break; 27254 27255 case SDIOCSTOP: 27256 /* Stop Injection Session */ 27257 SD_INFO(SD_LOG_SDTEST, un, 27258 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 27259 sd_fault_injection_on = 0; 27260 un->sd_injection_mask = 0x0; 27261 27262 /* Empty stray or unused structs from the fifo */ 27263 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27264 if (un->sd_fi_fifo_pkt[i] != NULL) { 27265 kmem_free(un->sd_fi_fifo_pkt[i], 27266 sizeof (struct sd_fi_pkt)); 27267 } 27268 if (un->sd_fi_fifo_xb[i] != NULL) { 27269 kmem_free(un->sd_fi_fifo_xb[i], 27270 sizeof (struct sd_fi_xb)); 27271 } 27272 if (un->sd_fi_fifo_un[i] != NULL) { 27273 kmem_free(un->sd_fi_fifo_un[i], 27274 sizeof (struct sd_fi_un)); 27275 } 27276 if (un->sd_fi_fifo_arq[i] != NULL) { 27277 kmem_free(un->sd_fi_fifo_arq[i], 27278 sizeof (struct sd_fi_arq)); 27279 } 27280 un->sd_fi_fifo_pkt[i] = NULL; 27281 un->sd_fi_fifo_un[i] = NULL; 27282 un->sd_fi_fifo_xb[i] = NULL; 27283 un->sd_fi_fifo_arq[i] = NULL; 27284 } 27285 un->sd_fi_fifo_start = 0; 27286 un->sd_fi_fifo_end = 0; 27287 27288 SD_INFO(SD_LOG_IOERR, un, 27289 "sd_faultinjection_ioctl: stop finished\n"); 27290 break; 27291 27292 case SDIOCINSERTPKT: 27293 /* Store a
packet struct to be pushed onto the fifo */ 27294 SD_INFO(SD_LOG_SDTEST, un, 27295 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 27296 27297 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27298 27299 sd_fault_injection_on = 0; 27300 27301 /* No more than SD_FI_MAX_ERROR allowed in the queue */ 27302 if (un->sd_fi_fifo_pkt[i] != NULL) { 27303 kmem_free(un->sd_fi_fifo_pkt[i], 27304 sizeof (struct sd_fi_pkt)); 27305 } 27306 if (arg != NULL) { 27307 un->sd_fi_fifo_pkt[i] = 27308 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 27309 if (un->sd_fi_fifo_pkt[i] == NULL) { 27310 /* Alloc failed; don't store anything */ 27311 break; 27312 } 27313 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 27314 sizeof (struct sd_fi_pkt), 0); 27315 if (rval == -1) { 27316 kmem_free(un->sd_fi_fifo_pkt[i], 27317 sizeof (struct sd_fi_pkt)); 27318 un->sd_fi_fifo_pkt[i] = NULL; 27319 } 27320 } else { 27321 SD_INFO(SD_LOG_IOERR, un, 27322 "sd_faultinjection_ioctl: pkt null\n"); 27323 } 27324 break; 27325 27326 case SDIOCINSERTXB: 27327 /* Store an xb struct to be pushed onto the fifo */ 27328 SD_INFO(SD_LOG_SDTEST, un, 27329 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 27330 27331 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27332 27333 sd_fault_injection_on = 0; 27334 27335 if (un->sd_fi_fifo_xb[i] != NULL) { 27336 kmem_free(un->sd_fi_fifo_xb[i], 27337 sizeof (struct sd_fi_xb)); 27338 un->sd_fi_fifo_xb[i] = NULL; 27339 } 27340 if (arg != NULL) { 27341 un->sd_fi_fifo_xb[i] = 27342 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 27343 if (un->sd_fi_fifo_xb[i] == NULL) { 27344 /* Alloc failed; don't store anything */ 27345 break; 27346 } 27347 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 27348 sizeof (struct sd_fi_xb), 0); 27349 27350 if (rval == -1) { 27351 kmem_free(un->sd_fi_fifo_xb[i], 27352 sizeof (struct sd_fi_xb)); 27353 un->sd_fi_fifo_xb[i] = NULL; 27354 } 27355 } else { 27356 SD_INFO(SD_LOG_IOERR, un, 27357 "sd_faultinjection_ioctl: xb null\n"); 27358 } 27359 break; 27360 27361 case SDIOCINSERTUN: 27362 /* Store a un struct to be pushed onto the fifo */ 27363 SD_INFO(SD_LOG_SDTEST, un, 27364 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 27365 27366 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27367 27368 sd_fault_injection_on = 0; 27369 27370 if (un->sd_fi_fifo_un[i] != NULL) { 27371 kmem_free(un->sd_fi_fifo_un[i], 27372 sizeof (struct sd_fi_un)); 27373 un->sd_fi_fifo_un[i] = NULL; 27374 } 27375 if (arg != NULL) { 27376 un->sd_fi_fifo_un[i] = 27377 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 27378 if (un->sd_fi_fifo_un[i] == NULL) { 27379 /* Alloc failed; don't store anything */ 27380 break; 27381 } 27382 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 27383 sizeof (struct sd_fi_un), 0); 27384 if (rval == -1) { 27385 kmem_free(un->sd_fi_fifo_un[i], 27386 sizeof (struct sd_fi_un)); 27387 un->sd_fi_fifo_un[i] = NULL; 27388 } 27389 27390 } else { 27391 SD_INFO(SD_LOG_IOERR, un, 27392 "sd_faultinjection_ioctl: un null\n"); 27393 } 27394 27395 break; 27396 27397 case SDIOCINSERTARQ: 27398 /* Store an arq struct to be pushed onto the fifo */ 27399 SD_INFO(SD_LOG_SDTEST, un, 27400 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 27401 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27402 27403 sd_fault_injection_on = 0; 27404 27405 if (un->sd_fi_fifo_arq[i] != NULL) { 27406 kmem_free(un->sd_fi_fifo_arq[i], 27407 sizeof (struct sd_fi_arq)); 27408 un->sd_fi_fifo_arq[i] = NULL; 27409 } 27410 if (arg != NULL) { 27411 un->sd_fi_fifo_arq[i] = 27412 kmem_alloc(sizeof (struct sd_fi_arq),
KM_NOSLEEP); 27413 if (un->sd_fi_fifo_arq[i] == NULL) { 27414 /* Alloc failed; don't store anything */ 27415 break; 27416 } 27417 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 27418 sizeof (struct sd_fi_arq), 0); 27419 if (rval == -1) { 27420 kmem_free(un->sd_fi_fifo_arq[i], 27421 sizeof (struct sd_fi_arq)); 27422 un->sd_fi_fifo_arq[i] = NULL; 27423 } 27424 27425 } else { 27426 SD_INFO(SD_LOG_IOERR, un, 27427 "sd_faultinjection_ioctl: arq null\n"); 27428 } 27429 27430 break; 27431 27432 case SDIOCPUSH: 27433 /* Push stored xb, pkt, un, and arq onto fifo */ 27434 sd_fault_injection_on = 0; 27435 27436 if (arg != NULL) { 27437 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 27438 if (rval != -1 && 27439 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 27440 un->sd_fi_fifo_end += i; 27441 } 27442 } else { 27443 SD_INFO(SD_LOG_IOERR, un, 27444 "sd_faultinjection_ioctl: push arg null\n"); 27445 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 27446 un->sd_fi_fifo_end++; 27447 } 27448 } 27449 SD_INFO(SD_LOG_IOERR, un, 27450 "sd_faultinjection_ioctl: push to end=%d\n", 27451 un->sd_fi_fifo_end); 27452 break; 27453 27454 case SDIOCRETRIEVE: 27455 /* Return buffer of log from Injection session */ 27456 SD_INFO(SD_LOG_SDTEST, un, 27457 "sd_faultinjection_ioctl: Injecting Fault Retrieve"); 27458 27459 sd_fault_injection_on = 0; 27460 27461 mutex_enter(&(un->un_fi_mutex)); 27462 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 27463 un->sd_fi_buf_len+1, 0); 27464 mutex_exit(&(un->un_fi_mutex)); 27465 27466 if (rval == -1) { 27467 /* 27468 * arg is possibly invalid; set 27469 * it to NULL for return 27470 */ 27471 arg = NULL; 27472 } 27473 break; 27474 } 27475 27476 mutex_exit(SD_MUTEX(un)); 27477 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 27478 " exit\n"); 27479 } 27480 27481 27482 /* 27483 * Function: sd_injection_log() 27484 * 27485 * Description: This routine adds buf to the already existing injection log 27486 * for retrieval via faultinjection_ioctl for use in fault 27487 * detection and recovery. 27488 * 27489 * Arguments: buf - the string to add to the log 27490 */ 27491 27492 static void 27493 sd_injection_log(char *buf, struct sd_lun *un) 27494 { 27495 uint_t len; 27496 27497 ASSERT(un != NULL); 27498 ASSERT(buf != NULL); 27499 27500 mutex_enter(&(un->un_fi_mutex)); 27501 27502 len = min(strlen(buf), 255); 27503 /* Add logged value to Injection log to be returned later */ 27504 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 27505 uint_t offset = strlen((char *)un->sd_fi_log); 27506 char *destp = (char *)un->sd_fi_log + offset; 27507 int i; 27508 for (i = 0; i < len; i++) { 27509 *destp++ = *buf++; 27510 } 27511 un->sd_fi_buf_len += len; 27512 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 27513 } 27514 27515 mutex_exit(&(un->un_fi_mutex)); 27516 } 27517 27518 27519 /* 27520 * Function: sd_faultinjection() 27521 * 27522 * Description: This routine takes the pkt and changes its 27523 * content based on the error injection scenario.
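 * Injected errors are consumed in insertion order: the fifo slot
 * used below is sd_fi_fifo_start % SD_FI_MAX_ERROR, and
 * sd_fi_fifo_start is advanced once the stored values have been
 * applied to the pkt, xbuf, un and arq structures.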


/*
 *    Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 *   Arguments: pktp	- packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off, return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}


	/* take the next set off the fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether an override was
	 * staged, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */
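
/*
 * sd_fi_fifo_start and sd_fi_fifo_end above behave as monotonically
 * increasing producer/consumer counters over a ring of SD_FI_MAX_ERROR
 * slots: inserts stage into slot (end % SD_FI_MAX_ERROR), SDIOCPUSH
 * advances end, and each injection consumes slot (start % SD_FI_MAX_ERROR)
 * and advances start.  The standalone sketch below demonstrates just that
 * index arithmetic.  The slot count and int payload are stand-ins, and the
 * distance-based full check is the conventional variant; the driver's
 * SDIOCPUSH instead bounds the absolute counter against SD_FI_MAX_ERROR.
 */
#if 0	/* illustrative sketch; not part of the driver */
#include <stdio.h>

#define	FI_MAX_ERROR	10	/* stand-in for SD_FI_MAX_ERROR */

static int slots[FI_MAX_ERROR];
static unsigned int fifo_start, fifo_end;	/* never wrapped back */

static int
fifo_stage_and_push(int payload)
{
	/* refuse to overrun the consumer */
	if (fifo_end - fifo_start >= FI_MAX_ERROR)
		return (-1);
	slots[fifo_end % FI_MAX_ERROR] = payload;	/* SDIOCINSERT* */
	fifo_end++;					/* SDIOCPUSH */
	return (0);
}

static int
fifo_consume(int *payload)
{
	if (fifo_start == fifo_end)	/* empty: nothing to inject */
		return (-1);
	*payload = slots[fifo_start % FI_MAX_ERROR];
	fifo_start++;			/* as sd_faultinjection() does */
	return (0);
}

int
main(void)
{
	int v;

	(void) fifo_stage_and_push(42);
	while (fifo_consume(&v) == 0)
		(void) printf("injected %d\n", v);
	return (0);
}
#endif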
27717 * 27718 * -------------------------------------------------------------- 27719 * Direct-Access removable media hotpluggable | Check Label 27720 * ------------------------------------------------------------- 27721 * false false false | No 27722 * false false true | No 27723 * false true false | Yes 27724 * false true true | Yes 27725 * true x x | Yes 27726 * -------------------------------------------------------------- 27727 * 27728 * 27729 * 4. Building default VTOC label 27730 * 27731 * As section 3 says, sd checks if some kinds of devices have VTOC label. 27732 * If those devices have no valid VTOC label, sd(7d) will attempt to 27733 * create default VTOC for them. Currently sd creates default VTOC label 27734 * for all devices on x86 platform (VTOC_16), but only for removable 27735 * media devices on SPARC (VTOC_8). 27736 * 27737 * ----------------------------------------------------------- 27738 * removable media hotpluggable platform | Default Label 27739 * ----------------------------------------------------------- 27740 * false false sparc | No 27741 * false true x86 | Yes 27742 * false true sparc | Yes 27743 * true x x | Yes 27744 * ---------------------------------------------------------- 27745 * 27746 * 27747 * 5. Supported blocksizes of target devices 27748 * 27749 * Sd supports non-512-byte blocksize for removable media devices only. 27750 * For other devices, only 512-byte blocksize is supported. This may be 27751 * changed in near future because some RAID devices require non-512-byte 27752 * blocksize 27753 * 27754 * ----------------------------------------------------------- 27755 * removable media hotpluggable | non-512-byte blocksize 27756 * ----------------------------------------------------------- 27757 * false false | No 27758 * false true | No 27759 * true x | Yes 27760 * ----------------------------------------------------------- 27761 * 27762 * 27763 * 6. Automatic mount & unmount 27764 * 27765 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query 27766 * if a device is removable media device. It return 1 for removable media 27767 * devices, and 0 for others. 27768 * 27769 * The automatic mounting subsystem should distinguish between the types 27770 * of devices and apply automounting policies to each. 27771 * 27772 * 27773 * 7. fdisk partition management 27774 * 27775 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver 27776 * just supports fdisk partitions on x86 platform. On sparc platform, sd 27777 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 27778 * fdisk partitions on both x86 and SPARC platform. 27779 * 27780 * ----------------------------------------------------------- 27781 * platform removable media USB/1394 | fdisk supported 27782 * ----------------------------------------------------------- 27783 * x86 X X | true 27784 * ------------------------------------------------------------ 27785 * sparc X X | false 27786 * ------------------------------------------------------------ 27787 * 27788 * 27789 * 8. MBOOT/MBR 27790 * 27791 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support 27792 * read/write mboot for removable media devices on sparc platform. 
27793 * 27794 * ----------------------------------------------------------- 27795 * platform removable media USB/1394 | mboot supported 27796 * ----------------------------------------------------------- 27797 * x86 X X | true 27798 * ------------------------------------------------------------ 27799 * sparc false false | false 27800 * sparc false true | true 27801 * sparc true false | true 27802 * sparc true true | true 27803 * ------------------------------------------------------------ 27804 * 27805 * 27806 * 9. error handling during opening device 27807 * 27808 * If failed to open a disk device, an errno is returned. For some kinds 27809 * of errors, different errno is returned depending on if this device is 27810 * a removable media device. This brings USB/1394 hard disks in line with 27811 * expected hard disk behavior. It is not expected that this breaks any 27812 * application. 27813 * 27814 * ------------------------------------------------------ 27815 * removable media hotpluggable | errno 27816 * ------------------------------------------------------ 27817 * false false | EIO 27818 * false true | EIO 27819 * true x | ENXIO 27820 * ------------------------------------------------------ 27821 * 27822 * 27823 * 11. ioctls: DKIOCEJECT, CDROMEJECT 27824 * 27825 * These IOCTLs are applicable only to removable media devices. 27826 * 27827 * ----------------------------------------------------------- 27828 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT 27829 * ----------------------------------------------------------- 27830 * false false | No 27831 * false true | No 27832 * true x | Yes 27833 * ----------------------------------------------------------- 27834 * 27835 * 27836 * 12. Kstats for partitions 27837 * 27838 * sd creates partition kstat for non-removable media devices. USB and 27839 * Firewire hard disks now have partition kstats 27840 * 27841 * ------------------------------------------------------ 27842 * removable media hotpluggable | kstat 27843 * ------------------------------------------------------ 27844 * false false | Yes 27845 * false true | Yes 27846 * true x | No 27847 * ------------------------------------------------------ 27848 * 27849 * 27850 * 13. Removable media & hotpluggable properties 27851 * 27852 * Sd driver creates a "removable-media" property for removable media 27853 * devices. Parent nexus drivers create a "hotpluggable" property if 27854 * it supports hotplugging. 27855 * 27856 * --------------------------------------------------------------------- 27857 * removable media hotpluggable | "removable-media" " hotpluggable" 27858 * --------------------------------------------------------------------- 27859 * false false | No No 27860 * false true | No Yes 27861 * true false | Yes No 27862 * true true | Yes Yes 27863 * --------------------------------------------------------------------- 27864 * 27865 * 27866 * 14. Power Management 27867 * 27868 * sd only power manages removable media devices or devices that support 27869 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250) 27870 * 27871 * A parent nexus that supports hotplugging can also set "pm-capable" 27872 * if the disk can be power managed. 
27873 * 27874 * ------------------------------------------------------------ 27875 * removable media hotpluggable pm-capable | power manage 27876 * ------------------------------------------------------------ 27877 * false false false | No 27878 * false false true | Yes 27879 * false true false | No 27880 * false true true | Yes 27881 * true x x | Yes 27882 * ------------------------------------------------------------ 27883 * 27884 * USB and firewire hard disks can now be power managed independently 27885 * of the framebuffer 27886 * 27887 * 27888 * 15. Support for USB disks with capacity larger than 1TB 27889 * 27890 * Currently, sd doesn't permit a fixed disk device with capacity 27891 * larger than 1TB to be used in a 32-bit operating system environment. 27892 * However, sd doesn't do that for removable media devices. Instead, it 27893 * assumes that removable media devices cannot have a capacity larger 27894 * than 1TB. Therefore, using those devices on 32-bit system is partially 27895 * supported, which can cause some unexpected results. 27896 * 27897 * --------------------------------------------------------------------- 27898 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 27899 * --------------------------------------------------------------------- 27900 * false false | true | no 27901 * false true | true | no 27902 * true false | true | Yes 27903 * true true | true | Yes 27904 * --------------------------------------------------------------------- 27905 * 27906 * 27907 * 16. Check write-protection at open time 27908 * 27909 * When a removable media device is being opened for writing without NDELAY 27910 * flag, sd will check if this device is writable. If attempting to open 27911 * without NDELAY flag a write-protected device, this operation will abort. 27912 * 27913 * ------------------------------------------------------------ 27914 * removable media USB/1394 | WP Check 27915 * ------------------------------------------------------------ 27916 * false false | No 27917 * false true | No 27918 * true false | Yes 27919 * true true | Yes 27920 * ------------------------------------------------------------ 27921 * 27922 * 27923 * 17. syslog when corrupted VTOC is encountered 27924 * 27925 * Currently, if an invalid VTOC is encountered, sd only print syslog 27926 * for fixed SCSI disks. 27927 * ------------------------------------------------------------ 27928 * removable media USB/1394 | print syslog 27929 * ------------------------------------------------------------ 27930 * false false | Yes 27931 * false true | No 27932 * true false | No 27933 * true true | No 27934 * ------------------------------------------------------------ 27935 */ 27936 static void 27937 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi) 27938 { 27939 int pm_capable_prop; 27940 27941 ASSERT(un->un_sd); 27942 ASSERT(un->un_sd->sd_inq); 27943 27944 /* 27945 * Enable SYNC CACHE support for all devices. 27946 */ 27947 un->un_f_sync_cache_supported = TRUE; 27948 27949 if (un->un_sd->sd_inq->inq_rmb) { 27950 /* 27951 * The media of this device is removable. And for this kind 27952 * of devices, it is possible to change medium after opening 27953 * devices. Thus we should support this operation. 
27954 */ 27955 un->un_f_has_removable_media = TRUE; 27956 27957 /* 27958 * support non-512-byte blocksize of removable media devices 27959 */ 27960 un->un_f_non_devbsize_supported = TRUE; 27961 27962 /* 27963 * Assume that all removable media devices support DOOR_LOCK 27964 */ 27965 un->un_f_doorlock_supported = TRUE; 27966 27967 /* 27968 * For a removable media device, it is possible to be opened 27969 * with NDELAY flag when there is no media in drive, in this 27970 * case we don't care if device is writable. But if without 27971 * NDELAY flag, we need to check if media is write-protected. 27972 */ 27973 un->un_f_chk_wp_open = TRUE; 27974 27975 /* 27976 * need to start a SCSI watch thread to monitor media state, 27977 * when media is being inserted or ejected, notify syseventd. 27978 */ 27979 un->un_f_monitor_media_state = TRUE; 27980 27981 /* 27982 * Some devices don't support START_STOP_UNIT command. 27983 * Therefore, we'd better check if a device supports it 27984 * before sending it. 27985 */ 27986 un->un_f_check_start_stop = TRUE; 27987 27988 /* 27989 * support eject media ioctl: 27990 * FDEJECT, DKIOCEJECT, CDROMEJECT 27991 */ 27992 un->un_f_eject_media_supported = TRUE; 27993 27994 /* 27995 * Because many removable-media devices don't support 27996 * LOG_SENSE, we couldn't use this command to check if 27997 * a removable media device support power-management. 27998 * We assume that they support power-management via 27999 * START_STOP_UNIT command and can be spun up and down 28000 * without limitations. 28001 */ 28002 un->un_f_pm_supported = TRUE; 28003 28004 /* 28005 * Need to create a zero length (Boolean) property 28006 * removable-media for the removable media devices. 28007 * Note that the return value of the property is not being 28008 * checked, since if unable to create the property 28009 * then do not want the attach to fail altogether. Consistent 28010 * with other property creation in attach. 28011 */ 28012 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 28013 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 28014 28015 } else { 28016 /* 28017 * create device ID for device 28018 */ 28019 un->un_f_devid_supported = TRUE; 28020 28021 /* 28022 * Spin up non-removable-media devices once it is attached 28023 */ 28024 un->un_f_attach_spinup = TRUE; 28025 28026 /* 28027 * According to SCSI specification, Sense data has two kinds of 28028 * format: fixed format, and descriptor format. At present, we 28029 * don't support descriptor format sense data for removable 28030 * media. 28031 */ 28032 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 28033 un->un_f_descr_format_supported = TRUE; 28034 } 28035 28036 /* 28037 * kstats are created only for non-removable media devices. 28038 * 28039 * Set this in sd.conf to 0 in order to disable kstats. The 28040 * default is 1, so they are enabled by default. 28041 */ 28042 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 28043 SD_DEVINFO(un), DDI_PROP_DONTPASS, 28044 "enable-partition-kstats", 1)); 28045 28046 /* 28047 * Check if HBA has set the "pm-capable" property. 28048 * If "pm-capable" exists and is non-zero then we can 28049 * power manage the device without checking the start/stop 28050 * cycle count log sense page. 28051 * 28052 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 28053 * then we should not power manage the device. 28054 * 28055 * If "pm-capable" doesn't exist then pm_capable_prop will 28056 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * pm-capable property exists.
			 *
			 * Convert "TRUE" values for pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make it easier to check
			 * later. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un,
			    un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}
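
/*
 * The "pm-capable" handling above is a three-way decision keyed on a
 * property that may be absent: absent (-1) means "probe via LOG_SENSE",
 * zero means "never power manage", and any other value means "power manage
 * unconditionally".  The standalone sketch below restates that decision
 * table; the constants mirror SD_PM_CAPABLE_* but are redefined locally so
 * the sketch is self-contained.
 */
#if 0	/* illustrative sketch; not part of the driver */
#include <stdio.h>

#define	PM_CAPABLE_UNDEFINED	(-1)	/* property not present */
#define	PM_CAPABLE_FALSE	0	/* property present, zero */

static const char *
pm_policy(int pm_capable_prop)
{
	if (pm_capable_prop == PM_CAPABLE_UNDEFINED)
		return ("check start/stop cycle page via LOG_SENSE");
	if (pm_capable_prop == PM_CAPABLE_FALSE)
		return ("do not power manage");
	return ("power manage without LOG_SENSE");
}

int
main(void)
{
	int v;

	for (v = -1; v <= 1; v++)
		(void) printf("pm-capable=%d -> %s\n", v, pm_policy(v));
	return (0);
}
#endif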

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. start_block is in units
 *	of the system block size; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t buffer_size = reqlength;
	int rval;
	diskaddr_t cap;
	uint32_t lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize, need to re-adjust
		 * blkno and save the index to beginning of dk_label
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_tg_rdwr: label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t cap;
	uint32_t lbasize;
	int path_flag = (int)(uintptr_t)tg_cookie;
	int ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
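
/*
 * The NOT_DEVBSIZE() path in sd_tg_rdwr() above hides some easy-to-get-wrong
 * arithmetic: a request expressed in system blocks must be widened to whole
 * target blocks, read (or read-modify-written) at that granularity, and the
 * caller's bytes extracted at the right offset.  The standalone sketch below
 * reproduces just that arithmetic for a 512-byte system block and a
 * 2048-byte target block; the function and variable names are local
 * stand-ins, not the driver's.
 */
#if 0	/* illustrative sketch; not part of the driver */
#include <stdio.h>
#include <stdint.h>

#define	SYS_BLOCKSIZE	512		/* system block size, in bytes */

static void
widen_request(uint64_t start_block, size_t reqlength, uint32_t tgt_blocksize)
{
	uint64_t first_byte = start_block * SYS_BLOCKSIZE;
	uint64_t real_addr = first_byte / tgt_blocksize;
	uint64_t end_block = (first_byte + reqlength + tgt_blocksize - 1) /
	    tgt_blocksize;
	size_t buffer_size = (end_block - real_addr) * tgt_blocksize;
	/* byte offset of the caller's data inside the widened buffer */
	size_t offset = first_byte - real_addr * tgt_blocksize;

	(void) printf("tgt blk %llu..%llu, alloc %zu, copy at +%zu\n",
	    (unsigned long long)real_addr, (unsigned long long)end_block - 1,
	    buffer_size, offset);
}

int
main(void)
{
	/* one 512-byte system block at LBA 5 on a 2048-byte-block device */
	widen_request(5, 512, 2048);	/* -> tgt blk 1..1, alloc 2048, +512 */
	return (0);
}
#endif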