/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 *
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif

/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
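
/*
 * Illustrative sketch (editorial note, not part of the original source):
 * with the aliases above, a single definition such as
 *
 *	int sd_io_time = SD_IO_TIME;
 *
 * produces the symbol "sd_io_time" in the sd module, but "ssd_io_time"
 * when the same source is compiled with -D__fibre for the ssd module.
 * Each module therefore exports uniquely named globals that a debugger
 * can resolve unambiguously.
 */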
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void *sd_state				= NULL;
int sd_io_time				= SD_IO_TIME;
int sd_failfast_enable			= 1;
int sd_ua_retry_count			= SD_UA_RETRY_COUNT;
int sd_report_pfa			= 1;
int sd_max_throttle			= SD_MAX_THROTTLE;
int sd_min_throttle			= SD_MIN_THROTTLE;
int sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable		= TRUE;

int sd_retry_on_reservation_conflict	= 1;
int sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of  "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300", SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
"*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL }, 717 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL }, 718 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL }, 719 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 720 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 721 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 722 &symbios_properties }, 723 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 724 &lsi_properties_scsi }, 725 #if defined(__i386) || defined(__amd64) 726 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 727 | SD_CONF_BSET_READSUB_BCD 728 | SD_CONF_BSET_READ_TOC_ADDR_BCD 729 | SD_CONF_BSET_NO_READ_HEADER 730 | SD_CONF_BSET_READ_CD_XD4), NULL }, 731 732 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 733 | SD_CONF_BSET_READSUB_BCD 734 | SD_CONF_BSET_READ_TOC_ADDR_BCD 735 | SD_CONF_BSET_NO_READ_HEADER 736 | SD_CONF_BSET_READ_CD_XD4), NULL }, 737 #endif /* __i386 || __amd64 */ 738 #endif /* sparc NON-fibre or NON-sparc platforms */ 739 740 #if (defined(SD_PROP_TST)) 741 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 742 | SD_CONF_BSET_CTYPE 743 | SD_CONF_BSET_NRR_COUNT 744 | SD_CONF_BSET_FAB_DEVID 745 | SD_CONF_BSET_NOCACHE 746 | SD_CONF_BSET_BSY_RETRY_COUNT 747 | SD_CONF_BSET_PLAYMSF_BCD 748 | SD_CONF_BSET_READSUB_BCD 749 | SD_CONF_BSET_READ_TOC_TRK_BCD 750 | SD_CONF_BSET_READ_TOC_ADDR_BCD 751 | SD_CONF_BSET_NO_READ_HEADER 752 | SD_CONF_BSET_READ_CD_XD4 753 | SD_CONF_BSET_RST_RETRIES 754 | SD_CONF_BSET_RSV_REL_TIME 755 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 756 #endif 757 }; 758 759 static const int sd_disk_table_size = 760 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 761 762 763 764 #define SD_INTERCONNECT_PARALLEL 0 765 #define SD_INTERCONNECT_FABRIC 1 766 #define SD_INTERCONNECT_FIBRE 2 767 #define SD_INTERCONNECT_SSA 3 768 #define SD_INTERCONNECT_SATA 4 769 #define SD_IS_PARALLEL_SCSI(un) \ 770 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 771 #define SD_IS_SERIAL(un) \ 772 ((un)->un_interconnect_type == SD_INTERCONNECT_SATA) 773 774 /* 775 * Definitions used by device id registration routines 776 */ 777 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 778 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 779 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 780 781 static kmutex_t sd_sense_mutex = {0}; 782 783 /* 784 * Macros for updates of the driver state 785 */ 786 #define New_state(un, s) \ 787 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 788 #define Restore_state(un) \ 789 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 790 791 static struct sd_cdbinfo sd_cdbtab[] = { 792 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 793 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 794 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 795 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 796 }; 797 798 /* 799 * Specifies the number of seconds that must have elapsed since the last 800 * cmd. has completed for a device to be declared idle to the PM framework. 801 */ 802 static int sd_pm_idletime = 1; 803 804 /* 805 * Internal function prototypes 806 */ 807 808 #if (defined(__fibre)) 809 /* 810 * These #defines are to avoid namespace collisions that occur because this 811 * code is currently used to compile two separate driver modules: sd and ssd. 812 * All function names need to be treated this way (even if declared static) 813 * in order to allow the debugger to resolve the names properly. 
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
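
/*
 * Illustrative usage sketch (editorial note, not part of the original
 * source): a hypothetical caller that enables the write cache while
 * leaving the read cache setting untouched might do
 *
 *	if (sd_cache_control(un, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE) != 0) {
 *		(failed to update the caching mode page)
 *	}
 *
 * where rcd_flag drives the RCD (read cache disable) bit and wce_flag
 * drives the WCE (write cache enable) bit of the caching mode page, and
 * SD_CACHE_NOCHANGE leaves the corresponding bit as currently set.
 */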
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void sd_get_nv_sup(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
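
/*
 * Illustrative flow (editorial note, inferred from the prototypes above
 * and hedged accordingly): on a CHECK CONDITION, sd_handle_request_sense()
 * or sd_handle_auto_request_sense() obtains the sense data,
 * sd_validate_sense_data() verifies its length and format, and
 * sd_decode_sense() dispatches on the sense key, conceptually:
 *
 *	switch (scsi_sense_key(sense_datap)) {
 *	case KEY_NOT_READY:
 *		sd_sense_key_not_ready(un, sense_datap, bp, xp, pktp);
 *		break;
 *	case KEY_UNIT_ATTENTION:
 *		sd_sense_key_unit_attention(un, sense_datap, bp, xp, pktp);
 *		break;
 *	default:
 *		sd_sense_key_default(un, sense_datap, bp, xp, pktp);
 *		break;
 *	}
 */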
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
flag); 1477 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1478 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1479 static int sd_mhdioc_release(dev_t dev); 1480 static int sd_mhdioc_register_devid(dev_t dev); 1481 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1482 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1483 1484 /* 1485 * SCSI removable prototypes 1486 */ 1487 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1488 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1489 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1490 static int sr_pause_resume(dev_t dev, int mode); 1491 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1492 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1493 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1494 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1495 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1496 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1497 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1498 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1499 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1500 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1501 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1502 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1503 static int sr_eject(dev_t dev); 1504 static void sr_ejected(register struct sd_lun *un); 1505 static int sr_check_wp(dev_t dev); 1506 static int sd_check_media(dev_t dev, enum dkio_state state); 1507 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1508 static void sd_delayed_cv_broadcast(void *arg); 1509 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1510 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1511 1512 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1513 1514 /* 1515 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1516 */ 1517 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1518 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1519 static void sd_wm_cache_destructor(void *wm, void *un); 1520 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1521 daddr_t endb, ushort_t typ); 1522 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1523 daddr_t endb); 1524 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1525 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1526 static void sd_read_modify_write_task(void * arg); 1527 static int 1528 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1529 struct buf **bpp); 1530 1531 1532 /* 1533 * Function prototypes for failfast support. 
1534 */ 1535 static void sd_failfast_flushq(struct sd_lun *un); 1536 static int sd_failfast_flushq_callback(struct buf *bp); 1537 1538 /* 1539 * Function prototypes to check for lsi devices 1540 */ 1541 static void sd_is_lsi(struct sd_lun *un); 1542 1543 /* 1544 * Function prototypes for partial DMA support 1545 */ 1546 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1547 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1548 1549 1550 /* Function prototypes for cmlb */ 1551 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1552 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1553 1554 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1555 1556 /* 1557 * Constants for failfast support: 1558 * 1559 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1560 * failfast processing being performed. 1561 * 1562 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1563 * failfast processing on all bufs with B_FAILFAST set. 1564 */ 1565 1566 #define SD_FAILFAST_INACTIVE 0 1567 #define SD_FAILFAST_ACTIVE 1 1568 1569 /* 1570 * Bitmask to control behavior of buf(9S) flushes when a transition to 1571 * the failfast state occurs. Optional bits include: 1572 * 1573 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1574 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1575 * be flushed. 1576 * 1577 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1578 * driver, in addition to the regular wait queue. This includes the xbuf 1579 * queues. When clear, only the driver's wait queue will be flushed. 1580 */ 1581 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1582 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1583 1584 /* 1585 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1586 * to flush all queues within the driver. 1587 */ 1588 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1589 1590 1591 /* 1592 * SD Testing Fault Injection 1593 */ 1594 #ifdef SD_FAULT_INJECTION 1595 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1596 static void sd_faultinjection(struct scsi_pkt *pktp); 1597 static void sd_injection_log(char *buf, struct sd_lun *un); 1598 #endif 1599 1600 /* 1601 * Device driver ops vector 1602 */ 1603 static struct cb_ops sd_cb_ops = { 1604 sdopen, /* open */ 1605 sdclose, /* close */ 1606 sdstrategy, /* strategy */ 1607 nodev, /* print */ 1608 sddump, /* dump */ 1609 sdread, /* read */ 1610 sdwrite, /* write */ 1611 sdioctl, /* ioctl */ 1612 nodev, /* devmap */ 1613 nodev, /* mmap */ 1614 nodev, /* segmap */ 1615 nochpoll, /* poll */ 1616 sd_prop_op, /* cb_prop_op */ 1617 0, /* streamtab */ 1618 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1619 CB_REV, /* cb_rev */ 1620 sdaread, /* async I/O read entry point */ 1621 sdawrite /* async I/O write entry point */ 1622 }; 1623 1624 static struct dev_ops sd_ops = { 1625 DEVO_REV, /* devo_rev, */ 1626 0, /* refcnt */ 1627 sdinfo, /* info */ 1628 nulldev, /* identify */ 1629 sdprobe, /* probe */ 1630 sdattach, /* attach */ 1631 sddetach, /* detach */ 1632 nodev, /* reset */ 1633 &sd_cb_ops, /* driver operations */ 1634 NULL, /* bus operations */ 1635 sdpower /* power */ 1636 }; 1637 1638 1639 /* 1640 * This is the loadable module wrapper. 1641 */ 1642 #include <sys/modctl.h> 1643 1644 static struct modldrv modldrv = { 1645 &mod_driverops, /* Type of module. 
This one is a driver */
1646 	SD_MODULE_NAME,		/* Module name. */
1647 	&sd_ops			/* driver ops */
1648 };
1649
1650
1651 static struct modlinkage modlinkage = {
1652 	MODREV_1,
1653 	&modldrv,
1654 	NULL
1655 };
1656
1657 static cmlb_tg_ops_t sd_tgops = {
1658 	TG_DK_OPS_VERSION_1,
1659 	sd_tg_rdwr,
1660 	sd_tg_getinfo
1661 };
1662
1663 static struct scsi_asq_key_strings sd_additional_codes[] = {
1664 	0x81, 0, "Logical Unit is Reserved",
1665 	0x85, 0, "Audio Address Not Valid",
1666 	0xb6, 0, "Media Load Mechanism Failed",
1667 	0xB9, 0, "Audio Play Operation Aborted",
1668 	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1669 	0x53, 2, "Medium removal prevented",
1670 	0x6f, 0, "Authentication failed during key exchange",
1671 	0x6f, 1, "Key not present",
1672 	0x6f, 2, "Key not established",
1673 	0x6f, 3, "Read without proper authentication",
1674 	0x6f, 4, "Mismatched region to this logical unit",
1675 	0x6f, 5, "Region reset count error",
1676 	0xffff, 0x0, NULL
1677 };
1678
1679
1680 /*
1681  * Struct for passing printing information for sense data messages
1682  */
1683 struct sd_sense_info {
1684 	int	ssi_severity;
1685 	int	ssi_pfa_flag;
1686 };
1687
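/*
 * Example (for illustration): the sd_additional_codes[] table above is
 * passed to sense-decoding routines such as scsi_vu_errmsg(9F), so a
 * CHECK CONDITION carrying the vendor-unique ASC/ASCQ pair 0xb6/0x0 is
 * reported as "Media Load Mechanism Failed" rather than as an unknown
 * code.  The 0xffff entry is the table's terminating sentinel.
 */
1688 /*
1689  * Table of function pointers for iostart-side routines. Separate "chains"
1690  * of layered function calls are formed by placing the function pointers
1691  * sequentially in the desired order. Functions are called according to an
1692  * incrementing table index ordering. The last function in each chain must
1693  * be sd_core_iostart(). The corresponding iodone-side routines are expected
1694  * in the sd_iodone_chain[] array.
1695  *
1696  * Note: It may seem more natural to organize both the iostart and iodone
1697  * functions together, into an array of structures (or some similar
1698  * organization) with a common index, rather than two separate arrays which
1699  * must be maintained in synchronization. The purpose of this division is
1700  * to achieve improved performance: individual arrays allow for more
1701  * effective cache line utilization on certain platforms.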
1702 */ 1703 1704 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1705 1706 1707 static sd_chain_t sd_iostart_chain[] = { 1708 1709 /* Chain for buf IO for disk drive targets (PM enabled) */ 1710 sd_mapblockaddr_iostart, /* Index: 0 */ 1711 sd_pm_iostart, /* Index: 1 */ 1712 sd_core_iostart, /* Index: 2 */ 1713 1714 /* Chain for buf IO for disk drive targets (PM disabled) */ 1715 sd_mapblockaddr_iostart, /* Index: 3 */ 1716 sd_core_iostart, /* Index: 4 */ 1717 1718 /* Chain for buf IO for removable-media targets (PM enabled) */ 1719 sd_mapblockaddr_iostart, /* Index: 5 */ 1720 sd_mapblocksize_iostart, /* Index: 6 */ 1721 sd_pm_iostart, /* Index: 7 */ 1722 sd_core_iostart, /* Index: 8 */ 1723 1724 /* Chain for buf IO for removable-media targets (PM disabled) */ 1725 sd_mapblockaddr_iostart, /* Index: 9 */ 1726 sd_mapblocksize_iostart, /* Index: 10 */ 1727 sd_core_iostart, /* Index: 11 */ 1728 1729 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1730 sd_mapblockaddr_iostart, /* Index: 12 */ 1731 sd_checksum_iostart, /* Index: 13 */ 1732 sd_pm_iostart, /* Index: 14 */ 1733 sd_core_iostart, /* Index: 15 */ 1734 1735 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1736 sd_mapblockaddr_iostart, /* Index: 16 */ 1737 sd_checksum_iostart, /* Index: 17 */ 1738 sd_core_iostart, /* Index: 18 */ 1739 1740 /* Chain for USCSI commands (all targets) */ 1741 sd_pm_iostart, /* Index: 19 */ 1742 sd_core_iostart, /* Index: 20 */ 1743 1744 /* Chain for checksumming USCSI commands (all targets) */ 1745 sd_checksum_uscsi_iostart, /* Index: 21 */ 1746 sd_pm_iostart, /* Index: 22 */ 1747 sd_core_iostart, /* Index: 23 */ 1748 1749 /* Chain for "direct" USCSI commands (all targets) */ 1750 sd_core_iostart, /* Index: 24 */ 1751 1752 /* Chain for "direct priority" USCSI commands (all targets) */ 1753 sd_core_iostart, /* Index: 25 */ 1754 }; 1755 1756 /* 1757 * Macros to locate the first function of each iostart chain in the 1758 * sd_iostart_chain[] array. These are located by the index in the array. 1759 */ 1760 #define SD_CHAIN_DISK_IOSTART 0 1761 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1762 #define SD_CHAIN_RMMEDIA_IOSTART 5 1763 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1764 #define SD_CHAIN_CHKSUM_IOSTART 12 1765 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1766 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1767 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1768 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1769 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1770 1771 1772 /* 1773 * Table of function pointers for the iodone-side routines for the driver- 1774 * internal layering mechanism. The calling sequence for iodone routines 1775 * uses a decrementing table index, so the last routine called in a chain 1776 * must be at the lowest array index location for that chain. The last 1777 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1778 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1779 * of the functions in an iodone side chain must correspond to the ordering 1780 * of the iostart routines for that chain. Note that there is no iodone 1781 * side routine that corresponds to sd_core_iostart(), so there is no 1782 * entry in the table for this. 
1783 */
1784
1785 static sd_chain_t sd_iodone_chain[] = {
1786
1787 	/* Chain for buf IO for disk drive targets (PM enabled) */
1788 	sd_buf_iodone,			/* Index: 0 */
1789 	sd_mapblockaddr_iodone,		/* Index: 1 */
1790 	sd_pm_iodone,			/* Index: 2 */
1791
1792 	/* Chain for buf IO for disk drive targets (PM disabled) */
1793 	sd_buf_iodone,			/* Index: 3 */
1794 	sd_mapblockaddr_iodone,		/* Index: 4 */
1795
1796 	/* Chain for buf IO for removable-media targets (PM enabled) */
1797 	sd_buf_iodone,			/* Index: 5 */
1798 	sd_mapblockaddr_iodone,		/* Index: 6 */
1799 	sd_mapblocksize_iodone,		/* Index: 7 */
1800 	sd_pm_iodone,			/* Index: 8 */
1801
1802 	/* Chain for buf IO for removable-media targets (PM disabled) */
1803 	sd_buf_iodone,			/* Index: 9 */
1804 	sd_mapblockaddr_iodone,		/* Index: 10 */
1805 	sd_mapblocksize_iodone,		/* Index: 11 */
1806
1807 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1808 	sd_buf_iodone,			/* Index: 12 */
1809 	sd_mapblockaddr_iodone,		/* Index: 13 */
1810 	sd_checksum_iodone,		/* Index: 14 */
1811 	sd_pm_iodone,			/* Index: 15 */
1812
1813 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1814 	sd_buf_iodone,			/* Index: 16 */
1815 	sd_mapblockaddr_iodone,		/* Index: 17 */
1816 	sd_checksum_iodone,		/* Index: 18 */
1817
1818 	/* Chain for USCSI commands (non-checksum targets) */
1819 	sd_uscsi_iodone,		/* Index: 19 */
1820 	sd_pm_iodone,			/* Index: 20 */
1821
1822 	/* Chain for USCSI commands (checksum targets) */
1823 	sd_uscsi_iodone,		/* Index: 21 */
1824 	sd_checksum_uscsi_iodone,	/* Index: 22 */
1825 	sd_pm_iodone,			/* Index: 23 */
1826
1827 	/* Chain for "direct" USCSI commands (all targets) */
1828 	sd_uscsi_iodone,		/* Index: 24 */
1829
1830 	/* Chain for "direct priority" USCSI commands (all targets) */
1831 	sd_uscsi_iodone,		/* Index: 25 */
1832 };
1833
1834
1835 /*
1836  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1837  * each iodone-side chain. These are located by the array index, but as the
1838  * iodone side functions are called in a decrementing-index order, the
1839  * highest index number in each chain must be specified (as these correspond
1840  * to the first function in the iodone chain that will be called by the core
1841  * at IO completion time).
1842  */
1843
1844 #define	SD_CHAIN_DISK_IODONE		2
1845 #define	SD_CHAIN_DISK_IODONE_NO_PM	4
1846 #define	SD_CHAIN_RMMEDIA_IODONE		8
1847 #define	SD_CHAIN_RMMEDIA_IODONE_NO_PM	11
1848 #define	SD_CHAIN_CHKSUM_IODONE		15
1849 #define	SD_CHAIN_CHKSUM_IODONE_NO_PM	18
1850 #define	SD_CHAIN_USCSI_CMD_IODONE	20
1851 #define	SD_CHAIN_USCSI_CHKSUM_IODONE	23
1852 #define	SD_CHAIN_DIRECT_CMD_IODONE	24
1853 #define	SD_CHAIN_PRIORITY_CMD_IODONE	25
1854
1855
1856
1857
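/*
 * Illustrative sketch (not part of the driver): a self-contained userland
 * demonstration of the decrementing-index walk described above.  The
 * stand-in functions below play the roles of sd_buf_iodone(),
 * sd_mapblockaddr_iodone() and sd_pm_iodone() in the disk chain, and the
 * walk begins at SD_CHAIN_DISK_IODONE (2), the chain's highest index.
 */
#include <stdio.h>

typedef void (*demo_iodone_t)(int index);

static void demo_buf_iodone(int i)    { (void) printf("%d: buf_iodone\n", i); }
static void demo_mapblk_iodone(int i) { (void) printf("%d: mapblk_iodone\n", i); }
static void demo_pm_iodone(int i)     { (void) printf("%d: pm_iodone\n", i); }

static demo_iodone_t demo_iodone_chain[] = {
	demo_buf_iodone,	/* Index: 0 (terminal routine) */
	demo_mapblk_iodone,	/* Index: 1 */
	demo_pm_iodone		/* Index: 2 */
};

int
main(void)
{
	int index;

	/* Completion starts at the highest index and walks down to 0. */
	for (index = 2; index >= 0; index--)
		(*(demo_iodone_chain[index]))(index);
	return (0);
}

1858 /*
1859  * Array to map a layering chain index to the appropriate initpkt routine.
1860  * The redundant entries are present so that the index used for accessing
1861  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1862  * with this table as well.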
1863 */
1864 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1865
1866 static sd_initpkt_t sd_initpkt_map[] = {
1867
1868 	/* Chain for buf IO for disk drive targets (PM enabled) */
1869 	sd_initpkt_for_buf,		/* Index: 0 */
1870 	sd_initpkt_for_buf,		/* Index: 1 */
1871 	sd_initpkt_for_buf,		/* Index: 2 */
1872
1873 	/* Chain for buf IO for disk drive targets (PM disabled) */
1874 	sd_initpkt_for_buf,		/* Index: 3 */
1875 	sd_initpkt_for_buf,		/* Index: 4 */
1876
1877 	/* Chain for buf IO for removable-media targets (PM enabled) */
1878 	sd_initpkt_for_buf,		/* Index: 5 */
1879 	sd_initpkt_for_buf,		/* Index: 6 */
1880 	sd_initpkt_for_buf,		/* Index: 7 */
1881 	sd_initpkt_for_buf,		/* Index: 8 */
1882
1883 	/* Chain for buf IO for removable-media targets (PM disabled) */
1884 	sd_initpkt_for_buf,		/* Index: 9 */
1885 	sd_initpkt_for_buf,		/* Index: 10 */
1886 	sd_initpkt_for_buf,		/* Index: 11 */
1887
1888 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1889 	sd_initpkt_for_buf,		/* Index: 12 */
1890 	sd_initpkt_for_buf,		/* Index: 13 */
1891 	sd_initpkt_for_buf,		/* Index: 14 */
1892 	sd_initpkt_for_buf,		/* Index: 15 */
1893
1894 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1895 	sd_initpkt_for_buf,		/* Index: 16 */
1896 	sd_initpkt_for_buf,		/* Index: 17 */
1897 	sd_initpkt_for_buf,		/* Index: 18 */
1898
1899 	/* Chain for USCSI commands (non-checksum targets) */
1900 	sd_initpkt_for_uscsi,		/* Index: 19 */
1901 	sd_initpkt_for_uscsi,		/* Index: 20 */
1902
1903 	/* Chain for USCSI commands (checksum targets) */
1904 	sd_initpkt_for_uscsi,		/* Index: 21 */
1905 	sd_initpkt_for_uscsi,		/* Index: 22 */
1906 	sd_initpkt_for_uscsi,		/* Index: 23 */
1907
1908 	/* Chain for "direct" USCSI commands (all targets) */
1909 	sd_initpkt_for_uscsi,		/* Index: 24 */
1910
1911 	/* Chain for "direct priority" USCSI commands (all targets) */
1912 	sd_initpkt_for_uscsi,		/* Index: 25 */
1913
1914 };
1915
1916
1917 /*
1918  * Array to map a layering chain index to the appropriate destroypkt routine.
1919  * The redundant entries are present so that the index used for accessing
1920  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1921  * with this table as well.
1922 */
1923 typedef void (*sd_destroypkt_t)(struct buf *);
1924
1925 static sd_destroypkt_t sd_destroypkt_map[] = {
1926
1927 	/* Chain for buf IO for disk drive targets (PM enabled) */
1928 	sd_destroypkt_for_buf,		/* Index: 0 */
1929 	sd_destroypkt_for_buf,		/* Index: 1 */
1930 	sd_destroypkt_for_buf,		/* Index: 2 */
1931
1932 	/* Chain for buf IO for disk drive targets (PM disabled) */
1933 	sd_destroypkt_for_buf,		/* Index: 3 */
1934 	sd_destroypkt_for_buf,		/* Index: 4 */
1935
1936 	/* Chain for buf IO for removable-media targets (PM enabled) */
1937 	sd_destroypkt_for_buf,		/* Index: 5 */
1938 	sd_destroypkt_for_buf,		/* Index: 6 */
1939 	sd_destroypkt_for_buf,		/* Index: 7 */
1940 	sd_destroypkt_for_buf,		/* Index: 8 */
1941
1942 	/* Chain for buf IO for removable-media targets (PM disabled) */
1943 	sd_destroypkt_for_buf,		/* Index: 9 */
1944 	sd_destroypkt_for_buf,		/* Index: 10 */
1945 	sd_destroypkt_for_buf,		/* Index: 11 */
1946
1947 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1948 	sd_destroypkt_for_buf,		/* Index: 12 */
1949 	sd_destroypkt_for_buf,		/* Index: 13 */
1950 	sd_destroypkt_for_buf,		/* Index: 14 */
1951 	sd_destroypkt_for_buf,		/* Index: 15 */
1952
1953 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1954 	sd_destroypkt_for_buf,		/* Index: 16 */
1955 	sd_destroypkt_for_buf,		/* Index: 17 */
1956 	sd_destroypkt_for_buf,		/* Index: 18 */
1957
1958 	/* Chain for USCSI commands (non-checksum targets) */
1959 	sd_destroypkt_for_uscsi,	/* Index: 19 */
1960 	sd_destroypkt_for_uscsi,	/* Index: 20 */
1961
1962 	/* Chain for USCSI commands (checksum targets) */
1963 	sd_destroypkt_for_uscsi,	/* Index: 21 */
1964 	sd_destroypkt_for_uscsi,	/* Index: 22 */
1965 	sd_destroypkt_for_uscsi,	/* Index: 23 */
1966
1967 	/* Chain for "direct" USCSI commands (all targets) */
1968 	sd_destroypkt_for_uscsi,	/* Index: 24 */
1969
1970 	/* Chain for "direct priority" USCSI commands (all targets) */
1971 	sd_destroypkt_for_uscsi,	/* Index: 25 */
1972
1973 };
1974
1975
1976
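/*
 * Example (added for illustration; the usage shown is hypothetical):
 * because the two maps above are indexed identically to sd_iostart_chain[]
 * and sd_iodone_chain[], a single chain index recorded in an xbuf selects
 * a matched packet setup/teardown pair.  A buf initialized with
 * xb_chain_iostart == SD_CHAIN_DISK_IOSTART (0) resolves to
 * sd_initpkt_for_buf() and sd_destroypkt_for_buf(), while a request on
 * the SD_CHAIN_USCSI_CMD_IOSTART (19) chain resolves to the _for_uscsi
 * pair, so allocation and teardown can never be mismatched.
 */
1977 /*
1978  * Array to map a layering chain index to the appropriate chain "type".
1979  * The chain type indicates a specific property/usage of the chain.
1980  * The redundant entries are present so that the index used for accessing
1981  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1982  * with this table as well.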
1983 */
1984
1985 #define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
1986 #define	SD_CHAIN_BUFIO			1	/* regular buf IO */
1987 #define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
1988 #define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
1989 #define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
1990 						/* (for error recovery) */
1991
1992 static int sd_chain_type_map[] = {
1993
1994 	/* Chain for buf IO for disk drive targets (PM enabled) */
1995 	SD_CHAIN_BUFIO,			/* Index: 0 */
1996 	SD_CHAIN_BUFIO,			/* Index: 1 */
1997 	SD_CHAIN_BUFIO,			/* Index: 2 */
1998
1999 	/* Chain for buf IO for disk drive targets (PM disabled) */
2000 	SD_CHAIN_BUFIO,			/* Index: 3 */
2001 	SD_CHAIN_BUFIO,			/* Index: 4 */
2002
2003 	/* Chain for buf IO for removable-media targets (PM enabled) */
2004 	SD_CHAIN_BUFIO,			/* Index: 5 */
2005 	SD_CHAIN_BUFIO,			/* Index: 6 */
2006 	SD_CHAIN_BUFIO,			/* Index: 7 */
2007 	SD_CHAIN_BUFIO,			/* Index: 8 */
2008
2009 	/* Chain for buf IO for removable-media targets (PM disabled) */
2010 	SD_CHAIN_BUFIO,			/* Index: 9 */
2011 	SD_CHAIN_BUFIO,			/* Index: 10 */
2012 	SD_CHAIN_BUFIO,			/* Index: 11 */
2013
2014 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2015 	SD_CHAIN_BUFIO,			/* Index: 12 */
2016 	SD_CHAIN_BUFIO,			/* Index: 13 */
2017 	SD_CHAIN_BUFIO,			/* Index: 14 */
2018 	SD_CHAIN_BUFIO,			/* Index: 15 */
2019
2020 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2021 	SD_CHAIN_BUFIO,			/* Index: 16 */
2022 	SD_CHAIN_BUFIO,			/* Index: 17 */
2023 	SD_CHAIN_BUFIO,			/* Index: 18 */
2024
2025 	/* Chain for USCSI commands (non-checksum targets) */
2026 	SD_CHAIN_USCSI,			/* Index: 19 */
2027 	SD_CHAIN_USCSI,			/* Index: 20 */
2028
2029 	/* Chain for USCSI commands (checksum targets) */
2030 	SD_CHAIN_USCSI,			/* Index: 21 */
2031 	SD_CHAIN_USCSI,			/* Index: 22 */
2032 	SD_CHAIN_USCSI,			/* Index: 23 */
2033
2034 	/* Chain for "direct" USCSI commands (all targets) */
2035 	SD_CHAIN_DIRECT,		/* Index: 24 */
2036
2037 	/* Chain for "direct priority" USCSI commands (all targets) */
2038 	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
2039 };
2040
2041
2042 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2043 #define	SD_IS_BUFIO(xp)			\
2044 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2045
2046 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2047 #define	SD_IS_DIRECT_PRIORITY(xp)	\
2048 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2049
2050
2051
2052 /*
2053  * Struct, array, and macros to map a specific chain to the appropriate
2054  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2055  *
2056  * The sd_chain_index_map[] array is used at attach time to set the various
2057  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2058  * chain to be used with the instance. This allows different instances to use
2059  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2060  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2061  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2062  * dynamically and without the use of locking, and (2) a layer to update the
2063  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2064  * to allow for deferred processing of an IO within the same chain from a
2065  * different execution context.
2066 */
2067
2068 struct sd_chain_index {
2069 	int	sci_iostart_index;
2070 	int	sci_iodone_index;
2071 };
2072
2073 static struct sd_chain_index	sd_chain_index_map[] = {
2074 	{ SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
2075 	{ SD_CHAIN_DISK_IOSTART_NO_PM,		SD_CHAIN_DISK_IODONE_NO_PM },
2076 	{ SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
2077 	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2078 	{ SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
2079 	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
2080 	{ SD_CHAIN_USCSI_CMD_IOSTART,		SD_CHAIN_USCSI_CMD_IODONE },
2081 	{ SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
2082 	{ SD_CHAIN_DIRECT_CMD_IOSTART,		SD_CHAIN_DIRECT_CMD_IODONE },
2083 	{ SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
2084 };
2085
2086
2087 /*
2088  * The following are indexes into the sd_chain_index_map[] array.
2089  */
2090
2091 /* un->un_buf_chain_type must be set to one of these */
2092 #define	SD_CHAIN_INFO_DISK		0
2093 #define	SD_CHAIN_INFO_DISK_NO_PM	1
2094 #define	SD_CHAIN_INFO_RMMEDIA		2
2095 #define	SD_CHAIN_INFO_RMMEDIA_NO_PM	3
2096 #define	SD_CHAIN_INFO_CHKSUM		4
2097 #define	SD_CHAIN_INFO_CHKSUM_NO_PM	5
2098
2099 /* un->un_uscsi_chain_type must be set to one of these */
2100 #define	SD_CHAIN_INFO_USCSI_CMD		6
2101 /* USCSI with PM disabled is the same as DIRECT */
2102 #define	SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
2103 #define	SD_CHAIN_INFO_USCSI_CHKSUM	7
2104
2105 /* un->un_direct_chain_type must be set to one of these */
2106 #define	SD_CHAIN_INFO_DIRECT_CMD	8
2107
2108 /* un->un_priority_chain_type must be set to one of these */
2109 #define	SD_CHAIN_INFO_PRIORITY_CMD	9
2110
2111 /* size for devid inquiries */
2112 #define	MAX_INQUIRY_SIZE	0xF0
2113
2114 /*
2115  * Macros used by functions to pass a given buf(9S) struct along to the
2116  * next function in the layering chain for further processing.
2117  *
2118  * In the following macros, passing more than three arguments to the called
2119  * routines causes the optimizer for the SPARC compiler to stop doing tail
2120  * call elimination which results in significant performance degradation.
2121  */
2122 #define	SD_BEGIN_IOSTART(index, un, bp)	\
2123 	((*(sd_iostart_chain[index]))(index, un, bp))
2124
2125 #define	SD_BEGIN_IODONE(index, un, bp)	\
2126 	((*(sd_iodone_chain[index]))(index, un, bp))
2127
2128 #define	SD_NEXT_IOSTART(index, un, bp)	\
2129 	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2130
2131 #define	SD_NEXT_IODONE(index, un, bp)	\
2132 	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
2133
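/*
 * Illustrative sketch (hypothetical layer, not part of the driver): the
 * general shape of an iostart-side layering routine.  A real layer (e.g.
 * sd_pm_iostart()) performs its own processing on the buf and then hands
 * it to the next function in the chain with SD_NEXT_IOSTART(); an iodone
 * counterpart does its post-processing and continues the unwind with
 * SD_NEXT_IODONE() instead.
 */
static void
sd_example_layer_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	/* ... layer-specific preprocessing of bp would go here ... */

	/* Pass the buf along to the next function in the chain. */
	SD_NEXT_IOSTART(index, un, bp);
}

2134 /*
2135  * Function: _init
2136  *
2137  * Description: This is the driver _init(9E) entry point.
2138  *
2139  * Return Code: Returns the value from mod_install(9F) or
2140  *		ddi_soft_state_init(9F) as appropriate.
2141  *
2142  * Context: Called when driver module is loaded.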
2143 */
2144
2145 int
2146 _init(void)
2147 {
2148 	int err;
2149
2150 	/* establish driver name from module name */
2151 	sd_label = (char *)mod_modname(&modlinkage);
2152
2153 	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2154 	    SD_MAXUNIT);
2155
2156 	if (err != 0) {
2157 		return (err);
2158 	}
2159
2160 	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2161 	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2162 	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2163
2164 	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2165 	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2166 	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2167
2168 	/*
2169 	 * it's ok to init here even for fibre device
2170 	 */
2171 	sd_scsi_probe_cache_init();
2172
2173 	sd_scsi_target_lun_init();
2174
2175 	/*
2176 	 * Creating taskq before mod_install ensures that all callers (threads)
2177 	 * that enter the module after a successful mod_install encounter
2178 	 * a valid taskq.
2179 	 */
2180 	sd_taskq_create();
2181
2182 	err = mod_install(&modlinkage);
2183 	if (err != 0) {
2184 		/* delete taskq if install fails */
2185 		sd_taskq_delete();
2186
2187 		mutex_destroy(&sd_detach_mutex);
2188 		mutex_destroy(&sd_log_mutex);
2189 		mutex_destroy(&sd_label_mutex);
2190
2191 		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2192 		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2193 		cv_destroy(&sd_tr.srq_inprocess_cv);
2194
2195 		sd_scsi_probe_cache_fini();
2196
2197 		sd_scsi_target_lun_fini();
2198
2199 		ddi_soft_state_fini(&sd_state);
2200 		return (err);
2201 	}
2202
2203 	return (err);
2204 }
2205
2206
2207 /*
2208  * Function: _fini
2209  *
2210  * Description: This is the driver _fini(9E) entry point.
2211  *
2212  * Return Code: Returns the value from mod_remove(9F)
2213  *
2214  * Context: Called when driver module is unloaded.
2215  */
2216
2217 int
2218 _fini(void)
2219 {
2220 	int err;
2221
2222 	if ((err = mod_remove(&modlinkage)) != 0) {
2223 		return (err);
2224 	}
2225
2226 	sd_taskq_delete();
2227
2228 	mutex_destroy(&sd_detach_mutex);
2229 	mutex_destroy(&sd_log_mutex);
2230 	mutex_destroy(&sd_label_mutex);
2231 	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2232
2233 	sd_scsi_probe_cache_fini();
2234
2235 	sd_scsi_target_lun_fini();
2236
2237 	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2238 	cv_destroy(&sd_tr.srq_inprocess_cv);
2239
2240 	ddi_soft_state_fini(&sd_state);
2241
2242 	return (err);
2243 }
2244
2245
2246 /*
2247  * Function: _info
2248  *
2249  * Description: This is the driver _info(9E) entry point.
2250  *
2251  * Arguments: modinfop - pointer to the driver modinfo structure
2252  *
2253  * Return Code: Returns the value from mod_info(9F).
2254  *
2255  * Context: Kernel thread context
2256  */
2257
2258 int
2259 _info(struct modinfo *modinfop)
2260 {
2261 	return (mod_info(&modlinkage, modinfop));
2262 }
2263
2264
2265 /*
2266  * The following routines implement the driver message logging facility.
2267  * They provide component- and level-based debug output filtering.
2268  * Output may also be restricted to messages for a single instance by
2269  * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2270  * to NULL, then messages for all instances are printed.
2271  *
2272  * These routines have been cloned from each other due to the language
2273  * constraints of macros and variable argument list processing.
2274  */
2275
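/*
 * Example (illustrative; the mask values below are placeholders, not the
 * driver's defined bit assignments): on a DEBUG build the filter variables
 * can be preset from /etc/system, or patched at run time with mdb -kw,
 * e.g.:
 *
 *	set sd:sd_component_mask = 0xffffffff
 *	set sd:sd_level_mask = 0x7
 *
 * Leaving sd_debug_un at NULL keeps output enabled for all instances.
 */
2276
2277 /*
2278  * Function: sd_log_err
2279  *
2280  * Description: This routine is called by the SD_ERROR macro for debug
2281  *		logging of error conditions.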
2282  *
2283  * Arguments: comp - driver component being logged
2284  *		un - pointer to driver soft state (unit) structure
2285  *		fmt - error string and format to be logged
2286  */
2287
2288 static void
2289 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2290 {
2291 	va_list		ap;
2292 	dev_info_t	*dev;
2293
2294 	ASSERT(un != NULL);
2295 	dev = SD_DEVINFO(un);
2296 	ASSERT(dev != NULL);
2297
2298 	/*
2299 	 * Filter messages based on the global component and level masks.
2300 	 * Also print if un matches the value of sd_debug_un, or if
2301 	 * sd_debug_un is set to NULL.
2302 	 */
2303 	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2304 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2305 		mutex_enter(&sd_log_mutex);
2306 		va_start(ap, fmt);
2307 		(void) vsprintf(sd_log_buf, fmt, ap);
2308 		va_end(ap);
2309 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2310 		mutex_exit(&sd_log_mutex);
2311 	}
2312 #ifdef SD_FAULT_INJECTION
2313 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2314 	if (un->sd_injection_mask & comp) {
2315 		mutex_enter(&sd_log_mutex);
2316 		va_start(ap, fmt);
2317 		(void) vsprintf(sd_log_buf, fmt, ap);
2318 		va_end(ap);
2319 		sd_injection_log(sd_log_buf, un);
2320 		mutex_exit(&sd_log_mutex);
2321 	}
2322 #endif
2323 }
2324
2325
2326 /*
2327  * Function: sd_log_info
2328  *
2329  * Description: This routine is called by the SD_INFO macro for debug
2330  *		logging of general purpose informational conditions.
2331  *
2332  * Arguments: component - driver component being logged
2333  *		un - pointer to driver soft state (unit) structure
2334  *		fmt - info string and format to be logged
2335  */
2336
2337 static void
2338 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2339 {
2340 	va_list		ap;
2341 	dev_info_t	*dev;
2342
2343 	ASSERT(un != NULL);
2344 	dev = SD_DEVINFO(un);
2345 	ASSERT(dev != NULL);
2346
2347 	/*
2348 	 * Filter messages based on the global component and level masks.
2349 	 * Also print if un matches the value of sd_debug_un, or if
2350 	 * sd_debug_un is set to NULL.
2351 	 */
2352 	if ((sd_component_mask & component) &&
2353 	    (sd_level_mask & SD_LOGMASK_INFO) &&
2354 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2355 		mutex_enter(&sd_log_mutex);
2356 		va_start(ap, fmt);
2357 		(void) vsprintf(sd_log_buf, fmt, ap);
2358 		va_end(ap);
2359 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2360 		mutex_exit(&sd_log_mutex);
2361 	}
2362 #ifdef SD_FAULT_INJECTION
2363 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2364 	if (un->sd_injection_mask & component) {
2365 		mutex_enter(&sd_log_mutex);
2366 		va_start(ap, fmt);
2367 		(void) vsprintf(sd_log_buf, fmt, ap);
2368 		va_end(ap);
2369 		sd_injection_log(sd_log_buf, un);
2370 		mutex_exit(&sd_log_mutex);
2371 	}
2372 #endif
2373 }
2374
2375
2376 /*
2377  * Function: sd_log_trace
2378  *
2379  * Description: This routine is called by the SD_TRACE macro for debug
2380  *		logging of trace conditions (i.e. function entry/exit).
2381  *
2382  * Arguments: component - driver component being logged
2383  *		un - pointer to driver soft state (unit) structure
2384  *		fmt - trace string and format to be logged
2385  */
2386
2387 static void
2388 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2389 {
2390 	va_list		ap;
2391 	dev_info_t	*dev;
2392
2393 	ASSERT(un != NULL);
2394 	dev = SD_DEVINFO(un);
2395 	ASSERT(dev != NULL);
2396
2397 	/*
2398 	 * Filter messages based on the global component and level masks.
2399 	 * Also print if un matches the value of sd_debug_un, or if
2400 	 * sd_debug_un is set to NULL.
2401 */ 2402 if ((sd_component_mask & component) && 2403 (sd_level_mask & SD_LOGMASK_TRACE) && 2404 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2405 mutex_enter(&sd_log_mutex); 2406 va_start(ap, fmt); 2407 (void) vsprintf(sd_log_buf, fmt, ap); 2408 va_end(ap); 2409 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2410 mutex_exit(&sd_log_mutex); 2411 } 2412 #ifdef SD_FAULT_INJECTION 2413 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2414 if (un->sd_injection_mask & component) { 2415 mutex_enter(&sd_log_mutex); 2416 va_start(ap, fmt); 2417 (void) vsprintf(sd_log_buf, fmt, ap); 2418 va_end(ap); 2419 sd_injection_log(sd_log_buf, un); 2420 mutex_exit(&sd_log_mutex); 2421 } 2422 #endif 2423 } 2424 2425 2426 /* 2427 * Function: sdprobe 2428 * 2429 * Description: This is the driver probe(9e) entry point function. 2430 * 2431 * Arguments: devi - opaque device info handle 2432 * 2433 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2434 * DDI_PROBE_FAILURE: If the probe failed. 2435 * DDI_PROBE_PARTIAL: If the instance is not present now, 2436 * but may be present in the future. 2437 */ 2438 2439 static int 2440 sdprobe(dev_info_t *devi) 2441 { 2442 struct scsi_device *devp; 2443 int rval; 2444 int instance; 2445 2446 /* 2447 * if it wasn't for pln, sdprobe could actually be nulldev 2448 * in the "__fibre" case. 2449 */ 2450 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2451 return (DDI_PROBE_DONTCARE); 2452 } 2453 2454 devp = ddi_get_driver_private(devi); 2455 2456 if (devp == NULL) { 2457 /* Ooops... nexus driver is mis-configured... */ 2458 return (DDI_PROBE_FAILURE); 2459 } 2460 2461 instance = ddi_get_instance(devi); 2462 2463 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2464 return (DDI_PROBE_PARTIAL); 2465 } 2466 2467 /* 2468 * Call the SCSA utility probe routine to see if we actually 2469 * have a target at this SCSI nexus. 2470 */ 2471 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2472 case SCSIPROBE_EXISTS: 2473 switch (devp->sd_inq->inq_dtype) { 2474 case DTYPE_DIRECT: 2475 rval = DDI_PROBE_SUCCESS; 2476 break; 2477 case DTYPE_RODIRECT: 2478 /* CDs etc. Can be removable media */ 2479 rval = DDI_PROBE_SUCCESS; 2480 break; 2481 case DTYPE_OPTICAL: 2482 /* 2483 * Rewritable optical driver HP115AA 2484 * Can also be removable media 2485 */ 2486 2487 /* 2488 * Do not attempt to bind to DTYPE_OPTICAL if 2489 * pre solaris 9 sparc sd behavior is required 2490 * 2491 * If first time through and sd_dtype_optical_bind 2492 * has not been set in /etc/system check properties 2493 */ 2494 2495 if (sd_dtype_optical_bind < 0) { 2496 sd_dtype_optical_bind = ddi_prop_get_int 2497 (DDI_DEV_T_ANY, devi, 0, 2498 "optical-device-bind", 1); 2499 } 2500 2501 if (sd_dtype_optical_bind == 0) { 2502 rval = DDI_PROBE_FAILURE; 2503 } else { 2504 rval = DDI_PROBE_SUCCESS; 2505 } 2506 break; 2507 2508 case DTYPE_NOTPRESENT: 2509 default: 2510 rval = DDI_PROBE_FAILURE; 2511 break; 2512 } 2513 break; 2514 default: 2515 rval = DDI_PROBE_PARTIAL; 2516 break; 2517 } 2518 2519 /* 2520 * This routine checks for resource allocation prior to freeing, 2521 * so it will take care of the "smart probing" case where a 2522 * scsi_probe() may or may not have been issued and will *not* 2523 * free previously-freed resources. 2524 */ 2525 scsi_unprobe(devp); 2526 return (rval); 2527 } 2528 2529 2530 /* 2531 * Function: sdinfo 2532 * 2533 * Description: This is the driver getinfo(9e) entry point function. 
2534 * Given the device number, return the devinfo pointer from 2535 * the scsi_device structure or the instance number 2536 * associated with the dev_t. 2537 * 2538 * Arguments: dip - pointer to device info structure 2539 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2540 * DDI_INFO_DEVT2INSTANCE) 2541 * arg - driver dev_t 2542 * resultp - user buffer for request response 2543 * 2544 * Return Code: DDI_SUCCESS 2545 * DDI_FAILURE 2546 */ 2547 /* ARGSUSED */ 2548 static int 2549 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2550 { 2551 struct sd_lun *un; 2552 dev_t dev; 2553 int instance; 2554 int error; 2555 2556 switch (infocmd) { 2557 case DDI_INFO_DEVT2DEVINFO: 2558 dev = (dev_t)arg; 2559 instance = SDUNIT(dev); 2560 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2561 return (DDI_FAILURE); 2562 } 2563 *result = (void *) SD_DEVINFO(un); 2564 error = DDI_SUCCESS; 2565 break; 2566 case DDI_INFO_DEVT2INSTANCE: 2567 dev = (dev_t)arg; 2568 instance = SDUNIT(dev); 2569 *result = (void *)(uintptr_t)instance; 2570 error = DDI_SUCCESS; 2571 break; 2572 default: 2573 error = DDI_FAILURE; 2574 } 2575 return (error); 2576 } 2577 2578 /* 2579 * Function: sd_prop_op 2580 * 2581 * Description: This is the driver prop_op(9e) entry point function. 2582 * Return the number of blocks for the partition in question 2583 * or forward the request to the property facilities. 2584 * 2585 * Arguments: dev - device number 2586 * dip - pointer to device info structure 2587 * prop_op - property operator 2588 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2589 * name - pointer to property name 2590 * valuep - pointer or address of the user buffer 2591 * lengthp - property length 2592 * 2593 * Return Code: DDI_PROP_SUCCESS 2594 * DDI_PROP_NOT_FOUND 2595 * DDI_PROP_UNDEFINED 2596 * DDI_PROP_NO_MEMORY 2597 * DDI_PROP_BUF_TOO_SMALL 2598 */ 2599 2600 static int 2601 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2602 char *name, caddr_t valuep, int *lengthp) 2603 { 2604 int instance = ddi_get_instance(dip); 2605 struct sd_lun *un; 2606 uint64_t nblocks64; 2607 uint_t dblk; 2608 2609 /* 2610 * Our dynamic properties are all device specific and size oriented. 2611 * Requests issued under conditions where size is valid are passed 2612 * to ddi_prop_op_nblocks with the size information, otherwise the 2613 * request is passed to ddi_prop_op. Size depends on valid geometry. 
2614 */
2615 	un = ddi_get_soft_state(sd_state, instance);
2616 	if ((dev == DDI_DEV_T_ANY) || (un == NULL)) {
2617 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
2618 		    name, valuep, lengthp));
2619 	} else if (!SD_IS_VALID_LABEL(un)) {
2620 		return (ddi_prop_op(dev, dip, prop_op, mod_flags, name,
2621 		    valuep, lengthp));
2622 	}
2623
2624 	/* get nblocks value */
2625 	ASSERT(!mutex_owned(SD_MUTEX(un)));
2626
2627 	(void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev),
2628 	    (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
2629
2630 	/* report size in target size blocks */
2631 	dblk = un->un_tgt_blocksize / un->un_sys_blocksize;
2632 	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags,
2633 	    name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize));
2634 }
2635
2636 /*
2637  * The following functions are for smart probing:
2638  * sd_scsi_probe_cache_init()
2639  * sd_scsi_probe_cache_fini()
2640  * sd_scsi_clear_probe_cache()
2641  * sd_scsi_probe_with_cache()
2642  */
2643
2644 /*
2645  * Function: sd_scsi_probe_cache_init
2646  *
2647  * Description: Initializes the probe response cache mutex and head pointer.
2648  *
2649  * Context: Kernel thread context
2650  */
2651
2652 static void
2653 sd_scsi_probe_cache_init(void)
2654 {
2655 	mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
2656 	sd_scsi_probe_cache_head = NULL;
2657 }
2658
2659
2660 /*
2661  * Function: sd_scsi_probe_cache_fini
2662  *
2663  * Description: Frees all resources associated with the probe response cache.
2664  *
2665  * Context: Kernel thread context
2666  */
2667
2668 static void
2669 sd_scsi_probe_cache_fini(void)
2670 {
2671 	struct sd_scsi_probe_cache *cp;
2672 	struct sd_scsi_probe_cache *ncp;
2673
2674 	/* Clean up our smart probing linked list */
2675 	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
2676 		ncp = cp->next;
2677 		kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
2678 	}
2679 	sd_scsi_probe_cache_head = NULL;
2680 	mutex_destroy(&sd_scsi_probe_cache_mutex);
2681 }
2682
2683
2684 /*
2685  * Function: sd_scsi_clear_probe_cache
2686  *
2687  * Description: This routine clears the probe response cache. This is
2688  *		done when open() returns ENXIO so that when deferred
2689  *		attach is attempted (possibly after a device has been
2690  *		turned on) we will retry the probe. Since we don't know
2691  *		which target we failed to open, we just clear the
2692  *		entire cache.
2693  *
2694  * Context: Kernel thread context
2695  */
2696
2697 static void
2698 sd_scsi_clear_probe_cache(void)
2699 {
2700 	struct sd_scsi_probe_cache	*cp;
2701 	int				i;
2702
2703 	mutex_enter(&sd_scsi_probe_cache_mutex);
2704 	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2705 		/*
2706 		 * Reset all entries to SCSIPROBE_EXISTS. This will
2707 		 * force probing to be performed the next time
2708 		 * sd_scsi_probe_with_cache is called.
2709 		 */
2710 		for (i = 0; i < NTARGETS_WIDE; i++) {
2711 			cp->cache[i] = SCSIPROBE_EXISTS;
2712 		}
2713 	}
2714 	mutex_exit(&sd_scsi_probe_cache_mutex);
2715 }
2716
2717
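/*
 * Illustrative, self-contained sketch (not driver code): the
 * lookup-or-create pattern that sd_scsi_probe_with_cache() below applies
 * to keep one cache node per parent HBA node (pdip), pushing new nodes at
 * the head of the list.  The key type and array size here are stand-ins.
 */
#include <stdlib.h>

struct demo_cache {
	struct demo_cache *next;
	const void *key;	/* stands in for the parent dev_info pointer */
	int result[16];		/* stands in for per-target probe results */
};

static struct demo_cache *demo_cache_head;

static struct demo_cache *
demo_cache_lookup_or_create(const void *key)
{
	struct demo_cache *cp;

	for (cp = demo_cache_head; cp != NULL; cp = cp->next) {
		if (cp->key == key)
			return (cp);
	}

	if ((cp = calloc(1, sizeof (*cp))) == NULL)
		return (NULL);
	cp->key = key;
	cp->next = demo_cache_head;	/* head insertion, as in the driver */
	demo_cache_head = cp;
	return (cp);
}

2718 /*
2719  * Function: sd_scsi_probe_with_cache
2720  *
2721  * Description: This routine implements support for a scsi device probe
2722  *		with cache. The driver maintains a cache of the target
2723  *		responses to scsi probes. If we get no response from a
2724  *		target during a probe inquiry, we remember that, and we
2725  *		avoid additional calls to scsi_probe on non-zero LUNs
2726  *		on the same target until the cache is cleared.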
By doing 2727 * so we avoid the 1/4 sec selection timeout for nonzero 2728 * LUNs. lun0 of a target is always probed. 2729 * 2730 * Arguments: devp - Pointer to a scsi_device(9S) structure 2731 * waitfunc - indicates what the allocator routines should 2732 * do when resources are not available. This value 2733 * is passed on to scsi_probe() when that routine 2734 * is called. 2735 * 2736 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2737 * otherwise the value returned by scsi_probe(9F). 2738 * 2739 * Context: Kernel thread context 2740 */ 2741 2742 static int 2743 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2744 { 2745 struct sd_scsi_probe_cache *cp; 2746 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2747 int lun, tgt; 2748 2749 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2750 SCSI_ADDR_PROP_LUN, 0); 2751 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2752 SCSI_ADDR_PROP_TARGET, -1); 2753 2754 /* Make sure caching enabled and target in range */ 2755 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2756 /* do it the old way (no cache) */ 2757 return (scsi_probe(devp, waitfn)); 2758 } 2759 2760 mutex_enter(&sd_scsi_probe_cache_mutex); 2761 2762 /* Find the cache for this scsi bus instance */ 2763 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2764 if (cp->pdip == pdip) { 2765 break; 2766 } 2767 } 2768 2769 /* If we can't find a cache for this pdip, create one */ 2770 if (cp == NULL) { 2771 int i; 2772 2773 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2774 KM_SLEEP); 2775 cp->pdip = pdip; 2776 cp->next = sd_scsi_probe_cache_head; 2777 sd_scsi_probe_cache_head = cp; 2778 for (i = 0; i < NTARGETS_WIDE; i++) { 2779 cp->cache[i] = SCSIPROBE_EXISTS; 2780 } 2781 } 2782 2783 mutex_exit(&sd_scsi_probe_cache_mutex); 2784 2785 /* Recompute the cache for this target if LUN zero */ 2786 if (lun == 0) { 2787 cp->cache[tgt] = SCSIPROBE_EXISTS; 2788 } 2789 2790 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2791 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2792 return (SCSIPROBE_NORESP); 2793 } 2794 2795 /* Do the actual probe; save & return the result */ 2796 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2797 } 2798 2799 2800 /* 2801 * Function: sd_scsi_target_lun_init 2802 * 2803 * Description: Initializes the attached lun chain mutex and head pointer. 2804 * 2805 * Context: Kernel thread context 2806 */ 2807 2808 static void 2809 sd_scsi_target_lun_init(void) 2810 { 2811 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2812 sd_scsi_target_lun_head = NULL; 2813 } 2814 2815 2816 /* 2817 * Function: sd_scsi_target_lun_fini 2818 * 2819 * Description: Frees all resources associated with the attached lun 2820 * chain 2821 * 2822 * Context: Kernel thread context 2823 */ 2824 2825 static void 2826 sd_scsi_target_lun_fini(void) 2827 { 2828 struct sd_scsi_hba_tgt_lun *cp; 2829 struct sd_scsi_hba_tgt_lun *ncp; 2830 2831 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2832 ncp = cp->next; 2833 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2834 } 2835 sd_scsi_target_lun_head = NULL; 2836 mutex_destroy(&sd_scsi_target_lun_mutex); 2837 } 2838 2839 2840 /* 2841 * Function: sd_scsi_get_target_lun_count 2842 * 2843 * Description: This routine will check in the attached lun chain to see 2844 * how many luns are attached on the required SCSI controller 2845 * and target. 
Currently, some capabilities such as tagged queueing
2846  *		are supported per target by the HBA. So all luns in a
2847  *		target have the same capabilities. Based on this assumption,
2848  *		sd should only set these capabilities once per target. This
2849  *		function is called when sd needs to decide how many luns
2850  *		are already attached on a target.
2851  *
2852  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2853  *		controller device.
2854  *		target - The target ID on the controller's SCSI bus.
2855  *
2856  * Return Code: The number of luns attached on the required target and
2857  *		controller.
2858  *		-1 if target ID is not in parallel SCSI scope or the given
2859  *		dip is not in the chain.
2860  *
2861  * Context: Kernel thread context
2862  */
2863
2864 static int
2865 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
2866 {
2867 	struct sd_scsi_hba_tgt_lun	*cp;
2868
2869 	if ((target < 0) || (target >= NTARGETS_WIDE)) {
2870 		return (-1);
2871 	}
2872
2873 	mutex_enter(&sd_scsi_target_lun_mutex);
2874
2875 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2876 		if (cp->pdip == dip) {
2877 			break;
2878 		}
2879 	}
2880
2881 	mutex_exit(&sd_scsi_target_lun_mutex);
2882
2883 	if (cp == NULL) {
2884 		return (-1);
2885 	}
2886
2887 	return (cp->nlun[target]);
2888 }
2889
2890
2891 /*
2892  * Function: sd_scsi_update_lun_on_target
2893  *
2894  * Description: This routine is used to update the attached lun chain when a
2895  *		lun is attached or detached on a target.
2896  *
2897  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2898  *		controller device.
2899  *		target - The target ID on the controller's SCSI bus.
2900  *		flag - Indicates whether the lun is attached or detached.
2901  *
2902  * Context: Kernel thread context
2903  */
2904
2905 static void
2906 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
2907 {
2908 	struct sd_scsi_hba_tgt_lun	*cp;
2909
2910 	mutex_enter(&sd_scsi_target_lun_mutex);
2911
2912 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2913 		if (cp->pdip == dip) {
2914 			break;
2915 		}
2916 	}
2917
2918 	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
2919 		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
2920 		    KM_SLEEP);
2921 		cp->pdip = dip;
2922 		cp->next = sd_scsi_target_lun_head;
2923 		sd_scsi_target_lun_head = cp;
2924 	}
2925
2926 	mutex_exit(&sd_scsi_target_lun_mutex);
2927
2928 	if (cp != NULL) {
2929 		if (flag == SD_SCSI_LUN_ATTACH) {
2930 			cp->nlun[target]++;
2931 		} else {
2932 			cp->nlun[target]--;
2933 		}
2934 	}
2935 }
2936
2937
2938 /*
2939  * Function: sd_spin_up_unit
2940  *
2941  * Description: Issues the following commands to spin-up the device:
2942  *		START STOP UNIT, and INQUIRY.
2943  *
2944  * Arguments: un - driver soft state (unit) structure
2945  *
2946  * Return Code: 0 - success
2947  *		EIO - failure
2948  *		EACCES - reservation conflict
2949  *
2950  * Context: Kernel thread context
2951  */
2952
2953 static int
2954 sd_spin_up_unit(struct sd_lun *un)
2955 {
2956 	size_t	resid = 0;
2957 	int	has_conflict = FALSE;
2958 	uchar_t *bufaddr;
2959
2960 	ASSERT(un != NULL);
2961
2962 	/*
2963 	 * Send a throwaway START UNIT command.
2964 	 *
2965 	 * If we fail on this, we don't care presently what precisely
2966 	 * is wrong. EMC's arrays will also fail this with a check
2967 	 * condition (0x2/0x4/0x3) if the device is "inactive," but
2968 	 * we don't want to fail the attach because it may become
2969 	 * "active" later.
2970 */
2971 	if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
2972 	    == EACCES)
2973 		has_conflict = TRUE;
2974
2975 	/*
2976 	 * Send another INQUIRY command to the target. This is necessary for
2977 	 * non-removable media direct access devices because their INQUIRY data
2978 	 * may not be fully qualified until they are spun up (perhaps via the
2979 	 * START command above). (Note: This seems to be needed for some
2980 	 * legacy devices only.) The INQUIRY command should succeed even if a
2981 	 * Reservation Conflict is present.
2982 	 */
2983 	bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
2984 	if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
2985 		kmem_free(bufaddr, SUN_INQSIZE);
2986 		return (EIO);
2987 	}
2988
2989 	/*
2990 	 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
2991 	 * Note that this routine does not return a failure here even if the
2992 	 * INQUIRY command did not return any data. This is a legacy behavior.
2993 	 */
2994 	if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
2995 		bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
2996 	}
2997
2998 	kmem_free(bufaddr, SUN_INQSIZE);
2999
3000 	/* If we hit a reservation conflict above, tell the caller. */
3001 	if (has_conflict == TRUE) {
3002 		return (EACCES);
3003 	}
3004
3005 	return (0);
3006 }
3007
3008 #ifdef _LP64
3009 /*
3010  * Function: sd_enable_descr_sense
3011  *
3012  * Description: This routine attempts to select descriptor sense format
3013  *		using the Control mode page. Devices that support 64 bit
3014  *		LBAs (for >2TB luns) should also implement descriptor
3015  *		sense data so we will call this function whenever we see
3016  *		a lun larger than 2TB. If for some reason the device
3017  *		supports 64 bit LBAs but doesn't support descriptor sense
3018  *		presumably the mode select will fail. Everything will
3019  *		continue to work normally except that we will not get
3020  *		complete sense data for commands that fail with an LBA
3021  *		larger than 32 bits.
3022  *
3023  * Arguments: un - driver soft state (unit) structure
3024  *
3025  * Context: Kernel thread context only
3026  */
3027
3028 static void
3029 sd_enable_descr_sense(struct sd_lun *un)
3030 {
3031 	uchar_t			*header;
3032 	struct mode_control_scsi3 *ctrl_bufp;
3033 	size_t			buflen;
3034 	size_t			bd_len;
3035
3036 	/*
3037 	 * Read MODE SENSE page 0xA, Control Mode Page
3038 	 */
3039 	buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3040 	    sizeof (struct mode_control_scsi3);
3041 	header = kmem_zalloc(buflen, KM_SLEEP);
3042 	if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
3043 	    MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) {
3044 		SD_ERROR(SD_LOG_COMMON, un,
3045 		    "sd_enable_descr_sense: mode sense ctrl page failed\n");
3046 		goto eds_exit;
3047 	}
3048
3049 	/*
3050 	 * Determine size of Block Descriptors in order to locate
3051 	 * the mode page data. ATAPI devices return 0, SCSI devices
3052 	 * should return MODE_BLK_DESC_LENGTH.
3053 	 */
3054 	bd_len = ((struct mode_header *)header)->bdesc_length;
3055
3056 	/* Clear the mode data length field for MODE SELECT */
3057 	((struct mode_header *)header)->length = 0;
3058
3059 	ctrl_bufp = (struct mode_control_scsi3 *)
3060 	    (header + MODE_HEADER_LENGTH + bd_len);
3061
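	/*
	 * For reference, the MODE SENSE(6) data parsed here is laid out as
	 * a 4-byte mode header (MODE_HEADER_LENGTH), then bd_len bytes of
	 * block descriptors (zero on ATAPI devices), then the mode page
	 * itself, which is why the page pointer above is computed as
	 * header + MODE_HEADER_LENGTH + bd_len.
	 */
3062 	/*
3063 	 * If the page length is smaller than the expected value,
3064 	 * the target device doesn't support D_SENSE. Bail out here.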
3065 	 */
3066 	if (ctrl_bufp->mode_page.length <
3067 	    sizeof (struct mode_control_scsi3) - 2) {
3068 		SD_ERROR(SD_LOG_COMMON, un,
3069 		    "sd_enable_descr_sense: enable D_SENSE failed\n");
3070 		goto eds_exit;
3071 	}
3072
3073 	/*
3074 	 * Clear PS bit for MODE SELECT
3075 	 */
3076 	ctrl_bufp->mode_page.ps = 0;
3077
3078 	/*
3079 	 * Set D_SENSE to enable descriptor sense format.
3080 	 */
3081 	ctrl_bufp->d_sense = 1;
3082
3083 	/*
3084 	 * Use MODE SELECT to commit the change to the D_SENSE bit
3085 	 */
3086 	if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header,
3087 	    buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) {
3088 		SD_INFO(SD_LOG_COMMON, un,
3089 		    "sd_enable_descr_sense: mode select ctrl page failed\n");
3090 		goto eds_exit;
3091 	}
3092
3093 eds_exit:
3094 	kmem_free(header, buflen);
3095 }
3096
3097 /*
3098  * Function: sd_reenable_dsense_task
3099  *
3100  * Description: Re-enable descriptor sense after device or bus reset
3101  *
3102  * Context: Executes in a taskq() thread context
3103  */
3104 static void
3105 sd_reenable_dsense_task(void *arg)
3106 {
3107 	struct sd_lun	*un = arg;
3108
3109 	ASSERT(un != NULL);
3110 	sd_enable_descr_sense(un);
3111 }
3112 #endif /* _LP64 */
3113
3114 /*
3115  * Function: sd_set_mmc_caps
3116  *
3117  * Description: This routine determines if the device is MMC compliant and if
3118  *		the device supports CDDA via a mode sense of the CDVD
3119  *		capabilities mode page. Also checks if the device is a
3120  *		dvdram writable device.
3121  *
3122  * Arguments: un - driver soft state (unit) structure
3123  *
3124  * Context: Kernel thread context only
3125  */
3126
3127 static void
3128 sd_set_mmc_caps(struct sd_lun *un)
3129 {
3130 	struct mode_header_grp2		*sense_mhp;
3131 	uchar_t				*sense_page;
3132 	caddr_t				buf;
3133 	int				bd_len;
3134 	int				status;
3135 	struct uscsi_cmd		com;
3136 	int				rtn;
3137 	uchar_t				*out_data_rw, *out_data_hd;
3138 	uchar_t				*rqbuf_rw, *rqbuf_hd;
3139
3140 	ASSERT(un != NULL);
3141
3142 	/*
3143 	 * The flags which will be set in this function are: MMC compliant,
3144 	 * DVD-RAM writable device, and CDDA support. Initialize them to
3145 	 * FALSE; if a capability is detected, it will be set to TRUE.
3146 	 */
3147 	un->un_f_mmc_cap = FALSE;
3148 	un->un_f_dvdram_writable_device = FALSE;
3149 	un->un_f_cfg_cdda = FALSE;
3150
3151 	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3152 	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
3153 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3154
3155 	if (status != 0) {
3156 		/* command failed; just return */
3157 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3158 		return;
3159 	}
3160 	/*
3161 	 * If the mode sense request for the CDROM CAPABILITIES
3162 	 * page (0x2A) succeeds, the device is assumed to be MMC.
3163 	 */
3164 	un->un_f_mmc_cap = TRUE;
3165
3166 	/* Get to the page data */
3167 	sense_mhp = (struct mode_header_grp2 *)buf;
3168 	bd_len = (sense_mhp->bdesc_length_hi << 8) |
3169 	    sense_mhp->bdesc_length_lo;
3170 	if (bd_len > MODE_BLK_DESC_LENGTH) {
3171 		/*
3172 		 * We did not get back the expected block descriptor
3173 		 * length so we cannot determine if the device supports
3174 		 * CDDA. However, we still indicate the device is MMC
3175 		 * according to the successful response to the page
3176 		 * 0x2A mode sense request.
3177 */ 3178 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3179 "sd_set_mmc_caps: Mode Sense returned " 3180 "invalid block descriptor length\n"); 3181 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3182 return; 3183 } 3184 3185 /* See if read CDDA is supported */ 3186 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3187 bd_len); 3188 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3189 3190 /* See if writing DVD RAM is supported. */ 3191 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3192 if (un->un_f_dvdram_writable_device == TRUE) { 3193 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3194 return; 3195 } 3196 3197 /* 3198 * If the device presents DVD or CD capabilities in the mode 3199 * page, we can return here since a RRD will not have 3200 * these capabilities. 3201 */ 3202 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3203 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3204 return; 3205 } 3206 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3207 3208 /* 3209 * If un->un_f_dvdram_writable_device is still FALSE, 3210 * check for a Removable Rigid Disk (RRD). A RRD 3211 * device is identified by the features RANDOM_WRITABLE and 3212 * HARDWARE_DEFECT_MANAGEMENT. 3213 */ 3214 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3215 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3216 3217 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3218 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3219 RANDOM_WRITABLE, SD_PATH_STANDARD); 3220 if (rtn != 0) { 3221 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3222 kmem_free(rqbuf_rw, SENSE_LENGTH); 3223 return; 3224 } 3225 3226 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3227 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3228 3229 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3230 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3231 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3232 if (rtn == 0) { 3233 /* 3234 * We have good information, check for random writable 3235 * and hardware defect features. 3236 */ 3237 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3238 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3239 un->un_f_dvdram_writable_device = TRUE; 3240 } 3241 } 3242 3243 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3244 kmem_free(rqbuf_rw, SENSE_LENGTH); 3245 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3246 kmem_free(rqbuf_hd, SENSE_LENGTH); 3247 } 3248 3249 /* 3250 * Function: sd_check_for_writable_cd 3251 * 3252 * Description: This routine determines if the media in the device is 3253 * writable or not. It uses the get configuration command (0x46) 3254 * to determine if the media is writable 3255 * 3256 * Arguments: un - driver soft state (unit) structure 3257 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3258 * chain and the normal command waitq, or 3259 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3260 * "direct" chain and bypass the normal command 3261 * waitq. 3262 * 3263 * Context: Never called at interrupt context. 
3264 */ 3265 3266 static void 3267 sd_check_for_writable_cd(struct sd_lun *un, int path_flag) 3268 { 3269 struct uscsi_cmd com; 3270 uchar_t *out_data; 3271 uchar_t *rqbuf; 3272 int rtn; 3273 uchar_t *out_data_rw, *out_data_hd; 3274 uchar_t *rqbuf_rw, *rqbuf_hd; 3275 struct mode_header_grp2 *sense_mhp; 3276 uchar_t *sense_page; 3277 caddr_t buf; 3278 int bd_len; 3279 int status; 3280 3281 ASSERT(un != NULL); 3282 ASSERT(mutex_owned(SD_MUTEX(un))); 3283 3284 /* 3285 * Initialize the writable media to false, if configuration info. 3286 * tells us otherwise then only we will set it. 3287 */ 3288 un->un_f_mmc_writable_media = FALSE; 3289 mutex_exit(SD_MUTEX(un)); 3290 3291 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3292 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3293 3294 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3295 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3296 3297 mutex_enter(SD_MUTEX(un)); 3298 if (rtn == 0) { 3299 /* 3300 * We have good information, check for writable DVD. 3301 */ 3302 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3303 un->un_f_mmc_writable_media = TRUE; 3304 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3305 kmem_free(rqbuf, SENSE_LENGTH); 3306 return; 3307 } 3308 } 3309 3310 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3311 kmem_free(rqbuf, SENSE_LENGTH); 3312 3313 /* 3314 * Determine if this is a RRD type device. 3315 */ 3316 mutex_exit(SD_MUTEX(un)); 3317 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3318 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3319 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3320 mutex_enter(SD_MUTEX(un)); 3321 if (status != 0) { 3322 /* command failed; just return */ 3323 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3324 return; 3325 } 3326 3327 /* Get to the page data */ 3328 sense_mhp = (struct mode_header_grp2 *)buf; 3329 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3330 if (bd_len > MODE_BLK_DESC_LENGTH) { 3331 /* 3332 * We did not get back the expected block descriptor length so 3333 * we cannot check the mode page. 3334 */ 3335 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3336 "sd_check_for_writable_cd: Mode Sense returned " 3337 "invalid block descriptor length\n"); 3338 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3339 return; 3340 } 3341 3342 /* 3343 * If the device presents DVD or CD capabilities in the mode 3344 * page, we can return here since a RRD device will not have 3345 * these capabilities. 3346 */ 3347 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3348 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3349 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3350 return; 3351 } 3352 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3353 3354 /* 3355 * If un->un_f_mmc_writable_media is still FALSE, 3356 * check for RRD type media. A RRD device is identified 3357 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 
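 *
 * (Sketch of the feature descriptor layout assumed below: each
 * descriptor follows the 8-byte feature header, so buffer offsets
 * 8-9 hold the feature code and bit 0 of offset 10 is the
 * "current" bit; both features must be reported current for the
 * media to be treated as writable.)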
3358 */
3359 mutex_exit(SD_MUTEX(un));
3360 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3361 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3362
3363 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3364 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3365 RANDOM_WRITABLE, path_flag);
3366 if (rtn != 0) {
3367 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3368 kmem_free(rqbuf_rw, SENSE_LENGTH);
3369 mutex_enter(SD_MUTEX(un));
3370 return;
3371 }
3372
3373 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3374 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3375
3376 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3377 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3378 HARDWARE_DEFECT_MANAGEMENT, path_flag);
3379 mutex_enter(SD_MUTEX(un));
3380 if (rtn == 0) {
3381 /*
3382 * We have good information, check for random writable
3383 * and hardware defect features as current.
3384 */
3385 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3386 (out_data_rw[10] & 0x1) &&
3387 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3388 (out_data_hd[10] & 0x1)) {
3389 un->un_f_mmc_writable_media = TRUE;
3390 }
3391 }
3392
3393 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3394 kmem_free(rqbuf_rw, SENSE_LENGTH);
3395 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3396 kmem_free(rqbuf_hd, SENSE_LENGTH);
3397 }
3398
3399 /*
3400 * Function: sd_read_unit_properties
3401 *
3402 * Description: The following implements a property lookup mechanism.
3403 * Properties for particular disks (keyed on vendor, model
3404 * and rev numbers) are sought in the sd.conf file via
3405 * sd_process_sdconf_file(), and if not found there, are
3406 * searched for in a table hardcoded in this driver via
3407 * sd_process_sdconf_table(). Once located, the properties
3408 * are used to update the driver unit structure.
3409 *
3410 * Arguments: un - driver soft state (unit) structure
3411 */
3412
3413 static void
3414 sd_read_unit_properties(struct sd_lun *un)
3415 {
3416 /*
3417 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3418 * the "sd-config-list" property (from the sd.conf file) or if
3419 * there was not a match for the inquiry vid/pid. If this
3420 * occurs, the static driver configuration table is searched
3421 * for a match.
3422 */
3423 ASSERT(un != NULL);
3424 if (sd_process_sdconf_file(un) == SD_FAILURE) {
3425 sd_process_sdconf_table(un);
3426 }
3427
3428 /* check for LSI device */
3429 sd_is_lsi(un);
3430
3431
3432 }
3433
3434
3435 /*
3436 * Function: sd_process_sdconf_file
3437 *
3438 * Description: Use ddi_getlongprop to obtain the properties from the
3439 * driver's config file (i.e., sd.conf) and update the driver
3440 * soft state structure accordingly.
3441 *
3442 * Arguments: un - driver soft state (unit) structure
3443 *
3444 * Return Code: SD_SUCCESS - The properties were successfully set according
3445 * to the driver configuration file.
3446 * SD_FAILURE - The driver config list was not obtained or
3447 * there was no vid/pid match. This indicates that
3448 * the static config table should be used.
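 *
 * (For illustration, using a hypothetical vid/pid, an sd.conf
 * fragment in the format described below might be:
 *
 * sd-config-list = "SEAGATE ST32550W", "seagate-tunables";
 * seagate-tunables = 1,0x1,32;
 *
 * which selects version 1 and, assuming SD_CONF_BSET_THROTTLE is
 * bit 0 of the flags, sets the throttle tunable to 32.)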
3449 * 3450 * The config file has a property, "sd-config-list", which consists of 3451 * one or more duplets as follows: 3452 * 3453 * sd-config-list= 3454 * <duplet>, 3455 * [<duplet>,] 3456 * [<duplet>]; 3457 * 3458 * The structure of each duplet is as follows: 3459 * 3460 * <duplet>:= <vid+pid>,<data-property-name_list> 3461 * 3462 * The first entry of the duplet is the device ID string (the concatenated 3463 * vid & pid; not to be confused with a device_id). This is defined in 3464 * the same way as in the sd_disk_table. 3465 * 3466 * The second part of the duplet is a string that identifies a 3467 * data-property-name-list. The data-property-name-list is defined as 3468 * follows: 3469 * 3470 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3471 * 3472 * The syntax of <data-property-name> depends on the <version> field. 3473 * 3474 * If version = SD_CONF_VERSION_1 we have the following syntax: 3475 * 3476 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3477 * 3478 * where the prop0 value will be used to set prop0 if bit0 set in the 3479 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3480 * 3481 */ 3482 3483 static int 3484 sd_process_sdconf_file(struct sd_lun *un) 3485 { 3486 char *config_list = NULL; 3487 int config_list_len; 3488 int len; 3489 int dupletlen = 0; 3490 char *vidptr; 3491 int vidlen; 3492 char *dnlist_ptr; 3493 char *dataname_ptr; 3494 int dnlist_len; 3495 int dataname_len; 3496 int *data_list; 3497 int data_list_len; 3498 int rval = SD_FAILURE; 3499 int i; 3500 3501 ASSERT(un != NULL); 3502 3503 /* Obtain the configuration list associated with the .conf file */ 3504 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3505 sd_config_list, (caddr_t)&config_list, &config_list_len) 3506 != DDI_PROP_SUCCESS) { 3507 return (SD_FAILURE); 3508 } 3509 3510 /* 3511 * Compare vids in each duplet to the inquiry vid - if a match is 3512 * made, get the data value and update the soft state structure 3513 * accordingly. 3514 * 3515 * Note: This algorithm is complex and difficult to maintain. It should 3516 * be replaced with a more robust implementation. 3517 */ 3518 for (len = config_list_len, vidptr = config_list; len > 0; 3519 vidptr += dupletlen, len -= dupletlen) { 3520 /* 3521 * Note: The assumption here is that each vid entry is on 3522 * a unique line from its associated duplet. 3523 */ 3524 vidlen = dupletlen = (int)strlen(vidptr); 3525 if ((vidlen == 0) || 3526 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3527 dupletlen++; 3528 continue; 3529 } 3530 3531 /* 3532 * dnlist contains 1 or more blank separated 3533 * data-property-name entries 3534 */ 3535 dnlist_ptr = vidptr + vidlen + 1; 3536 dnlist_len = (int)strlen(dnlist_ptr); 3537 dupletlen += dnlist_len + 2; 3538 3539 /* 3540 * Set a pointer for the first data-property-name 3541 * entry in the list 3542 */ 3543 dataname_ptr = dnlist_ptr; 3544 dataname_len = 0; 3545 3546 /* 3547 * Loop through all data-property-name entries in the 3548 * data-property-name-list setting the properties for each. 3549 */ 3550 while (dataname_len < dnlist_len) { 3551 int version; 3552 3553 /* 3554 * Determine the length of the current 3555 * data-property-name entry by indexing until a 3556 * blank or NULL is encountered. When the space is 3557 * encountered reset it to a NULL for compliance 3558 * with ddi_getlongprop(). 
3559 */
3560 for (i = 0; ((dataname_ptr[i] != ' ') &&
3561 (dataname_ptr[i] != '\0')); i++) {
3562 ;
3563 }
3564
3565 dataname_len += i;
3566 /* If not NULL terminated, make it so */
3567 if (dataname_ptr[i] == ' ') {
3568 dataname_ptr[i] = '\0';
3569 }
3570 dataname_len++;
3571 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3572 "sd_process_sdconf_file: disk:%s, data:%s\n",
3573 vidptr, dataname_ptr);
3574
3575 /* Get the data list */
3576 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0,
3577 dataname_ptr, (caddr_t)&data_list, &data_list_len)
3578 != DDI_PROP_SUCCESS) {
3579 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3580 "sd_process_sdconf_file: data property (%s)"
3581 " has no value\n", dataname_ptr);
3582 dataname_ptr = dnlist_ptr + dataname_len;
3583 continue;
3584 }
3585
3586 version = data_list[0];
3587
3588 if (version == SD_CONF_VERSION_1) {
3589 sd_tunables values;
3590
3591 /* Set the properties */
3592 if (sd_chk_vers1_data(un, data_list[1],
3593 &data_list[2], data_list_len, dataname_ptr)
3594 == SD_SUCCESS) {
3595 sd_get_tunables_from_conf(un,
3596 data_list[1], &data_list[2],
3597 &values);
3598 sd_set_vers1_properties(un,
3599 data_list[1], &values);
3600 rval = SD_SUCCESS;
3601 } else {
3602 rval = SD_FAILURE;
3603 }
3604 } else {
3605 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3606 "data property %s version 0x%x is invalid.",
3607 dataname_ptr, version);
3608 rval = SD_FAILURE;
3609 }
3610 kmem_free(data_list, data_list_len);
3611 dataname_ptr = dnlist_ptr + dataname_len;
3612 }
3613 }
3614
3615 /* free up the memory allocated by ddi_getlongprop */
3616 if (config_list) {
3617 kmem_free(config_list, config_list_len);
3618 }
3619
3620 return (rval);
3621 }
3622
3623 /*
3624 * Function: sd_get_tunables_from_conf()
3625 *
3626 * Description: This function reads the data list from the sd.conf file
3627 * and pulls the values that can have numeric arguments,
3628 * placing each value in the appropriate sd_tunables member.
3629 * Since the order of the data list members varies across
3630 * platforms, this function reads them from the data list
3631 * in a platform-specific order and places them into the
3632 * correct sd_tunables member, which is consistent across
3633 * all platforms.
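 *
 * (A sketch of the indexing used below, assuming SD_CONF_BSET_THROTTLE
 * is bit 0 and SD_CONF_BSET_NRR_COUNT is bit 2: with flags = 0x5 the
 * loop reads sdt_throttle from data_list[0] and sdt_not_rdy_retries
 * from data_list[2], while data_list[1] is skipped; each value occupies
 * the slot whose index equals its flag's bit position.)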
3634 */
3635 static void
3636 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
3637 sd_tunables *values)
3638 {
3639 int i;
3640 int mask;
3641
3642 bzero(values, sizeof (sd_tunables));
3643
3644 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
3645
3646 mask = 1 << i;
3647 if (mask > flags) {
3648 break;
3649 }
3650
3651 switch (mask & flags) {
3652 case 0: /* This mask bit not set in flags */
3653 continue;
3654 case SD_CONF_BSET_THROTTLE:
3655 values->sdt_throttle = data_list[i];
3656 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3657 "sd_get_tunables_from_conf: throttle = %d\n",
3658 values->sdt_throttle);
3659 break;
3660 case SD_CONF_BSET_CTYPE:
3661 values->sdt_ctype = data_list[i];
3662 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3663 "sd_get_tunables_from_conf: ctype = %d\n",
3664 values->sdt_ctype);
3665 break;
3666 case SD_CONF_BSET_NRR_COUNT:
3667 values->sdt_not_rdy_retries = data_list[i];
3668 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3669 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
3670 values->sdt_not_rdy_retries);
3671 break;
3672 case SD_CONF_BSET_BSY_RETRY_COUNT:
3673 values->sdt_busy_retries = data_list[i];
3674 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3675 "sd_get_tunables_from_conf: busy_retries = %d\n",
3676 values->sdt_busy_retries);
3677 break;
3678 case SD_CONF_BSET_RST_RETRIES:
3679 values->sdt_reset_retries = data_list[i];
3680 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3681 "sd_get_tunables_from_conf: reset_retries = %d\n",
3682 values->sdt_reset_retries);
3683 break;
3684 case SD_CONF_BSET_RSV_REL_TIME:
3685 values->sdt_reserv_rel_time = data_list[i];
3686 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3687 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
3688 values->sdt_reserv_rel_time);
3689 break;
3690 case SD_CONF_BSET_MIN_THROTTLE:
3691 values->sdt_min_throttle = data_list[i];
3692 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3693 "sd_get_tunables_from_conf: min_throttle = %d\n",
3694 values->sdt_min_throttle);
3695 break;
3696 case SD_CONF_BSET_DISKSORT_DISABLED:
3697 values->sdt_disk_sort_dis = data_list[i];
3698 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3699 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
3700 values->sdt_disk_sort_dis);
3701 break;
3702 case SD_CONF_BSET_LUN_RESET_ENABLED:
3703 values->sdt_lun_reset_enable = data_list[i];
3704 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3705 "sd_get_tunables_from_conf: lun_reset_enable = %d\n",
3706 values->sdt_lun_reset_enable);
3707 break;
3708 case SD_CONF_BSET_CACHE_IS_NV:
3709 values->sdt_suppress_cache_flush = data_list[i];
3710 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3711 "sd_get_tunables_from_conf: "
3712 "suppress_cache_flush = %d\n",
3713 values->sdt_suppress_cache_flush);
3714 break;
3715 }
3716 }
3717 }
3718
3719 /*
3720 * Function: sd_process_sdconf_table
3721 *
3722 * Description: Search the static configuration table for a match on the
3723 * inquiry vid/pid and update the driver soft state structure
3724 * according to the table property values for the device.
3725 *
3726 * The form of a configuration table entry is:
3727 * <vid+pid>,<flags>,<property-data>
3728 * "SEAGATE ST42400N",1,0x40000,
3729 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
3730 *
3731 * Arguments: un - driver soft state (unit) structure
3732 */
3733
3734 static void
3735 sd_process_sdconf_table(struct sd_lun *un)
3736 {
3737 char *id = NULL;
3738 int table_index;
3739 int idlen;
3740
3741 ASSERT(un != NULL);
3742 for (table_index = 0; table_index < sd_disk_table_size;
3743 table_index++) {
3744 id = sd_disk_table[table_index].device_id;
3745 idlen = strlen(id);
3746 if (idlen == 0) {
3747 continue;
3748 }
3749
3750 /*
3751 * The static configuration table currently does not
3752 * implement version 10 properties. Additionally,
3753 * multiple data-property-name entries are not
3754 * implemented in the static configuration table.
3755 */
3756 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
3757 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3758 "sd_process_sdconf_table: disk %s\n", id);
3759 sd_set_vers1_properties(un,
3760 sd_disk_table[table_index].flags,
3761 sd_disk_table[table_index].properties);
3762 break;
3763 }
3764 }
3765 }
3766
3767
3768 /*
3769 * Function: sd_sdconf_id_match
3770 *
3771 * Description: This local function implements a case-insensitive vid/pid
3772 * comparison as well as the boundary cases of wild card and
3773 * multiple blanks.
3774 *
3775 * Note: An implicit assumption made here is that the scsi
3776 * inquiry structure will always keep the vid, pid and
3777 * revision strings in consecutive sequence, so they can be
3778 * read as a single string. If this assumption is not the
3779 * case, a separate string, to be used for the check, needs
3780 * to be built with these strings concatenated.
3781 *
3782 * Arguments: un - driver soft state (unit) structure
3783 * id - table or config file vid/pid
3784 * idlen - length of the vid/pid (bytes)
3785 *
3786 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
3787 * SD_FAILURE - Indicates no match with the inquiry vid/pid
3788 */
3789
3790 static int
3791 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
3792 {
3793 struct scsi_inquiry *sd_inq;
3794 int rval = SD_SUCCESS;
3795
3796 ASSERT(un != NULL);
3797 sd_inq = un->un_sd->sd_inq;
3798 ASSERT(id != NULL);
3799
3800 /*
3801 * We use the inq_vid as a pointer to a buffer containing the
3802 * vid and pid and use the entire vid/pid length of the table
3803 * entry for the comparison. This works because the inq_pid
3804 * data member follows inq_vid in the scsi_inquiry structure.
3805 */
3806 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
3807 /*
3808 * The user id string is compared to the inquiry vid/pid
3809 * using a case insensitive comparison and ignoring
3810 * multiple spaces.
3811 */
3812 rval = sd_blank_cmp(un, id, idlen);
3813 if (rval != SD_SUCCESS) {
3814 /*
3815 * User id strings that start and end with a "*"
3816 * are a special case. These do not have a
3817 * specific vendor, and the product string can
3818 * appear anywhere in the 16 byte PID portion of
3819 * the inquiry data. This is a simple strstr()
3820 * type search for the user id in the inquiry data.
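 *
 * (For example, a hypothetical table entry of "*ST32550*"
 * would match any inquiry whose 16-byte PID field contains
 * the substring "ST32550", regardless of the vendor field.)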
3821 */ 3822 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3823 char *pidptr = &id[1]; 3824 int i; 3825 int j; 3826 int pidstrlen = idlen - 2; 3827 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3828 pidstrlen; 3829 3830 if (j < 0) { 3831 return (SD_FAILURE); 3832 } 3833 for (i = 0; i < j; i++) { 3834 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3835 pidptr, pidstrlen) == 0) { 3836 rval = SD_SUCCESS; 3837 break; 3838 } 3839 } 3840 } 3841 } 3842 } 3843 return (rval); 3844 } 3845 3846 3847 /* 3848 * Function: sd_blank_cmp 3849 * 3850 * Description: If the id string starts and ends with a space, treat 3851 * multiple consecutive spaces as equivalent to a single 3852 * space. For example, this causes a sd_disk_table entry 3853 * of " NEC CDROM " to match a device's id string of 3854 * "NEC CDROM". 3855 * 3856 * Note: The success exit condition for this routine is if 3857 * the pointer to the table entry is '\0' and the cnt of 3858 * the inquiry length is zero. This will happen if the inquiry 3859 * string returned by the device is padded with spaces to be 3860 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3861 * SCSI spec states that the inquiry string is to be padded with 3862 * spaces. 3863 * 3864 * Arguments: un - driver soft state (unit) structure 3865 * id - table or config file vid/pid 3866 * idlen - length of the vid/pid (bytes) 3867 * 3868 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3869 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3870 */ 3871 3872 static int 3873 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3874 { 3875 char *p1; 3876 char *p2; 3877 int cnt; 3878 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3879 sizeof (SD_INQUIRY(un)->inq_pid); 3880 3881 ASSERT(un != NULL); 3882 p2 = un->un_sd->sd_inq->inq_vid; 3883 ASSERT(id != NULL); 3884 p1 = id; 3885 3886 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3887 /* 3888 * Note: string p1 is terminated by a NUL but string p2 3889 * isn't. The end of p2 is determined by cnt. 3890 */ 3891 for (;;) { 3892 /* skip over any extra blanks in both strings */ 3893 while ((*p1 != '\0') && (*p1 == ' ')) { 3894 p1++; 3895 } 3896 while ((cnt != 0) && (*p2 == ' ')) { 3897 p2++; 3898 cnt--; 3899 } 3900 3901 /* compare the two strings */ 3902 if ((cnt == 0) || 3903 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3904 break; 3905 } 3906 while ((cnt > 0) && 3907 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3908 p1++; 3909 p2++; 3910 cnt--; 3911 } 3912 } 3913 } 3914 3915 /* return SD_SUCCESS if both strings match */ 3916 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 3917 } 3918 3919 3920 /* 3921 * Function: sd_chk_vers1_data 3922 * 3923 * Description: Verify the version 1 device properties provided by the 3924 * user via the configuration file 3925 * 3926 * Arguments: un - driver soft state (unit) structure 3927 * flags - integer mask indicating properties to be set 3928 * prop_list - integer list of property values 3929 * list_len - length of user provided data 3930 * 3931 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3932 * SD_FAILURE - Indicates the user provided data is invalid 3933 */ 3934 3935 static int 3936 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3937 int list_len, char *dataname_ptr) 3938 { 3939 int i; 3940 int mask = 1; 3941 int index = 0; 3942 3943 ASSERT(un != NULL); 3944 3945 /* Check for a NULL property name and list */ 3946 if (dataname_ptr == NULL) { 3947 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3948 "sd_chk_vers1_data: NULL data property name."); 3949 return (SD_FAILURE); 3950 } 3951 if (prop_list == NULL) { 3952 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3953 "sd_chk_vers1_data: %s NULL data property list.", 3954 dataname_ptr); 3955 return (SD_FAILURE); 3956 } 3957 3958 /* Display a warning if undefined bits are set in the flags */ 3959 if (flags & ~SD_CONF_BIT_MASK) { 3960 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3961 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3962 "Properties not set.", 3963 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3964 return (SD_FAILURE); 3965 } 3966 3967 /* 3968 * Verify the length of the list by identifying the highest bit set 3969 * in the flags and validating that the property list has a length 3970 * up to the index of this bit. 3971 */ 3972 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3973 if (flags & mask) { 3974 index++; 3975 } 3976 mask = 1 << i; 3977 } 3978 if ((list_len / sizeof (int)) < (index + 2)) { 3979 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3980 "sd_chk_vers1_data: " 3981 "Data property list %s size is incorrect. " 3982 "Properties not set.", dataname_ptr); 3983 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3984 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3985 return (SD_FAILURE); 3986 } 3987 return (SD_SUCCESS); 3988 } 3989 3990 3991 /* 3992 * Function: sd_set_vers1_properties 3993 * 3994 * Description: Set version 1 device properties based on a property list 3995 * retrieved from the driver configuration file or static 3996 * configuration table. Version 1 properties have the format: 3997 * 3998 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3999 * 4000 * where the prop0 value will be used to set prop0 if bit0 4001 * is set in the flags 4002 * 4003 * Arguments: un - driver soft state (unit) structure 4004 * flags - integer mask indicating properties to be set 4005 * prop_list - integer list of property values 4006 */ 4007 4008 static void 4009 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4010 { 4011 ASSERT(un != NULL); 4012 4013 /* 4014 * Set the flag to indicate cache is to be disabled. An attempt 4015 * to disable the cache via sd_cache_control() will be made 4016 * later during attach once the basic initialization is complete. 
4017 */ 4018 if (flags & SD_CONF_BSET_NOCACHE) { 4019 un->un_f_opt_disable_cache = TRUE; 4020 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4021 "sd_set_vers1_properties: caching disabled flag set\n"); 4022 } 4023 4024 /* CD-specific configuration parameters */ 4025 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4026 un->un_f_cfg_playmsf_bcd = TRUE; 4027 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4028 "sd_set_vers1_properties: playmsf_bcd set\n"); 4029 } 4030 if (flags & SD_CONF_BSET_READSUB_BCD) { 4031 un->un_f_cfg_readsub_bcd = TRUE; 4032 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4033 "sd_set_vers1_properties: readsub_bcd set\n"); 4034 } 4035 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4036 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4037 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4038 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4039 } 4040 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4041 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4042 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4043 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4044 } 4045 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4046 un->un_f_cfg_no_read_header = TRUE; 4047 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4048 "sd_set_vers1_properties: no_read_header set\n"); 4049 } 4050 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4051 un->un_f_cfg_read_cd_xd4 = TRUE; 4052 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4053 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4054 } 4055 4056 /* Support for devices which do not have valid/unique serial numbers */ 4057 if (flags & SD_CONF_BSET_FAB_DEVID) { 4058 un->un_f_opt_fab_devid = TRUE; 4059 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4060 "sd_set_vers1_properties: fab_devid bit set\n"); 4061 } 4062 4063 /* Support for user throttle configuration */ 4064 if (flags & SD_CONF_BSET_THROTTLE) { 4065 ASSERT(prop_list != NULL); 4066 un->un_saved_throttle = un->un_throttle = 4067 prop_list->sdt_throttle; 4068 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4069 "sd_set_vers1_properties: throttle set to %d\n", 4070 prop_list->sdt_throttle); 4071 } 4072 4073 /* Set the per disk retry count according to the conf file or table. 
*/ 4074 if (flags & SD_CONF_BSET_NRR_COUNT) { 4075 ASSERT(prop_list != NULL); 4076 if (prop_list->sdt_not_rdy_retries) { 4077 un->un_notready_retry_count = 4078 prop_list->sdt_not_rdy_retries; 4079 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4080 "sd_set_vers1_properties: not ready retry count" 4081 " set to %d\n", un->un_notready_retry_count); 4082 } 4083 } 4084 4085 /* The controller type is reported for generic disk driver ioctls */ 4086 if (flags & SD_CONF_BSET_CTYPE) { 4087 ASSERT(prop_list != NULL); 4088 switch (prop_list->sdt_ctype) { 4089 case CTYPE_CDROM: 4090 un->un_ctype = prop_list->sdt_ctype; 4091 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4092 "sd_set_vers1_properties: ctype set to " 4093 "CTYPE_CDROM\n"); 4094 break; 4095 case CTYPE_CCS: 4096 un->un_ctype = prop_list->sdt_ctype; 4097 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4098 "sd_set_vers1_properties: ctype set to " 4099 "CTYPE_CCS\n"); 4100 break; 4101 case CTYPE_ROD: /* RW optical */ 4102 un->un_ctype = prop_list->sdt_ctype; 4103 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4104 "sd_set_vers1_properties: ctype set to " 4105 "CTYPE_ROD\n"); 4106 break; 4107 default: 4108 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4109 "sd_set_vers1_properties: Could not set " 4110 "invalid ctype value (%d)", 4111 prop_list->sdt_ctype); 4112 } 4113 } 4114 4115 /* Purple failover timeout */ 4116 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4117 ASSERT(prop_list != NULL); 4118 un->un_busy_retry_count = 4119 prop_list->sdt_busy_retries; 4120 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4121 "sd_set_vers1_properties: " 4122 "busy retry count set to %d\n", 4123 un->un_busy_retry_count); 4124 } 4125 4126 /* Purple reset retry count */ 4127 if (flags & SD_CONF_BSET_RST_RETRIES) { 4128 ASSERT(prop_list != NULL); 4129 un->un_reset_retry_count = 4130 prop_list->sdt_reset_retries; 4131 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4132 "sd_set_vers1_properties: " 4133 "reset retry count set to %d\n", 4134 un->un_reset_retry_count); 4135 } 4136 4137 /* Purple reservation release timeout */ 4138 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4139 ASSERT(prop_list != NULL); 4140 un->un_reserve_release_time = 4141 prop_list->sdt_reserv_rel_time; 4142 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4143 "sd_set_vers1_properties: " 4144 "reservation release timeout set to %d\n", 4145 un->un_reserve_release_time); 4146 } 4147 4148 /* 4149 * Driver flag telling the driver to verify that no commands are pending 4150 * for a device before issuing a Test Unit Ready. This is a workaround 4151 * for a firmware bug in some Seagate eliteI drives. 4152 */ 4153 if (flags & SD_CONF_BSET_TUR_CHECK) { 4154 un->un_f_cfg_tur_check = TRUE; 4155 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4156 "sd_set_vers1_properties: tur queue check set\n"); 4157 } 4158 4159 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4160 un->un_min_throttle = prop_list->sdt_min_throttle; 4161 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4162 "sd_set_vers1_properties: min throttle set to %d\n", 4163 un->un_min_throttle); 4164 } 4165 4166 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4167 un->un_f_disksort_disabled = 4168 (prop_list->sdt_disk_sort_dis != 0) ? 4169 TRUE : FALSE; 4170 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4171 "sd_set_vers1_properties: disksort disabled " 4172 "flag set to %d\n", 4173 prop_list->sdt_disk_sort_dis); 4174 } 4175 4176 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4177 un->un_f_lun_reset_enabled = 4178 (prop_list->sdt_lun_reset_enable != 0) ? 
4179 TRUE : FALSE; 4180 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4181 "sd_set_vers1_properties: lun reset enabled " 4182 "flag set to %d\n", 4183 prop_list->sdt_lun_reset_enable); 4184 } 4185 4186 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4187 un->un_f_suppress_cache_flush = 4188 (prop_list->sdt_suppress_cache_flush != 0) ? 4189 TRUE : FALSE; 4190 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4191 "sd_set_vers1_properties: suppress_cache_flush " 4192 "flag set to %d\n", 4193 prop_list->sdt_suppress_cache_flush); 4194 } 4195 4196 /* 4197 * Validate the throttle values. 4198 * If any of the numbers are invalid, set everything to defaults. 4199 */ 4200 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4201 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4202 (un->un_min_throttle > un->un_throttle)) { 4203 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4204 un->un_min_throttle = sd_min_throttle; 4205 } 4206 } 4207 4208 /* 4209 * Function: sd_is_lsi() 4210 * 4211 * Description: Check for lsi devices, step through the static device 4212 * table to match vid/pid. 4213 * 4214 * Args: un - ptr to sd_lun 4215 * 4216 * Notes: When creating new LSI property, need to add the new LSI property 4217 * to this function. 4218 */ 4219 static void 4220 sd_is_lsi(struct sd_lun *un) 4221 { 4222 char *id = NULL; 4223 int table_index; 4224 int idlen; 4225 void *prop; 4226 4227 ASSERT(un != NULL); 4228 for (table_index = 0; table_index < sd_disk_table_size; 4229 table_index++) { 4230 id = sd_disk_table[table_index].device_id; 4231 idlen = strlen(id); 4232 if (idlen == 0) { 4233 continue; 4234 } 4235 4236 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4237 prop = sd_disk_table[table_index].properties; 4238 if (prop == &lsi_properties || 4239 prop == &lsi_oem_properties || 4240 prop == &lsi_properties_scsi || 4241 prop == &symbios_properties) { 4242 un->un_f_cfg_is_lsi = TRUE; 4243 } 4244 break; 4245 } 4246 } 4247 } 4248 4249 /* 4250 * Function: sd_get_physical_geometry 4251 * 4252 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4253 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4254 * target, and use this information to initialize the physical 4255 * geometry cache specified by pgeom_p. 4256 * 4257 * MODE SENSE is an optional command, so failure in this case 4258 * does not necessarily denote an error. We want to use the 4259 * MODE SENSE commands to derive the physical geometry of the 4260 * device, but if either command fails, the logical geometry is 4261 * used as the fallback for disk label geometry in cmlb. 4262 * 4263 * This requires that un->un_blockcount and un->un_tgt_blocksize 4264 * have already been initialized for the current target and 4265 * that the current values be passed as args so that we don't 4266 * end up ever trying to use -1 as a valid value. This could 4267 * happen if either value is reset while we're not holding 4268 * the mutex. 4269 * 4270 * Arguments: un - driver soft state (unit) structure 4271 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4272 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4273 * to use the USCSI "direct" chain and bypass the normal 4274 * command waitq. 4275 * 4276 * Context: Kernel thread only (can sleep). 
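 *
 *		(A sketch of the MODE SENSE data parsed below: a mode
 *		header of MODE_HEADER_LENGTH bytes, or
 *		MODE_HEADER_LENGTH_GRP2 for ATAPI, is followed by an
 *		optional block descriptor of bd_len bytes and then the
 *		page 3 or page 4 data itself, which is why the page
 *		pointers below are computed as
 *		buffer + mode_header_length + bd_len.)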
4277 */ 4278 4279 static int 4280 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4281 diskaddr_t capacity, int lbasize, int path_flag) 4282 { 4283 struct mode_format *page3p; 4284 struct mode_geometry *page4p; 4285 struct mode_header *headerp; 4286 int sector_size; 4287 int nsect; 4288 int nhead; 4289 int ncyl; 4290 int intrlv; 4291 int spc; 4292 diskaddr_t modesense_capacity; 4293 int rpm; 4294 int bd_len; 4295 int mode_header_length; 4296 uchar_t *p3bufp; 4297 uchar_t *p4bufp; 4298 int cdbsize; 4299 int ret = EIO; 4300 4301 ASSERT(un != NULL); 4302 4303 if (lbasize == 0) { 4304 if (ISCD(un)) { 4305 lbasize = 2048; 4306 } else { 4307 lbasize = un->un_sys_blocksize; 4308 } 4309 } 4310 pgeom_p->g_secsize = (unsigned short)lbasize; 4311 4312 /* 4313 * If the unit is a cd/dvd drive MODE SENSE page three 4314 * and MODE SENSE page four are reserved (see SBC spec 4315 * and MMC spec). To prevent soft errors just return 4316 * using the default LBA size. 4317 */ 4318 if (ISCD(un)) 4319 return (ret); 4320 4321 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4322 4323 /* 4324 * Retrieve MODE SENSE page 3 - Format Device Page 4325 */ 4326 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4327 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4328 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4329 != 0) { 4330 SD_ERROR(SD_LOG_COMMON, un, 4331 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4332 goto page3_exit; 4333 } 4334 4335 /* 4336 * Determine size of Block Descriptors in order to locate the mode 4337 * page data. ATAPI devices return 0, SCSI devices should return 4338 * MODE_BLK_DESC_LENGTH. 4339 */ 4340 headerp = (struct mode_header *)p3bufp; 4341 if (un->un_f_cfg_is_atapi == TRUE) { 4342 struct mode_header_grp2 *mhp = 4343 (struct mode_header_grp2 *)headerp; 4344 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4345 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4346 } else { 4347 mode_header_length = MODE_HEADER_LENGTH; 4348 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4349 } 4350 4351 if (bd_len > MODE_BLK_DESC_LENGTH) { 4352 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4353 "received unexpected bd_len of %d, page3\n", bd_len); 4354 goto page3_exit; 4355 } 4356 4357 page3p = (struct mode_format *) 4358 ((caddr_t)headerp + mode_header_length + bd_len); 4359 4360 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4361 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4362 "mode sense pg3 code mismatch %d\n", 4363 page3p->mode_page.code); 4364 goto page3_exit; 4365 } 4366 4367 /* 4368 * Use this physical geometry data only if BOTH MODE SENSE commands 4369 * complete successfully; otherwise, revert to the logical geometry. 4370 * So, we need to save everything in temporary variables. 
4371 */ 4372 sector_size = BE_16(page3p->data_bytes_sect); 4373 4374 /* 4375 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4376 */ 4377 if (sector_size == 0) { 4378 sector_size = un->un_sys_blocksize; 4379 } else { 4380 sector_size &= ~(un->un_sys_blocksize - 1); 4381 } 4382 4383 nsect = BE_16(page3p->sect_track); 4384 intrlv = BE_16(page3p->interleave); 4385 4386 SD_INFO(SD_LOG_COMMON, un, 4387 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4388 SD_INFO(SD_LOG_COMMON, un, 4389 " mode page: %d; nsect: %d; sector size: %d;\n", 4390 page3p->mode_page.code, nsect, sector_size); 4391 SD_INFO(SD_LOG_COMMON, un, 4392 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4393 BE_16(page3p->track_skew), 4394 BE_16(page3p->cylinder_skew)); 4395 4396 4397 /* 4398 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4399 */ 4400 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4401 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4402 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4403 != 0) { 4404 SD_ERROR(SD_LOG_COMMON, un, 4405 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4406 goto page4_exit; 4407 } 4408 4409 /* 4410 * Determine size of Block Descriptors in order to locate the mode 4411 * page data. ATAPI devices return 0, SCSI devices should return 4412 * MODE_BLK_DESC_LENGTH. 4413 */ 4414 headerp = (struct mode_header *)p4bufp; 4415 if (un->un_f_cfg_is_atapi == TRUE) { 4416 struct mode_header_grp2 *mhp = 4417 (struct mode_header_grp2 *)headerp; 4418 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4419 } else { 4420 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4421 } 4422 4423 if (bd_len > MODE_BLK_DESC_LENGTH) { 4424 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4425 "received unexpected bd_len of %d, page4\n", bd_len); 4426 goto page4_exit; 4427 } 4428 4429 page4p = (struct mode_geometry *) 4430 ((caddr_t)headerp + mode_header_length + bd_len); 4431 4432 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4433 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4434 "mode sense pg4 code mismatch %d\n", 4435 page4p->mode_page.code); 4436 goto page4_exit; 4437 } 4438 4439 /* 4440 * Stash the data now, after we know that both commands completed. 4441 */ 4442 4443 4444 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4445 spc = nhead * nsect; 4446 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4447 rpm = BE_16(page4p->rpm); 4448 4449 modesense_capacity = spc * ncyl; 4450 4451 SD_INFO(SD_LOG_COMMON, un, 4452 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4453 SD_INFO(SD_LOG_COMMON, un, 4454 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4455 SD_INFO(SD_LOG_COMMON, un, 4456 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4457 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4458 (void *)pgeom_p, capacity); 4459 4460 /* 4461 * Compensate if the drive's geometry is not rectangular, i.e., 4462 * the product of C * H * S returned by MODE SENSE >= that returned 4463 * by read capacity. This is an idiosyncrasy of the original x86 4464 * disk subsystem. 
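 *
 * (Worked example: if page 4 reports 16 heads and 10000 cylinders and
 * page 3 reports 63 sectors/track (spc = 1008), modesense_capacity is
 * 10,080,000 blocks; with a READ CAPACITY of 10,000,000 blocks the
 * code below computes g_acyl = (80,000 + 1007) / 1008 = 80 alternate
 * cylinders and g_ncyl = 10000 - 80 = 9920.)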
4465 */ 4466 if (modesense_capacity >= capacity) { 4467 SD_INFO(SD_LOG_COMMON, un, 4468 "sd_get_physical_geometry: adjusting acyl; " 4469 "old: %d; new: %d\n", pgeom_p->g_acyl, 4470 (modesense_capacity - capacity + spc - 1) / spc); 4471 if (sector_size != 0) { 4472 /* 1243403: NEC D38x7 drives don't support sec size */ 4473 pgeom_p->g_secsize = (unsigned short)sector_size; 4474 } 4475 pgeom_p->g_nsect = (unsigned short)nsect; 4476 pgeom_p->g_nhead = (unsigned short)nhead; 4477 pgeom_p->g_capacity = capacity; 4478 pgeom_p->g_acyl = 4479 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4480 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4481 } 4482 4483 pgeom_p->g_rpm = (unsigned short)rpm; 4484 pgeom_p->g_intrlv = (unsigned short)intrlv; 4485 ret = 0; 4486 4487 SD_INFO(SD_LOG_COMMON, un, 4488 "sd_get_physical_geometry: mode sense geometry:\n"); 4489 SD_INFO(SD_LOG_COMMON, un, 4490 " nsect: %d; sector size: %d; interlv: %d\n", 4491 nsect, sector_size, intrlv); 4492 SD_INFO(SD_LOG_COMMON, un, 4493 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4494 nhead, ncyl, rpm, modesense_capacity); 4495 SD_INFO(SD_LOG_COMMON, un, 4496 "sd_get_physical_geometry: (cached)\n"); 4497 SD_INFO(SD_LOG_COMMON, un, 4498 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4499 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4500 pgeom_p->g_nhead, pgeom_p->g_nsect); 4501 SD_INFO(SD_LOG_COMMON, un, 4502 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4503 pgeom_p->g_secsize, pgeom_p->g_capacity, 4504 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4505 4506 page4_exit: 4507 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4508 page3_exit: 4509 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4510 4511 return (ret); 4512 } 4513 4514 /* 4515 * Function: sd_get_virtual_geometry 4516 * 4517 * Description: Ask the controller to tell us about the target device. 4518 * 4519 * Arguments: un - pointer to softstate 4520 * capacity - disk capacity in #blocks 4521 * lbasize - disk block size in bytes 4522 * 4523 * Context: Kernel thread only 4524 */ 4525 4526 static int 4527 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4528 diskaddr_t capacity, int lbasize) 4529 { 4530 uint_t geombuf; 4531 int spc; 4532 4533 ASSERT(un != NULL); 4534 4535 /* Set sector size, and total number of sectors */ 4536 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4537 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4538 4539 /* Let the HBA tell us its geometry */ 4540 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4541 4542 /* A value of -1 indicates an undefined "geometry" property */ 4543 if (geombuf == (-1)) { 4544 return (EINVAL); 4545 } 4546 4547 /* Initialize the logical geometry cache. */ 4548 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4549 lgeom_p->g_nsect = geombuf & 0xffff; 4550 lgeom_p->g_secsize = un->un_sys_blocksize; 4551 4552 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4553 4554 /* 4555 * Note: The driver originally converted the capacity value from 4556 * target blocks to system blocks. However, the capacity value passed 4557 * to this routine is already in terms of system blocks (this scaling 4558 * is done when the READ CAPACITY command is issued and processed). 4559 * This 'error' may have gone undetected because the usage of g_ncyl 4560 * (which is based upon g_capacity) is very limited within the driver 4561 */ 4562 lgeom_p->g_capacity = capacity; 4563 4564 /* 4565 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The
4566 * hba may return zero values if the device has been removed.
4567 */
4568 if (spc == 0) {
4569 lgeom_p->g_ncyl = 0;
4570 } else {
4571 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
4572 }
4573 lgeom_p->g_acyl = 0;
4574
4575 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
4576 return (0);
4577
4578 }
4579 /*
4580 * Function: sd_update_block_info
4581 *
4582 * Description: Update the driver soft state with a newly reported target
4583 * block size and capacity, and export the capacity properties.
4584 *
4585 * Arguments: un: unit struct.
4586 * lbasize: new target sector size
4587 * capacity: new target capacity, i.e., block count
4588 *
4589 * Context: Kernel thread context
4590 */
4591
4592 static void
4593 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
4594 {
4595 uint_t dblk;
4596
4597 if (lbasize != 0) {
4598 un->un_tgt_blocksize = lbasize;
4599 un->un_f_tgt_blocksize_is_valid = TRUE;
4600 }
4601
4602 if (capacity != 0) {
4603 un->un_blockcount = capacity;
4604 un->un_f_blockcount_is_valid = TRUE;
4605 }
4606
4607 /*
4608 * Update device capacity properties.
4609 *
4610 * 'device-nblocks' number of blocks in target's units
4611 * 'device-blksize' data bearing size of target's block
4612 *
4613 * NOTE: math is complicated by the fact that un_tgt_blocksize may
4614 * not be a power of two for checksumming disks with 520/528 byte
4615 * sectors.
4616 */
4617 if (un->un_f_tgt_blocksize_is_valid &&
4618 un->un_f_blockcount_is_valid &&
4619 un->un_sys_blocksize) {
4620 dblk = un->un_tgt_blocksize / un->un_sys_blocksize;
4621 (void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un),
4622 "device-nblocks", un->un_blockcount / dblk);
4623 /*
4624 * To save memory, only define "device-blksize" when its
4625 * value is different from the default DEV_BSIZE value.
4626 */
4627 if ((un->un_sys_blocksize * dblk) != DEV_BSIZE)
4628 (void) ddi_prop_update_int(DDI_DEV_T_NONE,
4629 SD_DEVINFO(un), "device-blksize",
4630 un->un_sys_blocksize * dblk);
4631 }
4632 }
4633
4634
4635 /*
4636 * Function: sd_register_devid
4637 *
4638 * Description: This routine will obtain the device id information from the
4639 * target, obtain the serial number, and register the device
4640 * id with the ddi framework.
4641 *
4642 * Arguments: devi - the system's dev_info_t for the device.
4643 * un - driver soft state (unit) structure
4644 * reservation_flag - indicates if a reservation conflict
4645 * occurred during attach
4646 *
4647 * Context: Kernel Thread
4648 */
4649 static void
4650 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag)
4651 {
4652 int rval = 0;
4653 uchar_t *inq80 = NULL;
4654 size_t inq80_len = MAX_INQUIRY_SIZE;
4655 size_t inq80_resid = 0;
4656 uchar_t *inq83 = NULL;
4657 size_t inq83_len = MAX_INQUIRY_SIZE;
4658 size_t inq83_resid = 0;
4659 int dlen, len;
4660 char *sn;
4661
4662 ASSERT(un != NULL);
4663 ASSERT(mutex_owned(SD_MUTEX(un)));
4664 ASSERT((SD_DEVINFO(un)) == devi);
4665
4666 /*
4667 * If transport has already registered a devid for this target
4668 * then that takes precedence over the driver's determination
4669 * of the devid.
4670 */
4671 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
4672 ASSERT(un->un_devid);
4673 return; /* use devid registered by the transport */
4674 }
4675
4676 /*
4677 * This is the case of antiquated Sun disk drives that have the
4678 * FAB_DEVID property set in the disk_table.
These drives
4679 * manage their devids by storing them in the last two available
4680 * sectors on the drive and have them fabricated by the ddi layer by
4681 * calling ddi_devid_init and passing the DEVID_FAB flag.
4682 */
4683 if (un->un_f_opt_fab_devid == TRUE) {
4684 /*
4685 * Depending on EINVAL isn't reliable, since a reserved disk
4686 * may result in invalid geometry, so check to make sure a
4687 * reservation conflict did not occur during attach.
4688 */
4689 if ((sd_get_devid(un) == EINVAL) &&
4690 (reservation_flag != SD_TARGET_IS_RESERVED)) {
4691 /*
4692 * The devid is invalid AND there is no reservation
4693 * conflict. Fabricate a new devid.
4694 */
4695 (void) sd_create_devid(un);
4696 }
4697
4698 /* Register the devid if it exists */
4699 if (un->un_devid != NULL) {
4700 (void) ddi_devid_register(SD_DEVINFO(un),
4701 un->un_devid);
4702 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4703 "sd_register_devid: Devid Fabricated\n");
4704 }
4705 return;
4706 }
4707
4708 /*
4709 * We check the availability of the World Wide Name (0x83) and Unit
4710 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
4711 * un_vpd_page_mask from them, we decide which way to get the WWN. If
4712 * 0x83 is available, that is the best choice. Our next choice is
4713 * 0x80. If neither is available, we munge the devid from the device
4714 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
4715 * to fabricate a devid for non-Sun qualified disks.
4716 */
4717 if (sd_check_vpd_page_support(un) == 0) {
4718 /* collect page 80 data if available */
4719 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
4720
4721 mutex_exit(SD_MUTEX(un));
4722 inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
4723 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
4724 0x01, 0x80, &inq80_resid);
4725
4726 if (rval != 0) {
4727 kmem_free(inq80, inq80_len);
4728 inq80 = NULL;
4729 inq80_len = 0;
4730 } else if (ddi_prop_exists(
4731 DDI_DEV_T_NONE, SD_DEVINFO(un),
4732 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
4733 INQUIRY_SERIAL_NO) == 0) {
4734 /*
4735 * If we don't already have a serial number
4736 * property, do a quick verify of the data
4737 * returned and define the property.
4738 */
4739 dlen = inq80_len - inq80_resid;
4740 len = (size_t)inq80[3];
4741 if ((dlen >= 4) && ((len + 4) <= dlen)) {
4742 /*
4743 * Ensure sn termination, skip leading
4744 * blanks, and create property
4745 * 'inquiry-serial-no'.
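 * (Per the SPC Unit Serial Number VPD page layout assumed
 * here, inq80[3] holds the page length and the serial
 * number text begins at inq80[4].)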
4746 */
4747 sn = (char *)&inq80[4];
4748 sn[len] = 0;
4749 while (*sn && (*sn == ' '))
4750 sn++;
4751 if (*sn) {
4752 (void) ddi_prop_update_string(
4753 DDI_DEV_T_NONE,
4754 SD_DEVINFO(un),
4755 INQUIRY_SERIAL_NO, sn);
4756 }
4757 }
4758 }
4759 mutex_enter(SD_MUTEX(un));
4760 }
4761
4762 /* collect page 83 data if available */
4763 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
4764 mutex_exit(SD_MUTEX(un));
4765 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
4766 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len,
4767 0x01, 0x83, &inq83_resid);
4768
4769 if (rval != 0) {
4770 kmem_free(inq83, inq83_len);
4771 inq83 = NULL;
4772 inq83_len = 0;
4773 }
4774 mutex_enter(SD_MUTEX(un));
4775 }
4776 }
4777
4778 /* encode best devid possible based on data available */
4779 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
4780 (char *)ddi_driver_name(SD_DEVINFO(un)),
4781 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
4782 inq80, inq80_len - inq80_resid, inq83, inq83_len -
4783 inq83_resid, &un->un_devid) == DDI_SUCCESS) {
4784
4785 /* devid successfully encoded, register devid */
4786 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
4787
4788 } else {
4789 /*
4790 * Unable to encode a devid based on the data available.
4791 * This is not a Sun qualified disk. Older Sun disk
4792 * drives that have the SD_FAB_DEVID property
4793 * set in the disk_table and non-Sun qualified
4794 * disks are treated in the same manner. These
4795 * drives manage their devids by storing them in
4796 * the last two available sectors on the drive and
4797 * have them fabricated by the ddi layer by
4798 * calling ddi_devid_init and passing the
4799 * DEVID_FAB flag.
4800 * Only create a fabricated devid if one does not
4801 * already exist.
4802 */
4803 if (sd_get_devid(un) == EINVAL) {
4804 (void) sd_create_devid(un);
4805 }
4806 un->un_f_opt_fab_devid = TRUE;
4807
4808 /* Register the devid if it exists */
4809 if (un->un_devid != NULL) {
4810 (void) ddi_devid_register(SD_DEVINFO(un),
4811 un->un_devid);
4812 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4813 "sd_register_devid: devid fabricated using "
4814 "ddi framework\n");
4815 }
4816 }
4817
4818 /* clean up resources */
4819 if (inq80 != NULL) {
4820 kmem_free(inq80, inq80_len);
4821 }
4822 if (inq83 != NULL) {
4823 kmem_free(inq83, inq83_len);
4824 }
4825 }
4826
4827
4828
4829 /*
4830 * Function: sd_get_devid
4831 *
4832 * Description: This routine will return 0 if a valid device id has been
4833 * obtained from the target and stored in the soft state. If a
4834 * valid device id has not been previously read and stored, a
4835 * read attempt will be made.
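 *
 * (The on-disk layout read here is a single block holding a
 * struct dk_devid: a two-byte revision, the encoded devid, and
 * an XOR checksum of the block's leading words stored in its
 * final word; the revision and checksum are validated below
 * before the devid is accepted.)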
4836 * 4837 * Arguments: un - driver soft state (unit) structure 4838 * 4839 * Return Code: 0 if we successfully get the device id 4840 * 4841 * Context: Kernel Thread 4842 */ 4843 4844 static int 4845 sd_get_devid(struct sd_lun *un) 4846 { 4847 struct dk_devid *dkdevid; 4848 ddi_devid_t tmpid; 4849 uint_t *ip; 4850 size_t sz; 4851 diskaddr_t blk; 4852 int status; 4853 int chksum; 4854 int i; 4855 size_t buffer_size; 4856 4857 ASSERT(un != NULL); 4858 ASSERT(mutex_owned(SD_MUTEX(un))); 4859 4860 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4861 un); 4862 4863 if (un->un_devid != NULL) { 4864 return (0); 4865 } 4866 4867 mutex_exit(SD_MUTEX(un)); 4868 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4869 (void *)SD_PATH_DIRECT) != 0) { 4870 mutex_enter(SD_MUTEX(un)); 4871 return (EINVAL); 4872 } 4873 4874 /* 4875 * Read and verify device id, stored in the reserved cylinders at the 4876 * end of the disk. Backup label is on the odd sectors of the last 4877 * track of the last cylinder. Device id will be on track of the next 4878 * to last cylinder. 4879 */ 4880 mutex_enter(SD_MUTEX(un)); 4881 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4882 mutex_exit(SD_MUTEX(un)); 4883 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4884 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4885 SD_PATH_DIRECT); 4886 if (status != 0) { 4887 goto error; 4888 } 4889 4890 /* Validate the revision */ 4891 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4892 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4893 status = EINVAL; 4894 goto error; 4895 } 4896 4897 /* Calculate the checksum */ 4898 chksum = 0; 4899 ip = (uint_t *)dkdevid; 4900 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4901 i++) { 4902 chksum ^= ip[i]; 4903 } 4904 4905 /* Compare the checksums */ 4906 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4907 status = EINVAL; 4908 goto error; 4909 } 4910 4911 /* Validate the device id */ 4912 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4913 status = EINVAL; 4914 goto error; 4915 } 4916 4917 /* 4918 * Store the device id in the driver soft state 4919 */ 4920 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4921 tmpid = kmem_alloc(sz, KM_SLEEP); 4922 4923 mutex_enter(SD_MUTEX(un)); 4924 4925 un->un_devid = tmpid; 4926 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4927 4928 kmem_free(dkdevid, buffer_size); 4929 4930 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4931 4932 return (status); 4933 error: 4934 mutex_enter(SD_MUTEX(un)); 4935 kmem_free(dkdevid, buffer_size); 4936 return (status); 4937 } 4938 4939 4940 /* 4941 * Function: sd_create_devid 4942 * 4943 * Description: This routine will fabricate the device id and write it 4944 * to the disk. 
4945 *
4946 * Arguments: un - driver soft state (unit) structure
4947 *
4948 * Return Code: value of the fabricated device id
4949 *
4950 * Context: Kernel Thread
4951 */
4952
4953 static ddi_devid_t
4954 sd_create_devid(struct sd_lun *un)
4955 {
4956 ASSERT(un != NULL);
4957
4958 /* Fabricate the devid */
4959 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
4960 == DDI_FAILURE) {
4961 return (NULL);
4962 }
4963
4964 /* Write the devid to disk */
4965 if (sd_write_deviceid(un) != 0) {
4966 ddi_devid_free(un->un_devid);
4967 un->un_devid = NULL;
4968 }
4969
4970 return (un->un_devid);
4971 }
4972
4973
4974 /*
4975 * Function: sd_write_deviceid
4976 *
4977 * Description: This routine will write the device id to the disk
4978 * reserved sector.
4979 *
4980 * Arguments: un - driver soft state (unit) structure
4981 *
4982 * Return Code: -1 - the devid block could not be obtained
4983 * value returned by sd_send_scsi_WRITE
4984 *
4985 * Context: Kernel Thread
4986 */
4987
4988 static int
4989 sd_write_deviceid(struct sd_lun *un)
4990 {
4991 struct dk_devid *dkdevid;
4992 diskaddr_t blk;
4993 uint_t *ip, chksum;
4994 int status;
4995 int i;
4996
4997 ASSERT(mutex_owned(SD_MUTEX(un)));
4998
4999 mutex_exit(SD_MUTEX(un));
5000 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5001 (void *)SD_PATH_DIRECT) != 0) {
5002 mutex_enter(SD_MUTEX(un));
5003 return (-1);
5004 }
5005
5006
5007 /* Allocate the buffer */
5008 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5009
5010 /* Fill in the revision */
5011 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5012 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5013
5014 /* Copy in the device id */
5015 mutex_enter(SD_MUTEX(un));
5016 bcopy(un->un_devid, &dkdevid->dkd_devid,
5017 ddi_devid_sizeof(un->un_devid));
5018 mutex_exit(SD_MUTEX(un));
5019
5020 /* Calculate the checksum */
5021 chksum = 0;
5022 ip = (uint_t *)dkdevid;
5023 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
5024 i++) {
5025 chksum ^= ip[i];
5026 }
5027
5028 /* Fill-in checksum */
5029 DKD_FORMCHKSUM(chksum, dkdevid);
5030
5031 /* Write the reserved sector */
5032 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk,
5033 SD_PATH_DIRECT);
5034
5035 kmem_free(dkdevid, un->un_sys_blocksize);
5036
5037 mutex_enter(SD_MUTEX(un));
5038 return (status);
5039 }
5040
5041
5042 /*
5043 * Function: sd_check_vpd_page_support
5044 *
5045 * Description: This routine sends an inquiry command with the EVPD bit set
5046 * and a page code of 0x00 to the device. It is used to determine
5047 * which vital product pages are available to find the devid. We
5048 * are looking for pages 0x83 or 0x80. If we return -1, the
5049 * device does not support that command.
5050 *
5051 * Arguments: un - driver soft state (unit) structure
5052 *
5053 * Return Code: 0 - success
5054 * -1 - check condition or VPD pages not supported
5055 *
5056 * Context: This routine can sleep.
5057 */
5058
5059 static int
5060 sd_check_vpd_page_support(struct sd_lun *un)
5061 {
5062 uchar_t *page_list = NULL;
5063 uchar_t page_length = 0xff; /* Use max possible length */
5064 uchar_t evpd = 0x01; /* Set the EVPD bit */
5065 uchar_t page_code = 0x00; /* Supported VPD Pages */
5066 int rval = 0;
5067 int counter;
5068
5069 ASSERT(un != NULL);
5070 ASSERT(mutex_owned(SD_MUTEX(un)));
5071
5072 mutex_exit(SD_MUTEX(un));
5073
5074 /*
5075 * We'll set the page length to the maximum to save figuring it out
5076 * with an additional call.
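 *
 * (Per the SPC "Supported VPD Pages" page layout relied on below,
 * byte 3 of the response gives the number of page codes that
 * follow, and the codes are listed in ascending order starting
 * at byte 4.)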
5077 */ 5078 page_list = kmem_zalloc(page_length, KM_SLEEP); 5079 5080 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5081 page_code, NULL); 5082 5083 mutex_enter(SD_MUTEX(un)); 5084 5085 /* 5086 * Now we must validate that the device accepted the command, as some 5087 * drives do not support it. If the drive does support it, we will 5088 * return 0, and the supported pages will be in un_vpd_page_mask. If 5089 * not, we return -1. 5090 */ 5091 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5092 /* Loop to find one of the 2 pages we need */ 5093 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5094 5095 /* 5096 * Pages are returned in ascending order, and 0x83 is what we 5097 * are hoping for. 5098 */ 5099 while ((page_list[counter] <= 0x86) && 5100 (counter <= (page_list[VPD_PAGE_LENGTH] + 5101 VPD_HEAD_OFFSET))) { 5102 /* 5103 * Add 3 because page_list[3] is the number of 5104 * pages minus 3 5105 */ 5106 5107 switch (page_list[counter]) { 5108 case 0x00: 5109 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5110 break; 5111 case 0x80: 5112 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5113 break; 5114 case 0x81: 5115 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5116 break; 5117 case 0x82: 5118 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5119 break; 5120 case 0x83: 5121 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5122 break; 5123 case 0x86: 5124 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5125 break; 5126 } 5127 counter++; 5128 } 5129 5130 } else { 5131 rval = -1; 5132 5133 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5134 "sd_check_vpd_page_support: This drive does not implement " 5135 "VPD pages.\n"); 5136 } 5137 5138 kmem_free(page_list, page_length); 5139 5140 return (rval); 5141 } 5142 5143 5144 /* 5145 * Function: sd_setup_pm 5146 * 5147 * Description: Initialize Power Management on the device 5148 * 5149 * Context: Kernel Thread 5150 */ 5151 5152 static void 5153 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5154 { 5155 uint_t log_page_size; 5156 uchar_t *log_page_data; 5157 int rval; 5158 5159 /* 5160 * Since we are called from attach, holding a mutex for 5161 * un is unnecessary. Because some of the routines called 5162 * from here require SD_MUTEX to not be held, assert this 5163 * right up front. 5164 */ 5165 ASSERT(!mutex_owned(SD_MUTEX(un))); 5166 /* 5167 * Since the sd device does not have the 'reg' property, 5168 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5169 * The following code is to tell cpr that this device 5170 * DOES need to be suspended and resumed. 5171 */ 5172 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5173 "pm-hardware-state", "needs-suspend-resume"); 5174 5175 /* 5176 * This complies with the new power management framework 5177 * for certain desktop machines. Create the pm_components 5178 * property as a string array property. 5179 */ 5180 if (un->un_f_pm_supported) { 5181 /* 5182 * not all devices have a motor, try it first. 5183 * some devices may return ILLEGAL REQUEST, some 5184 * will hang 5185 * The following START_STOP_UNIT is used to check if target 5186 * device has a motor. 
5187 */ 5188 un->un_f_start_stop_supported = TRUE; 5189 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5190 SD_PATH_DIRECT) != 0) { 5191 un->un_f_start_stop_supported = FALSE; 5192 } 5193 5194 /* 5195 * create pm properties anyways otherwise the parent can't 5196 * go to sleep 5197 */ 5198 (void) sd_create_pm_components(devi, un); 5199 un->un_f_pm_is_enabled = TRUE; 5200 return; 5201 } 5202 5203 if (!un->un_f_log_sense_supported) { 5204 un->un_power_level = SD_SPINDLE_ON; 5205 un->un_f_pm_is_enabled = FALSE; 5206 return; 5207 } 5208 5209 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5210 5211 #ifdef SDDEBUG 5212 if (sd_force_pm_supported) { 5213 /* Force a successful result */ 5214 rval = 1; 5215 } 5216 #endif 5217 5218 /* 5219 * If the start-stop cycle counter log page is not supported 5220 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5221 * then we should not create the pm_components property. 5222 */ 5223 if (rval == -1) { 5224 /* 5225 * Error. 5226 * Reading log sense failed, most likely this is 5227 * an older drive that does not support log sense. 5228 * If this fails auto-pm is not supported. 5229 */ 5230 un->un_power_level = SD_SPINDLE_ON; 5231 un->un_f_pm_is_enabled = FALSE; 5232 5233 } else if (rval == 0) { 5234 /* 5235 * Page not found. 5236 * The start stop cycle counter is implemented as page 5237 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5238 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5239 */ 5240 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5241 /* 5242 * Page found, use this one. 5243 */ 5244 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5245 un->un_f_pm_is_enabled = TRUE; 5246 } else { 5247 /* 5248 * Error or page not found. 5249 * auto-pm is not supported for this device. 5250 */ 5251 un->un_power_level = SD_SPINDLE_ON; 5252 un->un_f_pm_is_enabled = FALSE; 5253 } 5254 } else { 5255 /* 5256 * Page found, use it. 5257 */ 5258 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5259 un->un_f_pm_is_enabled = TRUE; 5260 } 5261 5262 5263 if (un->un_f_pm_is_enabled == TRUE) { 5264 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5265 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5266 5267 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5268 log_page_size, un->un_start_stop_cycle_page, 5269 0x01, 0, SD_PATH_DIRECT); 5270 #ifdef SDDEBUG 5271 if (sd_force_pm_supported) { 5272 /* Force a successful result */ 5273 rval = 0; 5274 } 5275 #endif 5276 5277 /* 5278 * If the Log sense for Page( Start/stop cycle counter page) 5279 * succeeds, then power managment is supported and we can 5280 * enable auto-pm. 5281 */ 5282 if (rval == 0) { 5283 (void) sd_create_pm_components(devi, un); 5284 } else { 5285 un->un_power_level = SD_SPINDLE_ON; 5286 un->un_f_pm_is_enabled = FALSE; 5287 } 5288 5289 kmem_free(log_page_data, log_page_size); 5290 } 5291 } 5292 5293 5294 /* 5295 * Function: sd_create_pm_components 5296 * 5297 * Description: Initialize PM property. 5298 * 5299 * Context: Kernel thread context 5300 */ 5301 5302 static void 5303 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5304 { 5305 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5306 5307 ASSERT(!mutex_owned(SD_MUTEX(un))); 5308 5309 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5310 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5311 /* 5312 * When components are initially created they are idle, 5313 * power up any non-removables. 
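 *
 * [Editor's aside: an illustrative sketch, not part of the original
 * driver.] The auto-pm decision in sd_setup_pm() above reduces to
 * choosing a start/stop cycle counter log page, or none at all, under
 * the sd_log_page_supported() convention (1 = found, 0 = not found,
 * -1 = log sense failed):
 *
 *	static int
 *	choose_start_stop_page(struct sd_lun *un)
 *	{
 *		switch (sd_log_page_supported(un, START_STOP_CYCLE_PAGE)) {
 *		case 1:
 *			return (START_STOP_CYCLE_PAGE);
 *		case 0:
 *			if (sd_log_page_supported(un,
 *			    START_STOP_CYCLE_VU_PAGE) == 1)
 *				return (START_STOP_CYCLE_VU_PAGE);
 *		default:
 *			return (-1);
 *		}
 *	}
 *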
5314 * Note: the return value of pm_raise_power can't be used 5315 * for determining if PM should be enabled for this device. 5316 * Even if you check the return values and remove this 5317 * property created above, the PM framework will not honor the 5318 * change after the first call to pm_raise_power. Hence, 5319 * removal of that property does not help if pm_raise_power 5320 * fails. In the case of removable media, the start/stop 5321 * will fail if the media is not present. 5322 */ 5323 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5324 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5325 mutex_enter(SD_MUTEX(un)); 5326 un->un_power_level = SD_SPINDLE_ON; 5327 mutex_enter(&un->un_pm_mutex); 5328 /* Set to on and not busy. */ 5329 un->un_pm_count = 0; 5330 } else { 5331 mutex_enter(SD_MUTEX(un)); 5332 un->un_power_level = SD_SPINDLE_OFF; 5333 mutex_enter(&un->un_pm_mutex); 5334 /* Set to off. */ 5335 un->un_pm_count = -1; 5336 } 5337 mutex_exit(&un->un_pm_mutex); 5338 mutex_exit(SD_MUTEX(un)); 5339 } else { 5340 un->un_power_level = SD_SPINDLE_ON; 5341 un->un_f_pm_is_enabled = FALSE; 5342 } 5343 } 5344 5345 5346 /* 5347 * Function: sd_ddi_suspend 5348 * 5349 * Description: Performs system power-down operations. This includes 5350 * setting the drive state to indicate its suspended so 5351 * that no new commands will be accepted. Also, wait for 5352 * all commands that are in transport or queued to a timer 5353 * for retry to complete. All timeout threads are cancelled. 5354 * 5355 * Return Code: DDI_FAILURE or DDI_SUCCESS 5356 * 5357 * Context: Kernel thread context 5358 */ 5359 5360 static int 5361 sd_ddi_suspend(dev_info_t *devi) 5362 { 5363 struct sd_lun *un; 5364 clock_t wait_cmds_complete; 5365 5366 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5367 if (un == NULL) { 5368 return (DDI_FAILURE); 5369 } 5370 5371 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5372 5373 mutex_enter(SD_MUTEX(un)); 5374 5375 /* Return success if the device is already suspended. */ 5376 if (un->un_state == SD_STATE_SUSPENDED) { 5377 mutex_exit(SD_MUTEX(un)); 5378 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5379 "device already suspended, exiting\n"); 5380 return (DDI_SUCCESS); 5381 } 5382 5383 /* Return failure if the device is being used by HA */ 5384 if (un->un_resvd_status & 5385 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5386 mutex_exit(SD_MUTEX(un)); 5387 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5388 "device in use by HA, exiting\n"); 5389 return (DDI_FAILURE); 5390 } 5391 5392 /* 5393 * Return failure if the device is in a resource wait 5394 * or power changing state. 5395 */ 5396 if ((un->un_state == SD_STATE_RWAIT) || 5397 (un->un_state == SD_STATE_PM_CHANGING)) { 5398 mutex_exit(SD_MUTEX(un)); 5399 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5400 "device in resource wait state, exiting\n"); 5401 return (DDI_FAILURE); 5402 } 5403 5404 5405 un->un_save_state = un->un_last_state; 5406 New_state(un, SD_STATE_SUSPENDED); 5407 5408 /* 5409 * Wait for all commands that are in transport or queued to a timer 5410 * for retry to complete. 5411 * 5412 * While waiting, no new commands will be accepted or sent because of 5413 * the new state we set above. 5414 * 5415 * Wait till current operation has completed. If we are in the resource 5416 * wait state (with an intr outstanding) then we need to wait till the 5417 * intr completes and starts the next cmd. We want to wait for 5418 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 
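 *
 * [Editor's aside: an illustrative sketch, not part of the original
 * driver.] The drain loop below is the classic cv_timedwait(9F) shape:
 * an absolute deadline in lbolt ticks is computed once, and a return of
 * -1 from cv_timedwait means the deadline passed without a wakeup. In
 * isolation (sp and secs are illustrative names):
 *
 *	clock_t deadline = ddi_get_lbolt() + secs * drv_usectohz(1000000);
 *
 *	mutex_enter(&sp->lock);
 *	while (sp->inflight != 0) {
 *		if (cv_timedwait(&sp->cv, &sp->lock, deadline) == -1) {
 *			mutex_exit(&sp->lock);
 *			return (DDI_FAILURE);
 *		}
 *	}
 *	mutex_exit(&sp->lock);
 *	return (DDI_SUCCESS);
 *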
5419 */ 5420 wait_cmds_complete = ddi_get_lbolt() + 5421 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5422 5423 while (un->un_ncmds_in_transport != 0) { 5424 /* 5425 * Fail if commands do not finish in the specified time. 5426 */ 5427 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5428 wait_cmds_complete) == -1) { 5429 /* 5430 * Undo the state changes made above. Everything 5431 * must go back to it's original value. 5432 */ 5433 Restore_state(un); 5434 un->un_last_state = un->un_save_state; 5435 /* Wake up any threads that might be waiting. */ 5436 cv_broadcast(&un->un_suspend_cv); 5437 mutex_exit(SD_MUTEX(un)); 5438 SD_ERROR(SD_LOG_IO_PM, un, 5439 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5440 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5441 return (DDI_FAILURE); 5442 } 5443 } 5444 5445 /* 5446 * Cancel SCSI watch thread and timeouts, if any are active 5447 */ 5448 5449 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5450 opaque_t temp_token = un->un_swr_token; 5451 mutex_exit(SD_MUTEX(un)); 5452 scsi_watch_suspend(temp_token); 5453 mutex_enter(SD_MUTEX(un)); 5454 } 5455 5456 if (un->un_reset_throttle_timeid != NULL) { 5457 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5458 un->un_reset_throttle_timeid = NULL; 5459 mutex_exit(SD_MUTEX(un)); 5460 (void) untimeout(temp_id); 5461 mutex_enter(SD_MUTEX(un)); 5462 } 5463 5464 if (un->un_dcvb_timeid != NULL) { 5465 timeout_id_t temp_id = un->un_dcvb_timeid; 5466 un->un_dcvb_timeid = NULL; 5467 mutex_exit(SD_MUTEX(un)); 5468 (void) untimeout(temp_id); 5469 mutex_enter(SD_MUTEX(un)); 5470 } 5471 5472 mutex_enter(&un->un_pm_mutex); 5473 if (un->un_pm_timeid != NULL) { 5474 timeout_id_t temp_id = un->un_pm_timeid; 5475 un->un_pm_timeid = NULL; 5476 mutex_exit(&un->un_pm_mutex); 5477 mutex_exit(SD_MUTEX(un)); 5478 (void) untimeout(temp_id); 5479 mutex_enter(SD_MUTEX(un)); 5480 } else { 5481 mutex_exit(&un->un_pm_mutex); 5482 } 5483 5484 if (un->un_retry_timeid != NULL) { 5485 timeout_id_t temp_id = un->un_retry_timeid; 5486 un->un_retry_timeid = NULL; 5487 mutex_exit(SD_MUTEX(un)); 5488 (void) untimeout(temp_id); 5489 mutex_enter(SD_MUTEX(un)); 5490 5491 if (un->un_retry_bp != NULL) { 5492 un->un_retry_bp->av_forw = un->un_waitq_headp; 5493 un->un_waitq_headp = un->un_retry_bp; 5494 if (un->un_waitq_tailp == NULL) { 5495 un->un_waitq_tailp = un->un_retry_bp; 5496 } 5497 un->un_retry_bp = NULL; 5498 un->un_retry_statp = NULL; 5499 } 5500 } 5501 5502 if (un->un_direct_priority_timeid != NULL) { 5503 timeout_id_t temp_id = un->un_direct_priority_timeid; 5504 un->un_direct_priority_timeid = NULL; 5505 mutex_exit(SD_MUTEX(un)); 5506 (void) untimeout(temp_id); 5507 mutex_enter(SD_MUTEX(un)); 5508 } 5509 5510 if (un->un_f_is_fibre == TRUE) { 5511 /* 5512 * Remove callbacks for insert and remove events 5513 */ 5514 if (un->un_insert_event != NULL) { 5515 mutex_exit(SD_MUTEX(un)); 5516 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5517 mutex_enter(SD_MUTEX(un)); 5518 un->un_insert_event = NULL; 5519 } 5520 5521 if (un->un_remove_event != NULL) { 5522 mutex_exit(SD_MUTEX(un)); 5523 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5524 mutex_enter(SD_MUTEX(un)); 5525 un->un_remove_event = NULL; 5526 } 5527 } 5528 5529 mutex_exit(SD_MUTEX(un)); 5530 5531 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5532 5533 return (DDI_SUCCESS); 5534 } 5535 5536 5537 /* 5538 * Function: sd_ddi_pm_suspend 5539 * 5540 * Description: Set the drive state to low power. 
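 *
 * [Editor's aside: an illustrative sketch, not part of the original
 * driver.] The repeated blocks above share one idiom: a timeout(9F)
 * callback may be about to run and will itself take the same mutex, so
 * the id is snapshotted and cleared while holding the mutex, and
 * untimeout(9F), which can block on a running callback, is called only
 * after dropping it:
 *
 *	timeout_id_t id;
 *
 *	mutex_enter(mp);
 *	if ((id = *idp) != NULL) {
 *		*idp = NULL;
 *		mutex_exit(mp);
 *		(void) untimeout(id);
 *		mutex_enter(mp);
 *	}
 *	mutex_exit(mp);
 *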
5541  *		Someone else is required to actually change the drive
5542  *		power level.
5543  *
5544  * Arguments: un - driver soft state (unit) structure
5545  *
5546  * Return Code: DDI_FAILURE or DDI_SUCCESS
5547  *
5548  * Context: Kernel thread context
5549  */
5550
5551 static int
5552 sd_ddi_pm_suspend(struct sd_lun *un)
5553 {
5554 	ASSERT(un != NULL);
5555 	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");
5556
5557 	ASSERT(!mutex_owned(SD_MUTEX(un)));
5558 	mutex_enter(SD_MUTEX(un));
5559
5560 	/*
5561 	 * Exit if power management is not enabled for this device, or if
5562 	 * the device is being used by HA.
5563 	 */
5564 	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
5565 	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
5566 		mutex_exit(SD_MUTEX(un));
5567 		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
5568 		return (DDI_SUCCESS);
5569 	}
5570
5571 	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
5572 	    un->un_ncmds_in_driver);
5573
5574 	/*
5575 	 * See if the device is not busy, i.e.:
5576 	 *    - we have no commands in the driver for this device
5577 	 *    - not waiting for resources
5578 	 */
5579 	if ((un->un_ncmds_in_driver == 0) &&
5580 	    (un->un_state != SD_STATE_RWAIT)) {
5581 		/*
5582 		 * The device is not busy, so it is OK to go to low power state.
5583 		 * Indicate low power, but rely on someone else to actually
5584 		 * change it.
5585 		 */
5586 		mutex_enter(&un->un_pm_mutex);
5587 		un->un_pm_count = -1;
5588 		mutex_exit(&un->un_pm_mutex);
5589 		un->un_power_level = SD_SPINDLE_OFF;
5590 	}
5591
5592 	mutex_exit(SD_MUTEX(un));
5593
5594 	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");
5595
5596 	return (DDI_SUCCESS);
5597 }
5598
5599
5600 /*
5601  * Function: sd_ddi_resume
5602  *
5603  * Description: Performs system power-up operations.
5604  *
5605  * Return Code: DDI_SUCCESS
5606  *		DDI_FAILURE
5607  *
5608  * Context: Kernel thread context
5609  */
5610
5611 static int
5612 sd_ddi_resume(dev_info_t *devi)
5613 {
5614 	struct	sd_lun	*un;
5615
5616 	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
5617 	if (un == NULL) {
5618 		return (DDI_FAILURE);
5619 	}
5620
5621 	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
5622
5623 	mutex_enter(SD_MUTEX(un));
5624 	Restore_state(un);
5625
5626 	/*
5627 	 * Restore the state which was saved to give the
5628 	 * right state in un_last_state.
5629 	 */
5630 	un->un_last_state = un->un_save_state;
5631 	/*
5632 	 * Note: throttle comes back at full.
5633 	 * Also note: this MUST be done before calling pm_raise_power,
5634 	 * otherwise the system can get hung in biowait. The scenario where
5635 	 * this'll happen is under cpr suspend. Writing of the system
5636 	 * state goes through sddump, which writes 0 to un_throttle. If
5637 	 * writing the system state then fails, for example if the partition is
5638 	 * too small, then cpr attempts a resume. If throttle isn't restored
5639 	 * from the saved value until after calling pm_raise_power, then
5640 	 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
5641 	 * in biowait.
5642 	 */
5643 	un->un_throttle = un->un_saved_throttle;
5644
5645 	/*
5646 	 * The chance of failure is very rare as the only command done in power
5647 	 * entry point is START command when you transition from 0->1 or
5648 	 * unknown->1. Put it to SPINDLE ON state irrespective of the state at
5649 	 * which suspend was done. Ignore the return value as the resume should
5650 	 * not be failed.
In the case of removable media the media need not be 5651 * inserted and hence there is a chance that raise power will fail with 5652 * media not present. 5653 */ 5654 if (un->un_f_attach_spinup) { 5655 mutex_exit(SD_MUTEX(un)); 5656 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5657 mutex_enter(SD_MUTEX(un)); 5658 } 5659 5660 /* 5661 * Don't broadcast to the suspend cv and therefore possibly 5662 * start I/O until after power has been restored. 5663 */ 5664 cv_broadcast(&un->un_suspend_cv); 5665 cv_broadcast(&un->un_state_cv); 5666 5667 /* restart thread */ 5668 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5669 scsi_watch_resume(un->un_swr_token); 5670 } 5671 5672 #if (defined(__fibre)) 5673 if (un->un_f_is_fibre == TRUE) { 5674 /* 5675 * Add callbacks for insert and remove events 5676 */ 5677 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5678 sd_init_event_callbacks(un); 5679 } 5680 } 5681 #endif 5682 5683 /* 5684 * Transport any pending commands to the target. 5685 * 5686 * If this is a low-activity device commands in queue will have to wait 5687 * until new commands come in, which may take awhile. Also, we 5688 * specifically don't check un_ncmds_in_transport because we know that 5689 * there really are no commands in progress after the unit was 5690 * suspended and we could have reached the throttle level, been 5691 * suspended, and have no new commands coming in for awhile. Highly 5692 * unlikely, but so is the low-activity disk scenario. 5693 */ 5694 ddi_xbuf_dispatch(un->un_xbuf_attr); 5695 5696 sd_start_cmds(un, NULL); 5697 mutex_exit(SD_MUTEX(un)); 5698 5699 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5700 5701 return (DDI_SUCCESS); 5702 } 5703 5704 5705 /* 5706 * Function: sd_ddi_pm_resume 5707 * 5708 * Description: Set the drive state to powered on. 5709 * Someone else is required to actually change the drive 5710 * power level. 5711 * 5712 * Arguments: un - driver soft state (unit) structure 5713 * 5714 * Return Code: DDI_SUCCESS 5715 * 5716 * Context: Kernel thread context 5717 */ 5718 5719 static int 5720 sd_ddi_pm_resume(struct sd_lun *un) 5721 { 5722 ASSERT(un != NULL); 5723 5724 ASSERT(!mutex_owned(SD_MUTEX(un))); 5725 mutex_enter(SD_MUTEX(un)); 5726 un->un_power_level = SD_SPINDLE_ON; 5727 5728 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5729 mutex_enter(&un->un_pm_mutex); 5730 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5731 un->un_pm_count++; 5732 ASSERT(un->un_pm_count == 0); 5733 /* 5734 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5735 * un_suspend_cv is for a system resume, not a power management 5736 * device resume. (4297749) 5737 * cv_broadcast(&un->un_suspend_cv); 5738 */ 5739 } 5740 mutex_exit(&un->un_pm_mutex); 5741 mutex_exit(SD_MUTEX(un)); 5742 5743 return (DDI_SUCCESS); 5744 } 5745 5746 5747 /* 5748 * Function: sd_pm_idletimeout_handler 5749 * 5750 * Description: A timer routine that's active only while a device is busy. 5751 * The purpose is to extend slightly the pm framework's busy 5752 * view of the device to prevent busy/idle thrashing for 5753 * back-to-back commands. Do this by comparing the current time 5754 * to the time at which the last command completed and when the 5755 * difference is greater than sd_pm_idletime, call 5756 * pm_idle_component. In addition to indicating idle to the pm 5757 * framework, update the chain type to again use the internal pm 5758 * layers of the driver. 
5759 * 5760 * Arguments: arg - driver soft state (unit) structure 5761 * 5762 * Context: Executes in a timeout(9F) thread context 5763 */ 5764 5765 static void 5766 sd_pm_idletimeout_handler(void *arg) 5767 { 5768 struct sd_lun *un = arg; 5769 5770 time_t now; 5771 5772 mutex_enter(&sd_detach_mutex); 5773 if (un->un_detach_count != 0) { 5774 /* Abort if the instance is detaching */ 5775 mutex_exit(&sd_detach_mutex); 5776 return; 5777 } 5778 mutex_exit(&sd_detach_mutex); 5779 5780 now = ddi_get_time(); 5781 /* 5782 * Grab both mutexes, in the proper order, since we're accessing 5783 * both PM and softstate variables. 5784 */ 5785 mutex_enter(SD_MUTEX(un)); 5786 mutex_enter(&un->un_pm_mutex); 5787 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5788 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5789 /* 5790 * Update the chain types. 5791 * This takes affect on the next new command received. 5792 */ 5793 if (un->un_f_non_devbsize_supported) { 5794 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5795 } else { 5796 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5797 } 5798 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 5799 5800 SD_TRACE(SD_LOG_IO_PM, un, 5801 "sd_pm_idletimeout_handler: idling device\n"); 5802 (void) pm_idle_component(SD_DEVINFO(un), 0); 5803 un->un_pm_idle_timeid = NULL; 5804 } else { 5805 un->un_pm_idle_timeid = 5806 timeout(sd_pm_idletimeout_handler, un, 5807 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 5808 } 5809 mutex_exit(&un->un_pm_mutex); 5810 mutex_exit(SD_MUTEX(un)); 5811 } 5812 5813 5814 /* 5815 * Function: sd_pm_timeout_handler 5816 * 5817 * Description: Callback to tell framework we are idle. 5818 * 5819 * Context: timeout(9f) thread context. 5820 */ 5821 5822 static void 5823 sd_pm_timeout_handler(void *arg) 5824 { 5825 struct sd_lun *un = arg; 5826 5827 (void) pm_idle_component(SD_DEVINFO(un), 0); 5828 mutex_enter(&un->un_pm_mutex); 5829 un->un_pm_timeid = NULL; 5830 mutex_exit(&un->un_pm_mutex); 5831 } 5832 5833 5834 /* 5835 * Function: sdpower 5836 * 5837 * Description: PM entry point. 5838 * 5839 * Return Code: DDI_SUCCESS 5840 * DDI_FAILURE 5841 * 5842 * Context: Kernel thread context 5843 */ 5844 5845 static int 5846 sdpower(dev_info_t *devi, int component, int level) 5847 { 5848 struct sd_lun *un; 5849 int instance; 5850 int rval = DDI_SUCCESS; 5851 uint_t i, log_page_size, maxcycles, ncycles; 5852 uchar_t *log_page_data; 5853 int log_sense_page; 5854 int medium_present; 5855 time_t intvlp; 5856 dev_t dev; 5857 struct pm_trans_data sd_pm_tran_data; 5858 uchar_t save_state; 5859 int sval; 5860 uchar_t state_before_pm; 5861 int got_semaphore_here; 5862 5863 instance = ddi_get_instance(devi); 5864 5865 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 5866 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 5867 component != 0) { 5868 return (DDI_FAILURE); 5869 } 5870 5871 dev = sd_make_device(SD_DEVINFO(un)); 5872 5873 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 5874 5875 /* 5876 * Must synchronize power down with close. 5877 * Attempt to decrement/acquire the open/close semaphore, 5878 * but do NOT wait on it. If it's not greater than zero, 5879 * ie. it can't be decremented without waiting, then 5880 * someone else, either open or close, already has it 5881 * and the try returns 0. Use that knowledge here to determine 5882 * if it's OK to change the device power level. 5883 * Also, only increment it on exit if it was decremented, ie. gotten, 5884 * here. 
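 *
 * [Editor's aside: an illustrative sketch, not part of the original
 * driver.] sd_pm_idletimeout_handler() above is a self-rearming
 * timeout: each invocation either declares the device idle or
 * reschedules itself 300 ms out. Stripped to that shape (my_state and
 * its fields are illustrative):
 *
 *	static void
 *	idle_poll(void *arg)
 *	{
 *		struct my_state *sp = arg;
 *
 *		mutex_enter(&sp->lock);
 *		if (sp->busy_count == 0) {
 *			sp->tid = NULL;
 *		} else {
 *			sp->tid = timeout(idle_poll, sp,
 *			    drv_usectohz(300000));
 *		}
 *		mutex_exit(&sp->lock);
 *	}
 *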
5885  */
5886 	got_semaphore_here = sema_tryp(&un->un_semoclose);
5887
5888 	mutex_enter(SD_MUTEX(un));
5889
5890 	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
5891 	    un->un_ncmds_in_driver);
5892
5893 	/*
5894 	 * If un_ncmds_in_driver is non-zero it indicates commands are
5895 	 * already being processed in the driver; if the semaphore was
5896 	 * not gotten here it indicates an open or close is being processed.
5897 	 * At the same time somebody is requesting to go low power, which
5898 	 * can't happen, therefore we need to return failure.
5899 	 */
5900 	if ((level == SD_SPINDLE_OFF) &&
5901 	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
5902 		mutex_exit(SD_MUTEX(un));
5903
5904 		if (got_semaphore_here != 0) {
5905 			sema_v(&un->un_semoclose);
5906 		}
5907 		SD_TRACE(SD_LOG_IO_PM, un,
5908 		    "sdpower: exit, device has queued cmds.\n");
5909 		return (DDI_FAILURE);
5910 	}
5911
5912 	/*
5913 	 * If the device is OFFLINE the disk is effectively dead; the START
5914 	 * or STOP command we would have to send to change its power state
5915 	 * would fail anyway, so return failure here.
5916 	 *
5917 	 * Power changes to a device that's OFFLINE or SUSPENDED
5918 	 * are not allowed.
5919 	 */
5920 	if ((un->un_state == SD_STATE_OFFLINE) ||
5921 	    (un->un_state == SD_STATE_SUSPENDED)) {
5922 		mutex_exit(SD_MUTEX(un));
5923
5924 		if (got_semaphore_here != 0) {
5925 			sema_v(&un->un_semoclose);
5926 		}
5927 		SD_TRACE(SD_LOG_IO_PM, un,
5928 		    "sdpower: exit, device is off-line.\n");
5929 		return (DDI_FAILURE);
5930 	}
5931
5932 	/*
5933 	 * Change the device's state to indicate its power level
5934 	 * is being changed. Do this to prevent a power off in the
5935 	 * middle of commands, which is especially bad on devices
5936 	 * that are really powered off instead of just spun down.
5937 	 */
5938 	state_before_pm = un->un_state;
5939 	un->un_state = SD_STATE_PM_CHANGING;
5940
5941 	mutex_exit(SD_MUTEX(un));
5942
5943 	/*
5944 	 * If the "pm-capable" property is set to TRUE by HBA drivers,
5945 	 * bypass the following checking; otherwise, check the log
5946 	 * sense information for this device.
5947 	 */
5948 	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
5949 		/*
5950 		 * Get the log sense information to understand whether the
5951 		 * power cycle counts have gone beyond the threshold.
5952 		 */
5953 		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
5954 		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
5955
5956 		mutex_enter(SD_MUTEX(un));
5957 		log_sense_page = un->un_start_stop_cycle_page;
5958 		mutex_exit(SD_MUTEX(un));
5959
5960 		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
5961 		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
5962 #ifdef	SDDEBUG
5963 		if (sd_force_pm_supported) {
5964 			/* Force a successful result */
5965 			rval = 0;
5966 		}
5967 #endif
5968 		if (rval != 0) {
5969 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
5970 			    "Log Sense Failed\n");
5971 			kmem_free(log_page_data, log_page_size);
5972 			/* Cannot support power management on those drives */
5973
5974 			if (got_semaphore_here != 0) {
5975 				sema_v(&un->un_semoclose);
5976 			}
5977 			/*
5978 			 * On exit put the state back to its original value
5979 			 * and broadcast to anyone waiting for the power
5980 			 * change completion.
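 *
 * [Editor's aside: an illustrative sketch, not part of the original
 * driver.] sema_tryp(9F) is a non-blocking P operation: it returns
 * nonzero only if the semaphore was acquired, which is what lets the
 * power entry point probe the open/close interlock without risking a
 * deadlock. The probe, in isolation:
 *
 *	static int
 *	oclose_is_idle(ksema_t *sp)
 *	{
 *		int got = sema_tryp(sp);
 *
 *		if (got != 0)
 *			sema_v(sp);
 *		return (got);
 *	}
 *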
5981  */
5982 			mutex_enter(SD_MUTEX(un));
5983 			un->un_state = state_before_pm;
5984 			cv_broadcast(&un->un_suspend_cv);
5985 			mutex_exit(SD_MUTEX(un));
5986 			SD_TRACE(SD_LOG_IO_PM, un,
5987 			    "sdpower: exit, Log Sense Failed.\n");
5988 			return (DDI_FAILURE);
5989 		}
5990
5991 		/*
5992 		 * From the page data - Convert the essential information to
5993 		 * pm_trans_data
5994 		 */
5995 		maxcycles =
5996 		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
5997 		    (log_page_data[0x1e] << 8)  | log_page_data[0x1f];
5998
5999 		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6000
6001 		ncycles =
6002 		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6003 		    (log_page_data[0x26] << 8)  | log_page_data[0x27];
6004
6005 		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6006
6007 		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6008 			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6009 			    log_page_data[8+i];
6010 		}
6011
6012 		kmem_free(log_page_data, log_page_size);
6013
6014 		/*
6015 		 * Call pm_trans_check routine to get the Ok from
6016 		 * the global policy
6017 		 */
6018
6019 		sd_pm_tran_data.format = DC_SCSI_FORMAT;
6020 		sd_pm_tran_data.un.scsi_cycles.flag = 0;
6021
6022 		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6023 #ifdef	SDDEBUG
6024 		if (sd_force_pm_supported) {
6025 			/* Force a successful result */
6026 			rval = 1;
6027 		}
6028 #endif
6029 		switch (rval) {
6030 		case 0:
6031 			/*
6032 			 * Not OK to power cycle, or an error in the
6033 			 * parameters passed. pm_trans_check() has supplied
6034 			 * the advised time (intvlp) to wait before a power
6035 			 * cycle should next be considered, and we are
6036 			 * supposed to pretend to be busy so that the pm
6037 			 * framework will not call our power entry point
6038 			 * until then. Install a timeout handler and wait
6039 			 * for the recommended time to elapse so that power
6040 			 * management can become effective again.
6041 			 *
6042 			 * To effect this behavior, call pm_busy_component to
6043 			 * indicate to the framework this device is busy.
6044 			 * By not adjusting un_pm_count, the rest of PM in
6045 			 * the driver functions normally, independent of
6046 			 * this; but because the framework is told the device
6047 			 * is busy it won't attempt powering down until it
6048 			 * gets a matching idle, which the timeout handler
6049 			 * sends.
6050 			 * Note: sd_pm_entry can't be called here to do this
6051 			 * because sdpower may have been called as a result
6052 			 * of a call to pm_raise_power from within sd_pm_entry.
6053 			 *
6054 			 * If a timeout handler is already active, don't
			 * install another.
			 */
6055 			mutex_enter(&un->un_pm_mutex);
6056 			if (un->un_pm_timeid == NULL) {
6057 				un->un_pm_timeid =
6058 				    timeout(sd_pm_timeout_handler,
6059 				    un, intvlp * drv_usectohz(1000000));
6060 				mutex_exit(&un->un_pm_mutex);
6061 				(void) pm_busy_component(SD_DEVINFO(un), 0);
6062 			} else {
6063 				mutex_exit(&un->un_pm_mutex);
6064 			}
6065 			if (got_semaphore_here != 0) {
6066 				sema_v(&un->un_semoclose);
6067 			}
6068 			/*
6069 			 * On exit put the state back to its original value
6070 			 * and broadcast to anyone waiting for the power
6071 			 * change completion.
6072 			 */
6073 			mutex_enter(SD_MUTEX(un));
6074 			un->un_state = state_before_pm;
6075 			cv_broadcast(&un->un_suspend_cv);
6076 			mutex_exit(SD_MUTEX(un));
6077
6078 			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6079 			    "trans check Failed, not ok to power cycle.\n");
6080 			return (DDI_FAILURE);
6081
6082 		case -1:
6083 			if (got_semaphore_here != 0) {
6084 				sema_v(&un->un_semoclose);
6085 			}
6086 			/*
6087 			 * On exit put the state back to its original value
6088 			 * and broadcast to anyone waiting for the power
6089 			 * change completion.
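 *
 * [Editor's aside: an illustrative sketch, not part of the original
 * driver.] The shifting above assembles big-endian 32-bit counters from
 * the log page bytes (offset 0x1c for lifemax, 0x24 for ncycles). As a
 * reusable helper it would read:
 *
 *	static uint_t
 *	be32_at(const uchar_t *p, size_t off)
 *	{
 *		return (((uint_t)p[off] << 24) |
 *		    ((uint_t)p[off + 1] << 16) |
 *		    ((uint_t)p[off + 2] << 8) |
 *		    (uint_t)p[off + 3]);
 *	}
 *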
6090  */
6091 			mutex_enter(SD_MUTEX(un));
6092 			un->un_state = state_before_pm;
6093 			cv_broadcast(&un->un_suspend_cv);
6094 			mutex_exit(SD_MUTEX(un));
6095 			SD_TRACE(SD_LOG_IO_PM, un,
6096 			    "sdpower: exit, trans check command Failed.\n");
6097 			return (DDI_FAILURE);
6098 		}
6099 	}
6100
6101 	if (level == SD_SPINDLE_OFF) {
6102 		/*
6103 		 * Save the last state... if the STOP FAILS we need it
6104 		 * for restoring
6105 		 */
6106 		mutex_enter(SD_MUTEX(un));
6107 		save_state = un->un_last_state;
6108 		/*
6109 		 * There must not be any cmds getting processed
6110 		 * in the driver when we get here. Power to the
6111 		 * device is potentially going off.
6112 		 */
6113 		ASSERT(un->un_ncmds_in_driver == 0);
6114 		mutex_exit(SD_MUTEX(un));
6115
6116 		/*
6117 		 * For now suspend the device completely before spindle is
6118 		 * turned off
6119 		 */
6120 		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
6121 			if (got_semaphore_here != 0) {
6122 				sema_v(&un->un_semoclose);
6123 			}
6124 			/*
6125 			 * On exit put the state back to its original value
6126 			 * and broadcast to anyone waiting for the power
6127 			 * change completion.
6128 			 */
6129 			mutex_enter(SD_MUTEX(un));
6130 			un->un_state = state_before_pm;
6131 			cv_broadcast(&un->un_suspend_cv);
6132 			mutex_exit(SD_MUTEX(un));
6133 			SD_TRACE(SD_LOG_IO_PM, un,
6134 			    "sdpower: exit, PM suspend Failed.\n");
6135 			return (DDI_FAILURE);
6136 		}
6137 	}
6138
6139 	/*
6140 	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6141 	 * close, or strategy. Dump no longer uses this routine; it uses its
6142 	 * own code so it can be done in polled mode.
6143 	 */
6144
6145 	medium_present = TRUE;
6146
6147 	/*
6148 	 * When powering up, issue a TUR in case the device is at unit
6149 	 * attention. Don't do retries. Bypass the PM layer, otherwise
6150 	 * a deadlock on un_pm_busy_cv will occur.
6151 	 */
6152 	if (level == SD_SPINDLE_ON) {
6153 		(void) sd_send_scsi_TEST_UNIT_READY(un,
6154 		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6155 	}
6156
6157 	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6158 	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6159
6160 	sval = sd_send_scsi_START_STOP_UNIT(un,
6161 	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
6162 	    SD_PATH_DIRECT);
6163 	/* Command failed, check for media present. */
6164 	if ((sval == ENXIO) && un->un_f_has_removable_media) {
6165 		medium_present = FALSE;
6166 	}
6167
6168 	/*
6169 	 * The conditions of interest here are:
6170 	 *   if a spindle off with media present fails,
6171 	 *	then restore the state and return an error.
6172 	 *   else if a spindle on fails,
6173 	 *	then return an error (there's no state to restore).
6174 	 * In all other cases we setup for the new state
6175 	 * and return success.
6176 	 */
6177 	switch (level) {
6178 	case SD_SPINDLE_OFF:
6179 		if ((medium_present == TRUE) && (sval != 0)) {
6180 			/* The stop command from above failed */
6181 			rval = DDI_FAILURE;
6182 			/*
6183 			 * The stop command failed, and we have media
6184 			 * present. Put the level back by calling
6185 			 * sd_ddi_pm_resume() and set the state back to
6186 			 * its previous value.
6187 			 */
6188 			(void) sd_ddi_pm_resume(un);
6189 			mutex_enter(SD_MUTEX(un));
6190 			un->un_last_state = save_state;
6191 			mutex_exit(SD_MUTEX(un));
6192 			break;
6193 		}
6194 		/*
6195 		 * The stop command from above succeeded.
6196 		 */
6197 		if (un->un_f_monitor_media_state) {
6198 			/*
6199 			 * Terminate watch thread in case of removable media
6200 			 * devices going into low power state.
This is as per 6201 * the requirements of pm framework, otherwise commands 6202 * will be generated for the device (through watch 6203 * thread), even when the device is in low power state. 6204 */ 6205 mutex_enter(SD_MUTEX(un)); 6206 un->un_f_watcht_stopped = FALSE; 6207 if (un->un_swr_token != NULL) { 6208 opaque_t temp_token = un->un_swr_token; 6209 un->un_f_watcht_stopped = TRUE; 6210 un->un_swr_token = NULL; 6211 mutex_exit(SD_MUTEX(un)); 6212 (void) scsi_watch_request_terminate(temp_token, 6213 SCSI_WATCH_TERMINATE_WAIT); 6214 } else { 6215 mutex_exit(SD_MUTEX(un)); 6216 } 6217 } 6218 break; 6219 6220 default: /* The level requested is spindle on... */ 6221 /* 6222 * Legacy behavior: return success on a failed spinup 6223 * if there is no media in the drive. 6224 * Do this by looking at medium_present here. 6225 */ 6226 if ((sval != 0) && medium_present) { 6227 /* The start command from above failed */ 6228 rval = DDI_FAILURE; 6229 break; 6230 } 6231 /* 6232 * The start command from above succeeded 6233 * Resume the devices now that we have 6234 * started the disks 6235 */ 6236 (void) sd_ddi_pm_resume(un); 6237 6238 /* 6239 * Resume the watch thread since it was suspended 6240 * when the device went into low power mode. 6241 */ 6242 if (un->un_f_monitor_media_state) { 6243 mutex_enter(SD_MUTEX(un)); 6244 if (un->un_f_watcht_stopped == TRUE) { 6245 opaque_t temp_token; 6246 6247 un->un_f_watcht_stopped = FALSE; 6248 mutex_exit(SD_MUTEX(un)); 6249 temp_token = scsi_watch_request_submit( 6250 SD_SCSI_DEVP(un), 6251 sd_check_media_time, 6252 SENSE_LENGTH, sd_media_watch_cb, 6253 (caddr_t)dev); 6254 mutex_enter(SD_MUTEX(un)); 6255 un->un_swr_token = temp_token; 6256 } 6257 mutex_exit(SD_MUTEX(un)); 6258 } 6259 } 6260 if (got_semaphore_here != 0) { 6261 sema_v(&un->un_semoclose); 6262 } 6263 /* 6264 * On exit put the state back to it's original value 6265 * and broadcast to anyone waiting for the power 6266 * change completion. 6267 */ 6268 mutex_enter(SD_MUTEX(un)); 6269 un->un_state = state_before_pm; 6270 cv_broadcast(&un->un_suspend_cv); 6271 mutex_exit(SD_MUTEX(un)); 6272 6273 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6274 6275 return (rval); 6276 } 6277 6278 6279 6280 /* 6281 * Function: sdattach 6282 * 6283 * Description: Driver's attach(9e) entry point function. 6284 * 6285 * Arguments: devi - opaque device info handle 6286 * cmd - attach type 6287 * 6288 * Return Code: DDI_SUCCESS 6289 * DDI_FAILURE 6290 * 6291 * Context: Kernel thread context 6292 */ 6293 6294 static int 6295 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6296 { 6297 switch (cmd) { 6298 case DDI_ATTACH: 6299 return (sd_unit_attach(devi)); 6300 case DDI_RESUME: 6301 return (sd_ddi_resume(devi)); 6302 default: 6303 break; 6304 } 6305 return (DDI_FAILURE); 6306 } 6307 6308 6309 /* 6310 * Function: sddetach 6311 * 6312 * Description: Driver's detach(9E) entry point function. 
6313 * 6314 * Arguments: devi - opaque device info handle 6315 * cmd - detach type 6316 * 6317 * Return Code: DDI_SUCCESS 6318 * DDI_FAILURE 6319 * 6320 * Context: Kernel thread context 6321 */ 6322 6323 static int 6324 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6325 { 6326 switch (cmd) { 6327 case DDI_DETACH: 6328 return (sd_unit_detach(devi)); 6329 case DDI_SUSPEND: 6330 return (sd_ddi_suspend(devi)); 6331 default: 6332 break; 6333 } 6334 return (DDI_FAILURE); 6335 } 6336 6337 6338 /* 6339 * Function: sd_sync_with_callback 6340 * 6341 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6342 * state while the callback routine is active. 6343 * 6344 * Arguments: un: softstate structure for the instance 6345 * 6346 * Context: Kernel thread context 6347 */ 6348 6349 static void 6350 sd_sync_with_callback(struct sd_lun *un) 6351 { 6352 ASSERT(un != NULL); 6353 6354 mutex_enter(SD_MUTEX(un)); 6355 6356 ASSERT(un->un_in_callback >= 0); 6357 6358 while (un->un_in_callback > 0) { 6359 mutex_exit(SD_MUTEX(un)); 6360 delay(2); 6361 mutex_enter(SD_MUTEX(un)); 6362 } 6363 6364 mutex_exit(SD_MUTEX(un)); 6365 } 6366 6367 /* 6368 * Function: sd_unit_attach 6369 * 6370 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6371 * the soft state structure for the device and performs 6372 * all necessary structure and device initializations. 6373 * 6374 * Arguments: devi: the system's dev_info_t for the device. 6375 * 6376 * Return Code: DDI_SUCCESS if attach is successful. 6377 * DDI_FAILURE if any part of the attach fails. 6378 * 6379 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6380 * Kernel thread context only. Can sleep. 6381 */ 6382 6383 static int 6384 sd_unit_attach(dev_info_t *devi) 6385 { 6386 struct scsi_device *devp; 6387 struct sd_lun *un; 6388 char *variantp; 6389 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6390 int instance; 6391 int rval; 6392 int wc_enabled; 6393 int tgt; 6394 uint64_t capacity; 6395 uint_t lbasize = 0; 6396 dev_info_t *pdip = ddi_get_parent(devi); 6397 int offbyone = 0; 6398 int geom_label_valid = 0; 6399 #if defined(__sparc) 6400 int max_xfer_size; 6401 #endif 6402 6403 /* 6404 * Retrieve the target driver's private data area. This was set 6405 * up by the HBA. 6406 */ 6407 devp = ddi_get_driver_private(devi); 6408 6409 /* 6410 * Retrieve the target ID of the device. 6411 */ 6412 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6413 SCSI_ADDR_PROP_TARGET, -1); 6414 6415 /* 6416 * Since we have no idea what state things were left in by the last 6417 * user of the device, set up some 'default' settings, ie. turn 'em 6418 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6419 * Do this before the scsi_probe, which sends an inquiry. 6420 * This is a fix for bug (4430280). 6421 * Of special importance is wide-xfer. The drive could have been left 6422 * in wide transfer mode by the last driver to communicate with it, 6423 * this includes us. If that's the case, and if the following is not 6424 * setup properly or we don't re-negotiate with the drive prior to 6425 * transferring data to/from the drive, it causes bus parity errors, 6426 * data overruns, and unexpected interrupts. This first occurred when 6427 * the fix for bug (4378686) was made. 
6428 */ 6429 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6430 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6431 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6432 6433 /* 6434 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6435 * on a target. Setting it per lun instance actually sets the 6436 * capability of this target, which affects those luns already 6437 * attached on the same target. So during attach, we can only disable 6438 * this capability only when no other lun has been attached on this 6439 * target. By doing this, we assume a target has the same tagged-qing 6440 * capability for every lun. The condition can be removed when HBA 6441 * is changed to support per lun based tagged-qing capability. 6442 */ 6443 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6444 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6445 } 6446 6447 /* 6448 * Use scsi_probe() to issue an INQUIRY command to the device. 6449 * This call will allocate and fill in the scsi_inquiry structure 6450 * and point the sd_inq member of the scsi_device structure to it. 6451 * If the attach succeeds, then this memory will not be de-allocated 6452 * (via scsi_unprobe()) until the instance is detached. 6453 */ 6454 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6455 goto probe_failed; 6456 } 6457 6458 /* 6459 * Check the device type as specified in the inquiry data and 6460 * claim it if it is of a type that we support. 6461 */ 6462 switch (devp->sd_inq->inq_dtype) { 6463 case DTYPE_DIRECT: 6464 break; 6465 case DTYPE_RODIRECT: 6466 break; 6467 case DTYPE_OPTICAL: 6468 break; 6469 case DTYPE_NOTPRESENT: 6470 default: 6471 /* Unsupported device type; fail the attach. */ 6472 goto probe_failed; 6473 } 6474 6475 /* 6476 * Allocate the soft state structure for this unit. 6477 * 6478 * We rely upon this memory being set to all zeroes by 6479 * ddi_soft_state_zalloc(). We assume that any member of the 6480 * soft state structure that is not explicitly initialized by 6481 * this routine will have a value of zero. 6482 */ 6483 instance = ddi_get_instance(devp->sd_dev); 6484 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6485 goto probe_failed; 6486 } 6487 6488 /* 6489 * Retrieve a pointer to the newly-allocated soft state. 6490 * 6491 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6492 * was successful, unless something has gone horribly wrong and the 6493 * ddi's soft state internals are corrupt (in which case it is 6494 * probably better to halt here than just fail the attach....) 6495 */ 6496 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6497 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6498 instance); 6499 /*NOTREACHED*/ 6500 } 6501 6502 /* 6503 * Link the back ptr of the driver soft state to the scsi_device 6504 * struct for this lun. 6505 * Save a pointer to the softstate in the driver-private area of 6506 * the scsi_device struct. 6507 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6508 * we first set un->un_sd below. 6509 */ 6510 un->un_sd = devp; 6511 devp->sd_private = (opaque_t)un; 6512 6513 /* 6514 * The following must be after devp is stored in the soft state struct. 6515 */ 6516 #ifdef SDDEBUG 6517 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6518 "%s_unit_attach: un:0x%p instance:%d\n", 6519 ddi_driver_name(devi), un, instance); 6520 #endif 6521 6522 /* 6523 * Set up the device type and node type (for the minor nodes). 
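 *
 * [Editor's aside: an illustrative sketch, not part of the original
 * driver.] The zalloc/get pairing above is the standard
 * ddi_soft_state(9F) life cycle; the example_state name below is
 * illustrative:
 *
 *	static void *example_state;
 *
 *	once, in _init():
 *		(void) ddi_soft_state_init(&example_state,
 *		    sizeof (struct sd_lun), 8);
 *	per instance, in attach:
 *		(void) ddi_soft_state_zalloc(example_state, instance);
 *		un = ddi_get_soft_state(example_state, instance);
 *	per instance, in detach:
 *		ddi_soft_state_free(example_state, instance);
 *	once, in _fini():
 *		ddi_soft_state_fini(&example_state);
 *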
6524  * By default we assume that the device can at least support the
6525  * Common Command Set. Call it a CD-ROM if it reports itself
6526  * as a RODIRECT device.
6527  */
6528 	switch (devp->sd_inq->inq_dtype) {
6529 	case DTYPE_RODIRECT:
6530 		un->un_node_type = DDI_NT_CD_CHAN;
6531 		un->un_ctype	 = CTYPE_CDROM;
6532 		break;
6533 	case DTYPE_OPTICAL:
6534 		un->un_node_type = DDI_NT_BLOCK_CHAN;
6535 		un->un_ctype	 = CTYPE_ROD;
6536 		break;
6537 	default:
6538 		un->un_node_type = DDI_NT_BLOCK_CHAN;
6539 		un->un_ctype	 = CTYPE_CCS;
6540 		break;
6541 	}
6542
6543 	/*
6544 	 * Try to read the interconnect type from the HBA.
6545 	 *
6546 	 * Note: This driver is currently compiled as two binaries, a parallel
6547 	 * scsi version (sd) and a fibre channel version (ssd). All functional
6548 	 * differences are determined at compile time. In the future a single
6549 	 * binary will be provided and the interconnect type will be used to
6550 	 * differentiate between fibre and parallel scsi behaviors. At that
6551 	 * time it will be necessary for all fibre channel HBAs to support
6552 	 * this property.
6553 	 *
6554 	 * Set un_f_is_fibre to TRUE (default fibre).
6555 	 */
6556 	un->un_f_is_fibre = TRUE;
6557 	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
6558 	case INTERCONNECT_SSA:
6559 		un->un_interconnect_type = SD_INTERCONNECT_SSA;
6560 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6561 		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
6562 		break;
6563 	case INTERCONNECT_PARALLEL:
6564 		un->un_f_is_fibre = FALSE;
6565 		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6566 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6567 		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
6568 		break;
6569 	case INTERCONNECT_SATA:
6570 		un->un_f_is_fibre = FALSE;
6571 		un->un_interconnect_type = SD_INTERCONNECT_SATA;
6572 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6573 		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
6574 		break;
6575 	case INTERCONNECT_FIBRE:
6576 		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
6577 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6578 		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
6579 		break;
6580 	case INTERCONNECT_FABRIC:
6581 		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
6582 		un->un_node_type = DDI_NT_BLOCK_FABRIC;
6583 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6584 		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
6585 		break;
6586 	default:
6587 #ifdef SD_DEFAULT_INTERCONNECT_TYPE
6588 		/*
6589 		 * The HBA does not support the "interconnect-type" property
6590 		 * (or did not provide a recognized type).
6591 		 *
6592 		 * Note: This will be obsoleted when a single fibre channel
6593 		 * and parallel scsi driver is delivered. In the meantime the
6594 		 * interconnect type will be set to the platform default. If
6595 		 * that type is not parallel SCSI, it means that we should be
6596 		 * assuming "ssd" semantics. However, here this also means that
6597 		 * the FC HBA is not supporting the "interconnect-type" property
6598 		 * like we expect it to, so log this occurrence.
6599 		 */
6600 		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
6601 		if (!SD_IS_PARALLEL_SCSI(un)) {
6602 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
6603 			    "sd_unit_attach: un:0x%p Assuming "
6604 			    "INTERCONNECT_FIBRE\n", un);
6605 		} else {
6606 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
6607 			    "sd_unit_attach: un:0x%p Assuming "
6608 			    "INTERCONNECT_PARALLEL\n", un);
6609 			un->un_f_is_fibre = FALSE;
6610 		}
6611 #else
6612 		/*
6613 		 * Note: This source will be implemented when a single fibre
6614 		 * channel and parallel scsi driver is delivered.
The default
6615 		 * will be to assume that if a device does not support the
6616 		 * "interconnect-type" property it is a parallel SCSI HBA and
6617 		 * we will set the interconnect type for parallel scsi.
6618 		 */
6619 		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6620 		un->un_f_is_fibre = FALSE;
6621 #endif
6622 		break;
6623 	}
6624
6625 	if (un->un_f_is_fibre == TRUE) {
6626 		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
6627 		    SCSI_VERSION_3) {
6628 			switch (un->un_interconnect_type) {
6629 			case SD_INTERCONNECT_FIBRE:
6630 			case SD_INTERCONNECT_SSA:
6631 				un->un_node_type = DDI_NT_BLOCK_WWN;
6632 				break;
6633 			default:
6634 				break;
6635 			}
6636 		}
6637 	}
6638
6639 	/*
6640 	 * Initialize the Request Sense command for the target
6641 	 */
6642 	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
6643 		goto alloc_rqs_failed;
6644 	}
6645
6646 	/*
6647 	 * Set un_retry_count with SD_RETRY_COUNT; this is OK for Sparc
6648 	 * with separate binaries for sd and ssd.
6649 	 *
6650 	 * x86 has 1 binary, and un_retry_count is set based on connection
6651 	 * type. The hardcoded values will go away when Sparc uses 1 binary
6652 	 * for sd and ssd. These hardcoded values need to match
6653 	 * SD_RETRY_COUNT in sddef.h.
6654 	 * The value used is based on interconnect type:
6655 	 * fibre = 3, parallel = 5.
6656 	 */
6657 #if defined(__i386) || defined(__amd64)
6658 	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
6659 #else
6660 	un->un_retry_count = SD_RETRY_COUNT;
6661 #endif
6662
6663 	/*
6664 	 * Set the per disk retry count to the default number of retries
6665 	 * for disks and CDROMs. This value can be overridden by the
6666 	 * disk property list or an entry in sd.conf.
6667 	 */
6668 	un->un_notready_retry_count =
6669 	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
6670 	    : DISK_NOT_READY_RETRY_COUNT(un);
6671
6672 	/*
6673 	 * Set the busy retry count to the default value of un_retry_count.
6674 	 * This can be overridden by entries in sd.conf or the device
6675 	 * config table.
6676 	 */
6677 	un->un_busy_retry_count = un->un_retry_count;
6678
6679 	/*
6680 	 * Init the reset threshold for retries. This number determines
6681 	 * how many retries must be performed before a reset can be issued
6682 	 * (for certain error conditions). This can be overridden by entries
6683 	 * in sd.conf or the device config table.
6684 	 */
6685 	un->un_reset_retry_count = (un->un_retry_count / 2);
6686
6687 	/*
6688 	 * Set the victim_retry_count to the default un_retry_count
6689 	 */
6690 	un->un_victim_retry_count = (2 * un->un_retry_count);
6691
6692 	/*
6693 	 * Set the reservation release timeout to the default value of
6694 	 * 5 seconds. This can be overridden by entries in ssd.conf or the
6695 	 * device config table.
6696 	 */
6697 	un->un_reserve_release_time = 5;
6698
6699 	/*
6700 	 * Set up the default maximum transfer size. Note that this may
6701 	 * get updated later in the attach, when setting up default wide
6702 	 * operations for disks.
6703 	 */
6704 #if defined(__i386) || defined(__amd64)
6705 	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
6706 	un->un_partial_dma_supported = 1;
6707 #else
6708 	un->un_max_xfer_size = (uint_t)maxphys;
6709 #endif
6710
6711 	/*
6712 	 * Get "allow bus device reset" property (defaults to "enabled" if
6713 	 * the property was not defined). This is to disable bus resets for
6714 	 * certain kinds of error recovery. Note: In the future when a run-time
6715 	 * fibre check is available the soft state flag should default to
6716 	 * enabled.
6717 */ 6718 if (un->un_f_is_fibre == TRUE) { 6719 un->un_f_allow_bus_device_reset = TRUE; 6720 } else { 6721 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6722 "allow-bus-device-reset", 1) != 0) { 6723 un->un_f_allow_bus_device_reset = TRUE; 6724 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6725 "sd_unit_attach: un:0x%p Bus device reset " 6726 "enabled\n", un); 6727 } else { 6728 un->un_f_allow_bus_device_reset = FALSE; 6729 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6730 "sd_unit_attach: un:0x%p Bus device reset " 6731 "disabled\n", un); 6732 } 6733 } 6734 6735 /* 6736 * Check if this is an ATAPI device. ATAPI devices use Group 1 6737 * Read/Write commands and Group 2 Mode Sense/Select commands. 6738 * 6739 * Note: The "obsolete" way of doing this is to check for the "atapi" 6740 * property. The new "variant" property with a value of "atapi" has been 6741 * introduced so that future 'variants' of standard SCSI behavior (like 6742 * atapi) could be specified by the underlying HBA drivers by supplying 6743 * a new value for the "variant" property, instead of having to define a 6744 * new property. 6745 */ 6746 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6747 un->un_f_cfg_is_atapi = TRUE; 6748 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6749 "sd_unit_attach: un:0x%p Atapi device\n", un); 6750 } 6751 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6752 &variantp) == DDI_PROP_SUCCESS) { 6753 if (strcmp(variantp, "atapi") == 0) { 6754 un->un_f_cfg_is_atapi = TRUE; 6755 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6756 "sd_unit_attach: un:0x%p Atapi device\n", un); 6757 } 6758 ddi_prop_free(variantp); 6759 } 6760 6761 un->un_cmd_timeout = SD_IO_TIME; 6762 6763 /* Info on current states, statuses, etc. (Updated frequently) */ 6764 un->un_state = SD_STATE_NORMAL; 6765 un->un_last_state = SD_STATE_NORMAL; 6766 6767 /* Control & status info for command throttling */ 6768 un->un_throttle = sd_max_throttle; 6769 un->un_saved_throttle = sd_max_throttle; 6770 un->un_min_throttle = sd_min_throttle; 6771 6772 if (un->un_f_is_fibre == TRUE) { 6773 un->un_f_use_adaptive_throttle = TRUE; 6774 } else { 6775 un->un_f_use_adaptive_throttle = FALSE; 6776 } 6777 6778 /* Removable media support. */ 6779 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6780 un->un_mediastate = DKIO_NONE; 6781 un->un_specified_mediastate = DKIO_NONE; 6782 6783 /* CVs for suspend/resume (PM or DR) */ 6784 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6785 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6786 6787 /* Power management support. */ 6788 un->un_power_level = SD_SPINDLE_UNINIT; 6789 6790 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6791 un->un_f_wcc_inprog = 0; 6792 6793 /* 6794 * The open/close semaphore is used to serialize threads executing 6795 * in the driver's open & close entry point routines for a given 6796 * instance. 6797 */ 6798 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 6799 6800 /* 6801 * The conf file entry and softstate variable is a forceful override, 6802 * meaning a non-zero value must be entered to change the default. 6803 */ 6804 un->un_f_disksort_disabled = FALSE; 6805 6806 /* 6807 * Retrieve the properties from the static driver table or the driver 6808 * configuration file (.conf) for this unit and update the soft state 6809 * for the device as needed for the indicated properties. 
6810 	 * Note: the property configuration needs to occur here as some of the
6811 	 * following routines may have dependencies on soft state flags set
6812 	 * as part of the driver property configuration.
6813 	 */
6814 	sd_read_unit_properties(un);
6815 	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
6816 	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);
6817
6818 	/*
6819 	 * A device is treated as hotpluggable only if it has the
6820 	 * "hotpluggable" property. Otherwise it is regarded as
6821 	 * non-hotpluggable.
6822 	 */
6823 	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
6824 	    -1) != -1) {
6825 		un->un_f_is_hotpluggable = TRUE;
6826 	}
6827
6828 	/*
6829 	 * Set the unit's attributes (flags) according to "hotpluggable" and
6830 	 * the RMB bit in the INQUIRY data.
6831 	 */
6832 	sd_set_unit_attributes(un, devi);
6833
6834 	/*
6835 	 * By default, we mark the capacity, lbasize, and geometry
6836 	 * as invalid. Only if we successfully read a valid capacity
6837 	 * will we update the un_blockcount and un_tgt_blocksize with the
6838 	 * valid values (the geometry will be validated later).
6839 	 */
6840 	un->un_f_blockcount_is_valid	= FALSE;
6841 	un->un_f_tgt_blocksize_is_valid	= FALSE;
6842
6843 	/*
6844 	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
6845 	 * otherwise.
6846 	 */
6847 	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
6848 	un->un_blockcount = 0;
6849
6850 	/*
6851 	 * Set up the per-instance info needed to determine the correct
6852 	 * CDBs and other info for issuing commands to the target.
6853 	 */
6854 	sd_init_cdb_limits(un);
6855
6856 	/*
6857 	 * Set up the IO chains to use, based upon the target type.
6858 	 */
6859 	if (un->un_f_non_devbsize_supported) {
6860 		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6861 	} else {
6862 		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6863 	}
6864 	un->un_uscsi_chain_type  = SD_CHAIN_INFO_USCSI_CMD;
6865 	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
6866 	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
6867
6868 	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
6869 	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
6870 	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
6871 	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
6872
6873
6874 	if (ISCD(un)) {
6875 		un->un_additional_codes = sd_additional_codes;
6876 	} else {
6877 		un->un_additional_codes = NULL;
6878 	}
6879
6880 	/*
6881 	 * Create the kstats here so they can be available for attach-time
6882 	 * routines that send commands to the unit (either polled or via
6883 	 * sd_send_scsi_cmd).
6884 	 *
6885 	 * Note: This is a critical sequence that needs to be maintained:
6886 	 *	1) Instantiate the kstats here, before any routines using the
6887 	 *	   iopath (i.e. sd_send_scsi_cmd).
6888 	 *	2) Instantiate and initialize the partition stats
6889 	 *	   (sd_set_pstats).
6890 	 *	3) Initialize the error stats (sd_set_errstats), following
6891 	 *	   sd_validate_geometry(), sd_register_devid(),
6892 	 *	   and sd_cache_control().
6893 */ 6894 6895 un->un_stats = kstat_create(sd_label, instance, 6896 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 6897 if (un->un_stats != NULL) { 6898 un->un_stats->ks_lock = SD_MUTEX(un); 6899 kstat_install(un->un_stats); 6900 } 6901 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6902 "sd_unit_attach: un:0x%p un_stats created\n", un); 6903 6904 sd_create_errstats(un, instance); 6905 if (un->un_errstats == NULL) { 6906 goto create_errstats_failed; 6907 } 6908 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6909 "sd_unit_attach: un:0x%p errstats created\n", un); 6910 6911 /* 6912 * The following if/else code was relocated here from below as part 6913 * of the fix for bug (4430280). However with the default setup added 6914 * on entry to this routine, it's no longer absolutely necessary for 6915 * this to be before the call to sd_spin_up_unit. 6916 */ 6917 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 6918 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 6919 (devp->sd_inq->inq_ansi == 5)) && 6920 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 6921 6922 /* 6923 * If tagged queueing is supported by the target 6924 * and by the host adapter then we will enable it 6925 */ 6926 un->un_tagflags = 0; 6927 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 6928 (un->un_f_arq_enabled == TRUE)) { 6929 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 6930 1, 1) == 1) { 6931 un->un_tagflags = FLAG_STAG; 6932 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6933 "sd_unit_attach: un:0x%p tag queueing " 6934 "enabled\n", un); 6935 } else if (scsi_ifgetcap(SD_ADDRESS(un), 6936 "untagged-qing", 0) == 1) { 6937 un->un_f_opt_queueing = TRUE; 6938 un->un_saved_throttle = un->un_throttle = 6939 min(un->un_throttle, 3); 6940 } else { 6941 un->un_f_opt_queueing = FALSE; 6942 un->un_saved_throttle = un->un_throttle = 1; 6943 } 6944 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 6945 == 1) && (un->un_f_arq_enabled == TRUE)) { 6946 /* The Host Adapter supports internal queueing. */ 6947 un->un_f_opt_queueing = TRUE; 6948 un->un_saved_throttle = un->un_throttle = 6949 min(un->un_throttle, 3); 6950 } else { 6951 un->un_f_opt_queueing = FALSE; 6952 un->un_saved_throttle = un->un_throttle = 1; 6953 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6954 "sd_unit_attach: un:0x%p no tag queueing\n", un); 6955 } 6956 6957 /* 6958 * Enable large transfers for SATA/SAS drives 6959 */ 6960 if (SD_IS_SERIAL(un)) { 6961 un->un_max_xfer_size = 6962 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6963 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6964 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6965 "sd_unit_attach: un:0x%p max transfer " 6966 "size=0x%x\n", un, un->un_max_xfer_size); 6967 6968 } 6969 6970 /* Setup or tear down default wide operations for disks */ 6971 6972 /* 6973 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 6974 * and "ssd_max_xfer_size" to exist simultaneously on the same 6975 * system and be set to different values. In the future this 6976 * code may need to be updated when the ssd module is 6977 * obsoleted and removed from the system. 
(4299588) 6978 */ 6979 if (SD_IS_PARALLEL_SCSI(un) && 6980 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6981 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6982 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6983 1, 1) == 1) { 6984 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6985 "sd_unit_attach: un:0x%p Wide Transfer " 6986 "enabled\n", un); 6987 } 6988 6989 /* 6990 * If tagged queuing has also been enabled, then 6991 * enable large xfers 6992 */ 6993 if (un->un_saved_throttle == sd_max_throttle) { 6994 un->un_max_xfer_size = 6995 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6996 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6997 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6998 "sd_unit_attach: un:0x%p max transfer " 6999 "size=0x%x\n", un, un->un_max_xfer_size); 7000 } 7001 } else { 7002 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7003 0, 1) == 1) { 7004 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7005 "sd_unit_attach: un:0x%p " 7006 "Wide Transfer disabled\n", un); 7007 } 7008 } 7009 } else { 7010 un->un_tagflags = FLAG_STAG; 7011 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7012 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7013 } 7014 7015 /* 7016 * If this target supports LUN reset, try to enable it. 7017 */ 7018 if (un->un_f_lun_reset_enabled) { 7019 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7020 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7021 "un:0x%p lun_reset capability set\n", un); 7022 } else { 7023 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7024 "un:0x%p lun-reset capability not set\n", un); 7025 } 7026 } 7027 7028 /* 7029 * Adjust the maximum transfer size. This is to fix 7030 * the problem of partial DMA support on SPARC. Some 7031 * HBA drivers, like aac, have a very small dma_attr_maxxfer 7032 * size, which requires partial DMA support on SPARC. 7033 * In the future the SPARC pci nexus driver may solve 7034 * the problem instead of this fix. 7035 */ 7036 #if defined(__sparc) 7037 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7038 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7039 un->un_max_xfer_size = max_xfer_size; 7040 un->un_partial_dma_supported = 1; 7041 } 7042 #endif 7043 7044 /* 7045 * Set PKT_DMA_PARTIAL flag. 7046 */ 7047 if (un->un_partial_dma_supported == 1) { 7048 un->un_pkt_flags = PKT_DMA_PARTIAL; 7049 } else { 7050 un->un_pkt_flags = 0; 7051 } 7052 7053 /* 7054 * At this point in the attach, we have enough info in the 7055 * soft state to be able to issue commands to the target. 7056 * 7057 * All command paths used below MUST issue their commands as 7058 * SD_PATH_DIRECT. This is important as intermediate layers 7059 * are not all initialized yet (such as PM). 7060 */ 7061 7062 /* 7063 * Send a TEST UNIT READY command to the device. This should clear 7064 * any outstanding UNIT ATTENTION that may be present. 7065 * 7066 * Note: Don't check for success; just track if there is a reservation. 7067 * This is a throw-away command to clear any unit attentions. 7068 * 7069 * Note: This MUST be the first command issued to the target during 7070 * attach to ensure power on UNIT ATTENTIONS are cleared. 7071 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7072 * with attempts at spinning up a device with no media.
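 *
 * For example (sketch), attach-time commands take the form:
 *
 *     (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR);
 *     rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize,
 *         SD_PATH_DIRECT);
 *
 * rather than SD_PATH_STANDARD, which would route commands through
 * chain layers (such as PM) that are not yet initialized at this point.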
7073 */ 7074 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 7075 reservation_flag = SD_TARGET_IS_RESERVED; 7076 } 7077 7078 /* 7079 * If the device is NOT a removable media device, attempt to spin 7080 * it up (using the START_STOP_UNIT command) and read its capacity 7081 * (using the READ CAPACITY command). Note, however, that either 7082 * of these could fail and in some cases we would continue with 7083 * the attach despite the failure (see below). 7084 */ 7085 if (un->un_f_descr_format_supported) { 7086 switch (sd_spin_up_unit(un)) { 7087 case 0: 7088 /* 7089 * Spin-up was successful; now try to read the 7090 * capacity. If successful then save the results 7091 * and mark the capacity & lbasize as valid. 7092 */ 7093 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7094 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7095 7096 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7097 &lbasize, SD_PATH_DIRECT)) { 7098 case 0: { 7099 if (capacity > DK_MAX_BLOCKS) { 7100 #ifdef _LP64 7101 if (capacity + 1 > 7102 SD_GROUP1_MAX_ADDRESS) { 7103 /* 7104 * Enable descriptor format 7105 * sense data so that we can 7106 * get 64 bit sense data 7107 * fields. 7108 */ 7109 sd_enable_descr_sense(un); 7110 } 7111 #else 7112 /* 32-bit kernels can't handle this */ 7113 scsi_log(SD_DEVINFO(un), 7114 sd_label, CE_WARN, 7115 "disk has %llu blocks, which " 7116 "is too large for a 32-bit " 7117 "kernel", capacity); 7118 7119 #if defined(__i386) || defined(__amd64) 7120 /* 7121 * A 1TB disk was treated as (1T - 512)B 7122 * in the past, so it might have a 7123 * valid VTOC and Solaris partitions; 7124 * we have to allow it to continue to 7125 * work. 7126 */ 7127 if (capacity - 1 > DK_MAX_BLOCKS) 7128 #endif 7129 goto spinup_failed; 7130 #endif 7131 } 7132 7133 /* 7134 * Here it is not necessary to check whether 7135 * the capacity of the device is bigger than 7136 * what the max HBA CDB can support, because 7137 * sd_send_scsi_READ_CAPACITY retrieves the 7138 * capacity by sending a USCSI command, which 7139 * is constrained by the max HBA CDB. In fact, 7140 * sd_send_scsi_READ_CAPACITY returns EINVAL 7141 * when a bigger CDB than the required CDB 7142 * length is used. That case is handled in 7143 * "case EINVAL" below. 7144 */ 7145 7146 /* 7147 * The following relies on 7148 * sd_send_scsi_READ_CAPACITY never 7149 * returning 0 for capacity and/or lbasize. 7150 */ 7151 sd_update_block_info(un, lbasize, capacity); 7152 7153 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7154 "sd_unit_attach: un:0x%p capacity = %ld " 7155 "blocks; lbasize= %ld.\n", un, 7156 un->un_blockcount, un->un_tgt_blocksize); 7157 7158 break; 7159 } 7160 case EINVAL: 7161 /* 7162 * In the case where the max-cdb-length property 7163 * is smaller than the required CDB length for 7164 * a SCSI device, a target driver can fail to 7165 * attach to that device. 7166 */ 7167 scsi_log(SD_DEVINFO(un), 7168 sd_label, CE_WARN, 7169 "disk capacity is too large " 7170 "for current cdb length"); 7171 goto spinup_failed; 7172 case EACCES: 7173 /* 7174 * Should never get here if the spin-up 7175 * succeeded, but code it in anyway. 7176 * From here, just continue with the attach... 7177 */ 7178 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7179 "sd_unit_attach: un:0x%p " 7180 "sd_send_scsi_READ_CAPACITY " 7181 "returned reservation conflict\n", un); 7182 reservation_flag = SD_TARGET_IS_RESERVED; 7183 break; 7184 default: 7185 /* 7186 * Likewise, should never get here if the 7187 * spin-up succeeded. Just continue with 7188 * the attach...
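 *
 * As a point of reference for the capacity checks above (illustrative
 * arithmetic, assuming 512-byte blocks and the usual constant
 * definitions): DK_MAX_BLOCKS is 0x7fffffff blocks, i.e. 1TB - 512B,
 * and SD_GROUP1_MAX_ADDRESS corresponds to the 32-bit LBA limit of
 * group 1 CDBs, i.e. 2^32 blocks or about 2TB; at or above that limit
 * descriptor-format sense data is enabled so that 64-bit LBAs can be
 * reported in the sense information fields.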
7189 */ 7190 break; 7191 } 7192 break; 7193 case EACCES: 7194 /* 7195 * Device is reserved by another host. In this case 7196 * we could not spin it up or read the capacity, but 7197 * we continue with the attach anyway. 7198 */ 7199 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7200 "sd_unit_attach: un:0x%p spin-up reservation " 7201 "conflict.\n", un); 7202 reservation_flag = SD_TARGET_IS_RESERVED; 7203 break; 7204 default: 7205 /* Fail the attach if the spin-up failed. */ 7206 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7207 "sd_unit_attach: un:0x%p spin-up failed.", un); 7208 goto spinup_failed; 7209 } 7210 } 7211 7212 /* 7213 * Check to see if this is an MMC drive 7214 */ 7215 if (ISCD(un)) { 7216 sd_set_mmc_caps(un); 7217 } 7218 7219 7220 /* 7221 * Add a zero-length attribute to tell the world we support 7222 * kernel ioctls (for layered drivers) 7223 */ 7224 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7225 DDI_KERNEL_IOCTL, NULL, 0); 7226 7227 /* 7228 * Add a boolean property to tell the world we support 7229 * the B_FAILFAST flag (for layered drivers) 7230 */ 7231 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7232 "ddi-failfast-supported", NULL, 0); 7233 7234 /* 7235 * Initialize power management 7236 */ 7237 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7238 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7239 sd_setup_pm(un, devi); 7240 if (un->un_f_pm_is_enabled == FALSE) { 7241 /* 7242 * For performance, point to a jump table that does 7243 * not include pm. 7244 * The direct and priority chains don't change with PM. 7245 * 7246 * Note: this is currently done based on individual device 7247 * capabilities. When an interface for determining system 7248 * power enabled state becomes available, or when additional 7249 * layers are added to the command chain, these values will 7250 * have to be re-evaluated for correctness. 7251 */ 7252 if (un->un_f_non_devbsize_supported) { 7253 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7254 } else { 7255 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7256 } 7257 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7258 } 7259 7260 /* 7261 * This property is set to 0 by HA software to avoid retries 7262 * on a reserved disk. (The preferred property name is 7263 * "retry-on-reservation-conflict") (1189689) 7264 * 7265 * Note: The use of a global here can have unintended consequences. A 7266 * per-instance variable is preferable to match the capabilities of 7267 * different underlying HBAs (4402600) 7268 */ 7269 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7270 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7271 sd_retry_on_reservation_conflict); 7272 if (sd_retry_on_reservation_conflict != 0) { 7273 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7274 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7275 sd_retry_on_reservation_conflict); 7276 } 7277 7278 /* Set up options for QFULL handling. */ 7279 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7280 "qfull-retries", -1)) != -1) { 7281 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7282 rval, 1); 7283 } 7284 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7285 "qfull-retry-interval", -1)) != -1) { 7286 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7287 rval, 1); 7288 } 7289 7290 /* 7291 * This just prints a message that announces the existence of the 7292 * device.
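 * (A line of the form "sd0 at mpt0: target 0 lun 0"; the instance and
 * parent names here are illustrative only.)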
The message is always printed in the system logfile, but 7293 * only appears on the console if the system is booted with the 7294 * -v (verbose) argument. 7295 */ 7296 ddi_report_dev(devi); 7297 7298 un->un_mediastate = DKIO_NONE; 7299 7300 cmlb_alloc_handle(&un->un_cmlbhandle); 7301 7302 #if defined(__i386) || defined(__amd64) 7303 /* 7304 * On x86, compensate for off-by-1 legacy error 7305 */ 7306 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7307 (lbasize == un->un_sys_blocksize)) 7308 offbyone = CMLB_OFF_BY_ONE; 7309 #endif 7310 7311 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7312 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7313 un->un_node_type, offbyone, un->un_cmlbhandle, 7314 (void *)SD_PATH_DIRECT) != 0) { 7315 goto cmlb_attach_failed; 7316 } 7317 7318 7319 /* 7320 * Read and validate the device's geometry (i.e., disk label) 7321 * A new unformatted drive will not have a valid geometry, but 7322 * the driver needs to successfully attach to this device so 7323 * the drive can be formatted via ioctls. 7324 */ 7325 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7326 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7327 7328 mutex_enter(SD_MUTEX(un)); 7329 7330 /* 7331 * Read and initialize the devid for the unit. 7332 */ 7333 if (un->un_f_devid_supported) { 7334 sd_register_devid(un, devi, reservation_flag); 7335 } 7336 mutex_exit(SD_MUTEX(un)); 7337 7338 #if (defined(__fibre)) 7339 /* 7340 * Register callbacks for fibre only. You can't do this solely 7341 * on the basis of the devid_type because this is hba specific. 7342 * We need to query our hba capabilities to find out whether to 7343 * register or not. 7344 */ 7345 if (un->un_f_is_fibre) { 7346 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7347 sd_init_event_callbacks(un); 7348 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7349 "sd_unit_attach: un:0x%p event callbacks inserted", 7350 un); 7351 } 7352 } 7353 #endif 7354 7355 if (un->un_f_opt_disable_cache == TRUE) { 7356 /* 7357 * Disable both read cache and write cache. This is 7358 * the historic behavior of the keywords in the config file. 7359 */ 7360 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7361 0) { 7362 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7363 "sd_unit_attach: un:0x%p Could not disable " 7364 "caching", un); 7365 goto devid_failed; 7366 } 7367 } 7368 7369 /* 7370 * Check the value of the WCE bit now and 7371 * set un_f_write_cache_enabled accordingly. 7372 */ 7373 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7374 mutex_enter(SD_MUTEX(un)); 7375 un->un_f_write_cache_enabled = (wc_enabled != 0); 7376 mutex_exit(SD_MUTEX(un)); 7377 7378 /* 7379 * Check the value of the NV_SUP bit and set 7380 * un_f_suppress_cache_flush accordingly. 7381 */ 7382 sd_get_nv_sup(un); 7383 7384 /* 7385 * Find out what type of reservation this disk supports. 7386 */ 7387 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7388 case 0: 7389 /* 7390 * SCSI-3 reservations are supported. 7391 */ 7392 un->un_reservation_type = SD_SCSI3_RESERVATION; 7393 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7394 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7395 break; 7396 case ENOTSUP: 7397 /* 7398 * The PERSISTENT RESERVE IN command would not be recognized by 7399 * a SCSI-2 device, so assume the reservation type is SCSI-2.
7400 */ 7401 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7402 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7403 un->un_reservation_type = SD_SCSI2_RESERVATION; 7404 break; 7405 default: 7406 /* 7407 * default to SCSI-3 reservations 7408 */ 7409 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7410 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7411 un->un_reservation_type = SD_SCSI3_RESERVATION; 7412 break; 7413 } 7414 7415 /* 7416 * Set the pstat and error stat values here, so that data obtained during 7417 * the previous attach-time routines is available. 7418 * 7419 * Note: This is a critical sequence that needs to be maintained: 7420 * 1) Instantiate the kstats before any routines using the iopath 7421 * (i.e. sd_send_scsi_cmd). 7422 * 2) Initialize the error stats (sd_set_errstats) and partition 7423 * stats (sd_set_pstats) here, following 7424 * cmlb_validate_geometry(), sd_register_devid(), and 7425 * sd_cache_control(). 7426 */ 7427 7428 if (un->un_f_pkstats_enabled && geom_label_valid) { 7429 sd_set_pstats(un); 7430 SD_TRACE(SD_LOG_IO_PARTITION, un, 7431 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7432 } 7433 7434 sd_set_errstats(un); 7435 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7436 "sd_unit_attach: un:0x%p errstats set\n", un); 7437 7438 7439 /* 7440 * After successfully attaching an instance, we record the information 7441 * of how many luns have been attached on the relative target and 7442 * controller for parallel SCSI. This information is used when sd tries 7443 * to set the tagged queuing capability in HBA. 7444 */ 7445 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7446 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7447 } 7448 7449 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7450 "sd_unit_attach: un:0x%p exit success\n", un); 7451 7452 return (DDI_SUCCESS); 7453 7454 /* 7455 * An error occurred during the attach; clean up & return failure. 7456 */ 7457 7458 devid_failed: 7459 7460 setup_pm_failed: 7461 ddi_remove_minor_node(devi, NULL); 7462 7463 cmlb_attach_failed: 7464 /* 7465 * Cleanup from the scsi_ifsetcap() calls (437868) 7466 */ 7467 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7468 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7469 7470 /* 7471 * Refer to the comments of setting tagged-qing in the beginning of 7472 * sd_unit_attach. We can only disable tagged queuing when there is 7473 * no lun attached on the target. 7474 */ 7475 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7476 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7477 } 7478 7479 if (un->un_f_is_fibre == FALSE) { 7480 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7481 } 7482 7483 spinup_failed: 7484 7485 mutex_enter(SD_MUTEX(un)); 7486 7487 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 7488 if (un->un_direct_priority_timeid != NULL) { 7489 timeout_id_t temp_id = un->un_direct_priority_timeid; 7490 un->un_direct_priority_timeid = NULL; 7491 mutex_exit(SD_MUTEX(un)); 7492 (void) untimeout(temp_id); 7493 mutex_enter(SD_MUTEX(un)); 7494 } 7495 7496 /* Cancel any pending start/stop timeouts */ 7497 if (un->un_startstop_timeid != NULL) { 7498 timeout_id_t temp_id = un->un_startstop_timeid; 7499 un->un_startstop_timeid = NULL; 7500 mutex_exit(SD_MUTEX(un)); 7501 (void) untimeout(temp_id); 7502 mutex_enter(SD_MUTEX(un)); 7503 } 7504 7505 /* Cancel any pending reset-throttle timeouts */ 7506 if (un->un_reset_throttle_timeid != NULL) { 7507 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7508 un->un_reset_throttle_timeid = NULL; 7509 mutex_exit(SD_MUTEX(un)); 7510 (void) untimeout(temp_id); 7511 mutex_enter(SD_MUTEX(un)); 7512 } 7513 7514 /* Cancel any pending retry timeouts */ 7515 if (un->un_retry_timeid != NULL) { 7516 timeout_id_t temp_id = un->un_retry_timeid; 7517 un->un_retry_timeid = NULL; 7518 mutex_exit(SD_MUTEX(un)); 7519 (void) untimeout(temp_id); 7520 mutex_enter(SD_MUTEX(un)); 7521 } 7522 7523 /* Cancel any pending delayed cv broadcast timeouts */ 7524 if (un->un_dcvb_timeid != NULL) { 7525 timeout_id_t temp_id = un->un_dcvb_timeid; 7526 un->un_dcvb_timeid = NULL; 7527 mutex_exit(SD_MUTEX(un)); 7528 (void) untimeout(temp_id); 7529 mutex_enter(SD_MUTEX(un)); 7530 } 7531 7532 mutex_exit(SD_MUTEX(un)); 7533 7534 /* There should not be any in-progress I/O so ASSERT this check */ 7535 ASSERT(un->un_ncmds_in_transport == 0); 7536 ASSERT(un->un_ncmds_in_driver == 0); 7537 7538 /* Do not free the softstate if the callback routine is active */ 7539 sd_sync_with_callback(un); 7540 7541 /* 7542 * Partition stats apparently are not used with removables. These would 7543 * not have been created during attach, so no need to clean them up... 7544 */ 7545 if (un->un_errstats != NULL) { 7546 kstat_delete(un->un_errstats); 7547 un->un_errstats = NULL; 7548 } 7549 7550 create_errstats_failed: 7551 7552 if (un->un_stats != NULL) { 7553 kstat_delete(un->un_stats); 7554 un->un_stats = NULL; 7555 } 7556 7557 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7558 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7559 7560 ddi_prop_remove_all(devi); 7561 sema_destroy(&un->un_semoclose); 7562 cv_destroy(&un->un_state_cv); 7563 7564 getrbuf_failed: 7565 7566 sd_free_rqs(un); 7567 7568 alloc_rqs_failed: 7569 7570 devp->sd_private = NULL; 7571 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7572 7573 get_softstate_failed: 7574 /* 7575 * Note: the man pages are unclear as to whether or not doing a 7576 * ddi_soft_state_free(sd_state, instance) is the right way to 7577 * clean up after the ddi_soft_state_zalloc() if the subsequent 7578 * ddi_get_soft_state() fails. The implication seems to be 7579 * that the get_soft_state cannot fail if the zalloc succeeds. 7580 */ 7581 ddi_soft_state_free(sd_state, instance); 7582 7583 probe_failed: 7584 scsi_unprobe(devp); 7585 7586 return (DDI_FAILURE); 7587 } 7588 7589 7590 /* 7591 * Function: sd_unit_detach 7592 * 7593 * Description: Performs DDI_DETACH processing for sddetach(). 
7594 * 7595 * Return Code: DDI_SUCCESS 7596 * DDI_FAILURE 7597 * 7598 * Context: Kernel thread context 7599 */ 7600 7601 static int 7602 sd_unit_detach(dev_info_t *devi) 7603 { 7604 struct scsi_device *devp; 7605 struct sd_lun *un; 7606 int i; 7607 int tgt; 7608 dev_t dev; 7609 dev_info_t *pdip = ddi_get_parent(devi); 7610 int instance = ddi_get_instance(devi); 7611 7612 mutex_enter(&sd_detach_mutex); 7613 7614 /* 7615 * Fail the detach for any of the following: 7616 * - Unable to get the sd_lun struct for the instance 7617 * - A layered driver has an outstanding open on the instance 7618 * - Another thread is already detaching this instance 7619 * - Another thread is currently performing an open 7620 */ 7621 devp = ddi_get_driver_private(devi); 7622 if ((devp == NULL) || 7623 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7624 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7625 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7626 mutex_exit(&sd_detach_mutex); 7627 return (DDI_FAILURE); 7628 } 7629 7630 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7631 7632 /* 7633 * Mark this instance as currently in a detach, to inhibit any 7634 * opens from a layered driver. 7635 */ 7636 un->un_detach_count++; 7637 mutex_exit(&sd_detach_mutex); 7638 7639 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7640 SCSI_ADDR_PROP_TARGET, -1); 7641 7642 dev = sd_make_device(SD_DEVINFO(un)); 7643 7644 #ifndef lint 7645 _NOTE(COMPETING_THREADS_NOW); 7646 #endif 7647 7648 mutex_enter(SD_MUTEX(un)); 7649 7650 /* 7651 * Fail the detach if there are any outstanding layered 7652 * opens on this device. 7653 */ 7654 for (i = 0; i < NDKMAP; i++) { 7655 if (un->un_ocmap.lyropen[i] != 0) { 7656 goto err_notclosed; 7657 } 7658 } 7659 7660 /* 7661 * Verify there are NO outstanding commands issued to this device. 7662 * ie, un_ncmds_in_transport == 0. 7663 * It's possible to have outstanding commands through the physio 7664 * code path, even though everything's closed. 7665 */ 7666 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7667 (un->un_direct_priority_timeid != NULL) || 7668 (un->un_state == SD_STATE_RWAIT)) { 7669 mutex_exit(SD_MUTEX(un)); 7670 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7671 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7672 goto err_stillbusy; 7673 } 7674 7675 /* 7676 * If we have the device reserved, release the reservation. 7677 */ 7678 if ((un->un_resvd_status & SD_RESERVE) && 7679 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7680 mutex_exit(SD_MUTEX(un)); 7681 /* 7682 * Note: sd_reserve_release sends a command to the device 7683 * via the sd_ioctlcmd() path, and can sleep. 7684 */ 7685 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7686 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7687 "sd_dr_detach: Cannot release reservation \n"); 7688 } 7689 } else { 7690 mutex_exit(SD_MUTEX(un)); 7691 } 7692 7693 /* 7694 * Untimeout any reserve recover, throttle reset, restart unit 7695 * and delayed broadcast timeout threads. Protect the timeout pointer 7696 * from getting nulled by their callback functions. 
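 *
 * The per-timeout idiom used below is (sketch; un_xxx_timeid stands in
 * for each of the timeout ids involved):
 *
 *     timeout_id_t temp_id = un->un_xxx_timeid;   grab id under SD_MUTEX
 *     un->un_xxx_timeid = NULL;                   callback now sees it gone
 *     mutex_exit(SD_MUTEX(un));
 *     (void) untimeout(temp_id);
 *     mutex_enter(SD_MUTEX(un));
 *
 * SD_MUTEX is dropped around untimeout(9F) because untimeout waits for
 * a callback that is already running; a callback blocked on SD_MUTEX
 * would otherwise deadlock with this thread.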
7697 */ 7698 mutex_enter(SD_MUTEX(un)); 7699 if (un->un_resvd_timeid != NULL) { 7700 timeout_id_t temp_id = un->un_resvd_timeid; 7701 un->un_resvd_timeid = NULL; 7702 mutex_exit(SD_MUTEX(un)); 7703 (void) untimeout(temp_id); 7704 mutex_enter(SD_MUTEX(un)); 7705 } 7706 7707 if (un->un_reset_throttle_timeid != NULL) { 7708 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7709 un->un_reset_throttle_timeid = NULL; 7710 mutex_exit(SD_MUTEX(un)); 7711 (void) untimeout(temp_id); 7712 mutex_enter(SD_MUTEX(un)); 7713 } 7714 7715 if (un->un_startstop_timeid != NULL) { 7716 timeout_id_t temp_id = un->un_startstop_timeid; 7717 un->un_startstop_timeid = NULL; 7718 mutex_exit(SD_MUTEX(un)); 7719 (void) untimeout(temp_id); 7720 mutex_enter(SD_MUTEX(un)); 7721 } 7722 7723 if (un->un_dcvb_timeid != NULL) { 7724 timeout_id_t temp_id = un->un_dcvb_timeid; 7725 un->un_dcvb_timeid = NULL; 7726 mutex_exit(SD_MUTEX(un)); 7727 (void) untimeout(temp_id); 7728 } else { 7729 mutex_exit(SD_MUTEX(un)); 7730 } 7731 7732 /* Remove any pending reservation reclaim requests for this device */ 7733 sd_rmv_resv_reclaim_req(dev); 7734 7735 mutex_enter(SD_MUTEX(un)); 7736 7737 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7738 if (un->un_direct_priority_timeid != NULL) { 7739 timeout_id_t temp_id = un->un_direct_priority_timeid; 7740 un->un_direct_priority_timeid = NULL; 7741 mutex_exit(SD_MUTEX(un)); 7742 (void) untimeout(temp_id); 7743 mutex_enter(SD_MUTEX(un)); 7744 } 7745 7746 /* Cancel any active multi-host disk watch thread requests */ 7747 if (un->un_mhd_token != NULL) { 7748 mutex_exit(SD_MUTEX(un)); 7749 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7750 if (scsi_watch_request_terminate(un->un_mhd_token, 7751 SCSI_WATCH_TERMINATE_NOWAIT)) { 7752 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7753 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7754 /* 7755 * Note: We are returning here after having removed 7756 * some driver timeouts above. This is consistent with 7757 * the legacy implementation but perhaps the watch 7758 * terminate call should be made with the wait flag set. 7759 */ 7760 goto err_stillbusy; 7761 } 7762 mutex_enter(SD_MUTEX(un)); 7763 un->un_mhd_token = NULL; 7764 } 7765 7766 if (un->un_swr_token != NULL) { 7767 mutex_exit(SD_MUTEX(un)); 7768 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7769 if (scsi_watch_request_terminate(un->un_swr_token, 7770 SCSI_WATCH_TERMINATE_NOWAIT)) { 7771 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7772 "sd_dr_detach: Cannot cancel swr watch request\n"); 7773 /* 7774 * Note: We are returning here after having removed 7775 * some driver timeouts above. This is consistent with 7776 * the legacy implementation but perhaps the watch 7777 * terminate call should be made with the wait flag set. 7778 */ 7779 goto err_stillbusy; 7780 } 7781 mutex_enter(SD_MUTEX(un)); 7782 un->un_swr_token = NULL; 7783 } 7784 7785 mutex_exit(SD_MUTEX(un)); 7786 7787 /* 7788 * Clear any scsi_reset_notifies. We clear the reset notifies 7789 * if we have not registered one. 7790 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7791 */ 7792 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7793 sd_mhd_reset_notify_cb, (caddr_t)un); 7794 7795 /* 7796 * protect the timeout pointers from getting nulled by 7797 * their callback functions during the cancellation process. 7798 * In such a scenario untimeout can be invoked with a null value. 
7799 */ 7800 _NOTE(NO_COMPETING_THREADS_NOW); 7801 7802 mutex_enter(&un->un_pm_mutex); 7803 if (un->un_pm_idle_timeid != NULL) { 7804 timeout_id_t temp_id = un->un_pm_idle_timeid; 7805 un->un_pm_idle_timeid = NULL; 7806 mutex_exit(&un->un_pm_mutex); 7807 7808 /* 7809 * Timeout is active; cancel it. 7810 * Note that it'll never be active on a device 7811 * that does not support PM; therefore we don't 7812 * have to check before calling pm_idle_component. 7813 */ 7814 (void) untimeout(temp_id); 7815 (void) pm_idle_component(SD_DEVINFO(un), 0); 7816 mutex_enter(&un->un_pm_mutex); 7817 } 7818 7819 /* 7820 * Check whether there is already a timeout scheduled for power 7821 * management. If so, don't lower the power here; that's 7822 * the timeout handler's job. 7823 */ 7824 if (un->un_pm_timeid != NULL) { 7825 timeout_id_t temp_id = un->un_pm_timeid; 7826 un->un_pm_timeid = NULL; 7827 mutex_exit(&un->un_pm_mutex); 7828 /* 7829 * Timeout is active; cancel it. 7830 * Note that it'll never be active on a device 7831 * that does not support PM; therefore we don't 7832 * have to check before calling pm_idle_component. 7833 */ 7834 (void) untimeout(temp_id); 7835 (void) pm_idle_component(SD_DEVINFO(un), 0); 7836 7837 } else { 7838 mutex_exit(&un->un_pm_mutex); 7839 if ((un->un_f_pm_is_enabled == TRUE) && 7840 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 7841 DDI_SUCCESS)) { 7842 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7843 "sd_dr_detach: Lower power request failed, ignoring.\n"); 7844 /* 7845 * Fix for bug: 4297749, item # 13 7846 * The above test now includes a check to see if PM is 7847 * supported by this device before calling 7848 * pm_lower_power(). 7849 * Note, the following is not dead code. The call to 7850 * pm_lower_power above will generate a call back into 7851 * our sdpower routine which might result in a timeout 7852 * handler getting activated. Therefore the following 7853 * code is valid and necessary. 7854 */ 7855 mutex_enter(&un->un_pm_mutex); 7856 if (un->un_pm_timeid != NULL) { 7857 timeout_id_t temp_id = un->un_pm_timeid; 7858 un->un_pm_timeid = NULL; 7859 mutex_exit(&un->un_pm_mutex); 7860 (void) untimeout(temp_id); 7861 (void) pm_idle_component(SD_DEVINFO(un), 0); 7862 } else { 7863 mutex_exit(&un->un_pm_mutex); 7864 } 7865 } 7866 } 7867 7868 /* 7869 * Cleanup from the scsi_ifsetcap() calls (437868) 7870 * Relocated here from above to be after the call to 7871 * pm_lower_power, which was getting errors. 7872 */ 7873 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7874 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7875 7876 /* 7877 * Currently, tagged queuing is supported per target by the HBA. 7878 * Setting this per lun instance actually sets the capability of this 7879 * target in the HBA, which affects those luns already attached on the 7880 * same target. So during detach, we can disable this capability 7881 * only when this is the only lun left on this target. By doing 7882 * this, we assume a target has the same tagged queuing capability 7883 * for every lun. The condition can be removed when the HBA is changed 7884 * to support per-lun tagged queuing capability.
7885 */ 7886 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7887 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7888 } 7889 7890 if (un->un_f_is_fibre == FALSE) { 7891 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7892 } 7893 7894 /* 7895 * Remove any event callbacks, fibre only 7896 */ 7897 if (un->un_f_is_fibre == TRUE) { 7898 if ((un->un_insert_event != NULL) && 7899 (ddi_remove_event_handler(un->un_insert_cb_id) != 7900 DDI_SUCCESS)) { 7901 /* 7902 * Note: We are returning here after having done 7903 * substantial cleanup above. This is consistent 7904 * with the legacy implementation but this may not 7905 * be the right thing to do. 7906 */ 7907 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7908 "sd_dr_detach: Cannot cancel insert event\n"); 7909 goto err_remove_event; 7910 } 7911 un->un_insert_event = NULL; 7912 7913 if ((un->un_remove_event != NULL) && 7914 (ddi_remove_event_handler(un->un_remove_cb_id) != 7915 DDI_SUCCESS)) { 7916 /* 7917 * Note: We are returning here after having done 7918 * substantial cleanup above. This is consistent 7919 * with the legacy implementation but this may not 7920 * be the right thing to do. 7921 */ 7922 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7923 "sd_dr_detach: Cannot cancel remove event\n"); 7924 goto err_remove_event; 7925 } 7926 un->un_remove_event = NULL; 7927 } 7928 7929 /* Do not free the softstate if the callback routine is active */ 7930 sd_sync_with_callback(un); 7931 7932 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7933 cmlb_free_handle(&un->un_cmlbhandle); 7934 7935 /* 7936 * Hold the detach mutex here, to make sure that no other threads ever 7937 * can access a (partially) freed soft state structure. 7938 */ 7939 mutex_enter(&sd_detach_mutex); 7940 7941 /* 7942 * Clean up the soft state struct. 7943 * Cleanup is done in reverse order of allocs/inits. 7944 * At this point there should be no competing threads anymore. 7945 */ 7946 7947 /* Unregister and free device id. */ 7948 ddi_devid_unregister(devi); 7949 if (un->un_devid) { 7950 ddi_devid_free(un->un_devid); 7951 un->un_devid = NULL; 7952 } 7953 7954 /* 7955 * Destroy wmap cache if it exists. 7956 */ 7957 if (un->un_wm_cache != NULL) { 7958 kmem_cache_destroy(un->un_wm_cache); 7959 un->un_wm_cache = NULL; 7960 } 7961 7962 /* 7963 * kstat cleanup is done in detach for all device types (4363169). 7964 * We do not want to fail detach if the device kstats are not deleted 7965 * since there is a confusion about the devo_refcnt for the device. 7966 * We just delete the kstats and let detach complete successfully. 7967 */ 7968 if (un->un_stats != NULL) { 7969 kstat_delete(un->un_stats); 7970 un->un_stats = NULL; 7971 } 7972 if (un->un_errstats != NULL) { 7973 kstat_delete(un->un_errstats); 7974 un->un_errstats = NULL; 7975 } 7976 7977 /* Remove partition stats */ 7978 if (un->un_f_pkstats_enabled) { 7979 for (i = 0; i < NSDMAP; i++) { 7980 if (un->un_pstats[i] != NULL) { 7981 kstat_delete(un->un_pstats[i]); 7982 un->un_pstats[i] = NULL; 7983 } 7984 } 7985 } 7986 7987 /* Remove xbuf registration */ 7988 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7989 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7990 7991 /* Remove driver properties */ 7992 ddi_prop_remove_all(devi); 7993 7994 mutex_destroy(&un->un_pm_mutex); 7995 cv_destroy(&un->un_pm_busy_cv); 7996 7997 cv_destroy(&un->un_wcc_cv); 7998 7999 /* Open/close semaphore */ 8000 sema_destroy(&un->un_semoclose); 8001 8002 /* Removable media condvar. 
*/ 8003 cv_destroy(&un->un_state_cv); 8004 8005 /* Suspend/resume condvar. */ 8006 cv_destroy(&un->un_suspend_cv); 8007 cv_destroy(&un->un_disk_busy_cv); 8008 8009 sd_free_rqs(un); 8010 8011 /* Free up soft state */ 8012 devp->sd_private = NULL; 8013 8014 bzero(un, sizeof (struct sd_lun)); 8015 ddi_soft_state_free(sd_state, instance); 8016 8017 mutex_exit(&sd_detach_mutex); 8018 8019 /* This frees up the INQUIRY data associated with the device. */ 8020 scsi_unprobe(devp); 8021 8022 /* 8023 * After successfully detaching an instance, we update the information 8024 * of how many luns have been attached in the relative target and 8025 * controller for parallel SCSI. This information is used when sd tries 8026 * to set the tagged queuing capability in HBA. 8027 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8028 * check if the device is parallel SCSI. However, we don't need to 8029 * check here because we've already checked during attach. No device 8030 * that is not parallel SCSI is in the chain. 8031 */ 8032 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8033 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8034 } 8035 8036 return (DDI_SUCCESS); 8037 8038 err_notclosed: 8039 mutex_exit(SD_MUTEX(un)); 8040 8041 err_stillbusy: 8042 _NOTE(NO_COMPETING_THREADS_NOW); 8043 8044 err_remove_event: 8045 mutex_enter(&sd_detach_mutex); 8046 un->un_detach_count--; 8047 mutex_exit(&sd_detach_mutex); 8048 8049 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8050 return (DDI_FAILURE); 8051 } 8052 8053 8054 /* 8055 * Function: sd_create_errstats 8056 * 8057 * Description: This routine instantiates the device error stats. 8058 * 8059 * Note: During attach the stats are instantiated first so they are 8060 * available for attach-time routines that utilize the driver 8061 * iopath to send commands to the device. The stats are initialized 8062 * separately so data obtained during some attach-time routines is 8063 * available. 
(4362483) 8064 * 8065 * Arguments: un - driver soft state (unit) structure 8066 * instance - driver instance 8067 * 8068 * Context: Kernel thread context 8069 */ 8070 8071 static void 8072 sd_create_errstats(struct sd_lun *un, int instance) 8073 { 8074 struct sd_errstats *stp; 8075 char kstatmodule_err[KSTAT_STRLEN]; 8076 char kstatname[KSTAT_STRLEN]; 8077 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8078 8079 ASSERT(un != NULL); 8080 8081 if (un->un_errstats != NULL) { 8082 return; 8083 } 8084 8085 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8086 "%serr", sd_label); 8087 (void) snprintf(kstatname, sizeof (kstatname), 8088 "%s%d,err", sd_label, instance); 8089 8090 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8091 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8092 8093 if (un->un_errstats == NULL) { 8094 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8095 "sd_create_errstats: Failed kstat_create\n"); 8096 return; 8097 } 8098 8099 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8100 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8101 KSTAT_DATA_UINT32); 8102 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8103 KSTAT_DATA_UINT32); 8104 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8105 KSTAT_DATA_UINT32); 8106 kstat_named_init(&stp->sd_vid, "Vendor", 8107 KSTAT_DATA_CHAR); 8108 kstat_named_init(&stp->sd_pid, "Product", 8109 KSTAT_DATA_CHAR); 8110 kstat_named_init(&stp->sd_revision, "Revision", 8111 KSTAT_DATA_CHAR); 8112 kstat_named_init(&stp->sd_serial, "Serial No", 8113 KSTAT_DATA_CHAR); 8114 kstat_named_init(&stp->sd_capacity, "Size", 8115 KSTAT_DATA_ULONGLONG); 8116 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8117 KSTAT_DATA_UINT32); 8118 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8119 KSTAT_DATA_UINT32); 8120 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8121 KSTAT_DATA_UINT32); 8122 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8123 KSTAT_DATA_UINT32); 8124 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8125 KSTAT_DATA_UINT32); 8126 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8127 KSTAT_DATA_UINT32); 8128 8129 un->un_errstats->ks_private = un; 8130 un->un_errstats->ks_update = nulldev; 8131 8132 kstat_install(un->un_errstats); 8133 } 8134 8135 8136 /* 8137 * Function: sd_set_errstats 8138 * 8139 * Description: This routine sets the value of the vendor id, product id, 8140 * revision, serial number, and capacity device error stats. 8141 * 8142 * Note: During attach the stats are instantiated first so they are 8143 * available for attach-time routines that utilize the driver 8144 * iopath to send commands to the device. The stats are initialized 8145 * separately so data obtained during some attach-time routines is 8146 * available. 
(4362483) 8147 * 8148 * Arguments: un - driver soft state (unit) structure 8149 * 8150 * Context: Kernel thread context 8151 */ 8152 8153 static void 8154 sd_set_errstats(struct sd_lun *un) 8155 { 8156 struct sd_errstats *stp; 8157 8158 ASSERT(un != NULL); 8159 ASSERT(un->un_errstats != NULL); 8160 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8161 ASSERT(stp != NULL); 8162 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8163 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8164 (void) strncpy(stp->sd_revision.value.c, 8165 un->un_sd->sd_inq->inq_revision, 4); 8166 8167 /* 8168 * All the errstats are persistent across detach/attach, 8169 * so reset them here in case a disk drive has been 8170 * hot-replaced, except for Sun qualified drives whose 8171 * identity has not changed. 8172 */ 8173 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8174 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8175 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8176 stp->sd_softerrs.value.ui32 = 0; 8177 stp->sd_harderrs.value.ui32 = 0; 8178 stp->sd_transerrs.value.ui32 = 0; 8179 stp->sd_rq_media_err.value.ui32 = 0; 8180 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8181 stp->sd_rq_nodev_err.value.ui32 = 0; 8182 stp->sd_rq_recov_err.value.ui32 = 0; 8183 stp->sd_rq_illrq_err.value.ui32 = 0; 8184 stp->sd_rq_pfa_err.value.ui32 = 0; 8185 } 8186 8187 /* 8188 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8189 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8190 * (4376302)) 8191 */ 8192 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8193 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8194 sizeof (SD_INQUIRY(un)->inq_serial)); 8195 } 8196 8197 if (un->un_f_blockcount_is_valid != TRUE) { 8198 /* 8199 * Set capacity error stat to 0 for no media. This ensures 8200 * a valid capacity is displayed in response to 'iostat -E' 8201 * when no media is present in the device. 8202 */ 8203 stp->sd_capacity.value.ui64 = 0; 8204 } else { 8205 /* 8206 * Multiply un_blockcount by un->un_sys_blocksize to get 8207 * capacity. 8208 * 8209 * Note: for non-512 blocksize devices "un_blockcount" has been 8210 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8211 * (un_tgt_blocksize / un->un_sys_blocksize). 8212 */ 8213 stp->sd_capacity.value.ui64 = (uint64_t) 8214 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8215 } 8216 } 8217 8218 8219 /* 8220 * Function: sd_set_pstats 8221 * 8222 * Description: This routine instantiates and initializes the partition 8223 * stats for each partition with more than zero blocks. 8224 * (4363169) 8225 * 8226 * Arguments: un - driver soft state (unit) structure 8227 * 8228 * Context: Kernel thread context 8229 */ 8230 8231 static void 8232 sd_set_pstats(struct sd_lun *un) 8233 { 8234 char kstatname[KSTAT_STRLEN]; 8235 int instance; 8236 int i; 8237 diskaddr_t nblks = 0; 8238 char *partname = NULL; 8239 8240 ASSERT(un != NULL); 8241 8242 instance = ddi_get_instance(SD_DEVINFO(un)); 8243 8244 /* Note:x86: is this a VTOC8/VTOC16 difference?
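 *
 * For illustration, the kstats created below are I/O kstats of class
 * "partition", named <label><instance>,<partname> using the partition
 * name returned by cmlb_partinfo(), e.g. "sd0,a" (example name only).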
*/ 8245 for (i = 0; i < NSDMAP; i++) { 8246 8247 if (cmlb_partinfo(un->un_cmlbhandle, i, 8248 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8249 continue; 8250 mutex_enter(SD_MUTEX(un)); 8251 8252 if ((un->un_pstats[i] == NULL) && 8253 (nblks != 0)) { 8254 8255 (void) snprintf(kstatname, sizeof (kstatname), 8256 "%s%d,%s", sd_label, instance, 8257 partname); 8258 8259 un->un_pstats[i] = kstat_create(sd_label, 8260 instance, kstatname, "partition", KSTAT_TYPE_IO, 8261 1, KSTAT_FLAG_PERSISTENT); 8262 if (un->un_pstats[i] != NULL) { 8263 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8264 kstat_install(un->un_pstats[i]); 8265 } 8266 } 8267 mutex_exit(SD_MUTEX(un)); 8268 } 8269 } 8270 8271 8272 #if (defined(__fibre)) 8273 /* 8274 * Function: sd_init_event_callbacks 8275 * 8276 * Description: This routine initializes the insertion and removal event 8277 * callbacks. (fibre only) 8278 * 8279 * Arguments: un - driver soft state (unit) structure 8280 * 8281 * Context: Kernel thread context 8282 */ 8283 8284 static void 8285 sd_init_event_callbacks(struct sd_lun *un) 8286 { 8287 ASSERT(un != NULL); 8288 8289 if ((un->un_insert_event == NULL) && 8290 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8291 &un->un_insert_event) == DDI_SUCCESS)) { 8292 /* 8293 * Add the callback for an insertion event 8294 */ 8295 (void) ddi_add_event_handler(SD_DEVINFO(un), 8296 un->un_insert_event, sd_event_callback, (void *)un, 8297 &(un->un_insert_cb_id)); 8298 } 8299 8300 if ((un->un_remove_event == NULL) && 8301 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8302 &un->un_remove_event) == DDI_SUCCESS)) { 8303 /* 8304 * Add the callback for a removal event 8305 */ 8306 (void) ddi_add_event_handler(SD_DEVINFO(un), 8307 un->un_remove_event, sd_event_callback, (void *)un, 8308 &(un->un_remove_cb_id)); 8309 } 8310 } 8311 8312 8313 /* 8314 * Function: sd_event_callback 8315 * 8316 * Description: This routine handles insert/remove events (photon). The 8317 * state is changed to OFFLINE which can be used to suppress 8318 * error msgs. (fibre only) 8319 * 8320 * Arguments: un - driver soft state (unit) structure 8321 * 8322 * Context: Callout thread context 8323 */ 8324 /* ARGSUSED */ 8325 static void 8326 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8327 void *bus_impldata) 8328 { 8329 struct sd_lun *un = (struct sd_lun *)arg; 8330 8331 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8332 if (event == un->un_insert_event) { 8333 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8334 mutex_enter(SD_MUTEX(un)); 8335 if (un->un_state == SD_STATE_OFFLINE) { 8336 if (un->un_last_state != SD_STATE_SUSPENDED) { 8337 un->un_state = un->un_last_state; 8338 } else { 8339 /* 8340 * We have gone through SUSPEND/RESUME while 8341 * we were offline. Restore the last state. 8342 */ 8343 un->un_state = un->un_save_state; 8344 } 8345 } 8346 mutex_exit(SD_MUTEX(un)); 8347 8348 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8349 } else if (event == un->un_remove_event) { 8350 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8351 mutex_enter(SD_MUTEX(un)); 8352 /* 8353 * We need to handle an event callback that occurs during 8354 * the suspend operation, since we don't prevent it.
8355 */ 8356 if (un->un_state != SD_STATE_OFFLINE) { 8357 if (un->un_state != SD_STATE_SUSPENDED) { 8358 New_state(un, SD_STATE_OFFLINE); 8359 } else { 8360 un->un_last_state = SD_STATE_OFFLINE; 8361 } 8362 } 8363 mutex_exit(SD_MUTEX(un)); 8364 } else { 8365 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8366 "!Unknown event\n"); 8367 } 8368 8369 } 8370 #endif 8371 8372 /* 8373 * Function: sd_cache_control() 8374 * 8375 * Description: This routine is the driver entry point for setting 8376 * read and write caching by modifying the WCE (write cache 8377 * enable) and RCD (read cache disable) bits of mode 8378 * page 8 (MODEPAGE_CACHING). 8379 * 8380 * Arguments: un - driver soft state (unit) structure 8381 * rcd_flag - flag for controlling the read cache 8382 * wce_flag - flag for controlling the write cache 8383 * 8384 * Return Code: EIO 8385 * code returned by sd_send_scsi_MODE_SENSE and 8386 * sd_send_scsi_MODE_SELECT 8387 * 8388 * Context: Kernel Thread 8389 */ 8390 8391 static int 8392 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8393 { 8394 struct mode_caching *mode_caching_page; 8395 uchar_t *header; 8396 size_t buflen; 8397 int hdrlen; 8398 int bd_len; 8399 int rval = 0; 8400 struct mode_header_grp2 *mhp; 8401 8402 ASSERT(un != NULL); 8403 8404 /* 8405 * Do a test unit ready; otherwise a mode sense may not work if this 8406 * is the first command sent to the device after boot. 8407 */ 8408 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8409 8410 if (un->un_f_cfg_is_atapi == TRUE) { 8411 hdrlen = MODE_HEADER_LENGTH_GRP2; 8412 } else { 8413 hdrlen = MODE_HEADER_LENGTH; 8414 } 8415 8416 /* 8417 * Allocate memory for the retrieved mode page and its headers. Set 8418 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8419 * we get all of the mode sense data; otherwise, the mode select 8420 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8421 */ 8422 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8423 sizeof (struct mode_cache_scsi3); 8424 8425 header = kmem_zalloc(buflen, KM_SLEEP); 8426 8427 /* Get the information from the device. */ 8428 if (un->un_f_cfg_is_atapi == TRUE) { 8429 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8430 MODEPAGE_CACHING, SD_PATH_DIRECT); 8431 } else { 8432 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8433 MODEPAGE_CACHING, SD_PATH_DIRECT); 8434 } 8435 if (rval != 0) { 8436 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8437 "sd_cache_control: Mode Sense Failed\n"); 8438 kmem_free(header, buflen); 8439 return (rval); 8440 } 8441 8442 /* 8443 * Determine size of Block Descriptors in order to locate 8444 * the mode page data. ATAPI devices return 0, SCSI devices 8445 * should return MODE_BLK_DESC_LENGTH.
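 *
 * The buffer parsed below is laid out as follows (sketch):
 *
 *     +----------------------------+  offset 0
 *     | mode header                |  hdrlen bytes
 *     +----------------------------+  offset hdrlen
 *     | block descriptor(s)        |  bd_len bytes (0 for ATAPI)
 *     +----------------------------+  offset hdrlen + bd_len
 *     | caching mode page (0x08)   |
 *     +----------------------------+
 *
 * which is why mode_caching_page is computed as header + hdrlen + bd_len
 * further down.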
8446 */ 8447 if (un->un_f_cfg_is_atapi == TRUE) { 8448 mhp = (struct mode_header_grp2 *)header; 8449 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8450 } else { 8451 bd_len = ((struct mode_header *)header)->bdesc_length; 8452 } 8453 8454 if (bd_len > MODE_BLK_DESC_LENGTH) { 8455 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8456 "sd_cache_control: Mode Sense returned invalid " 8457 "block descriptor length\n"); 8458 kmem_free(header, buflen); 8459 return (EIO); 8460 } 8461 8462 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8463 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8464 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8465 " caching page code mismatch %d\n", 8466 mode_caching_page->mode_page.code); 8467 kmem_free(header, buflen); 8468 return (EIO); 8469 } 8470 8471 /* Check the relevant bits on successful mode sense. */ 8472 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8473 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8474 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8475 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8476 8477 size_t sbuflen; 8478 uchar_t save_pg; 8479 8480 /* 8481 * Construct select buffer length based on the 8482 * length of the sense data returned. 8483 */ 8484 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8485 sizeof (struct mode_page) + 8486 (int)mode_caching_page->mode_page.length; 8487 8488 /* 8489 * Set the caching bits as requested. 8490 */ 8491 if (rcd_flag == SD_CACHE_ENABLE) 8492 mode_caching_page->rcd = 0; 8493 else if (rcd_flag == SD_CACHE_DISABLE) 8494 mode_caching_page->rcd = 1; 8495 8496 if (wce_flag == SD_CACHE_ENABLE) 8497 mode_caching_page->wce = 1; 8498 else if (wce_flag == SD_CACHE_DISABLE) 8499 mode_caching_page->wce = 0; 8500 8501 /* 8502 * Save the page if the mode sense says the 8503 * drive supports it. 8504 */ 8505 save_pg = mode_caching_page->mode_page.ps ? 8506 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8507 8508 /* Clear reserved bits before mode select. */ 8509 mode_caching_page->mode_page.ps = 0; 8510 8511 /* 8512 * Clear out mode header for mode select. 8513 * The rest of the retrieved page will be reused. 8514 */ 8515 bzero(header, hdrlen); 8516 8517 if (un->un_f_cfg_is_atapi == TRUE) { 8518 mhp = (struct mode_header_grp2 *)header; 8519 mhp->bdesc_length_hi = bd_len >> 8; 8520 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8521 } else { 8522 ((struct mode_header *)header)->bdesc_length = bd_len; 8523 } 8524 8525 /* Issue mode select to change the cache settings */ 8526 if (un->un_f_cfg_is_atapi == TRUE) { 8527 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8528 sbuflen, save_pg, SD_PATH_DIRECT); 8529 } else { 8530 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8531 sbuflen, save_pg, SD_PATH_DIRECT); 8532 } 8533 } 8534 8535 kmem_free(header, buflen); 8536 return (rval); 8537 } 8538 8539 8540 /* 8541 * Function: sd_get_write_cache_enabled() 8542 * 8543 * Description: This routine is the driver entry point for determining if 8544 * write caching is enabled. It examines the WCE (write cache 8545 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8546 * 8547 * Arguments: un - driver soft state (unit) structure 8548 * is_enabled - pointer to int where write cache enabled state 8549 * is returned (non-zero -> write cache enabled) 8550 * 8551 * 8552 * Return Code: EIO 8553 * code returned by sd_send_scsi_MODE_SENSE 8554 * 8555 * Context: Kernel Thread 8556 * 8557 * NOTE: If ioctl is added to disable write cache, this sequence should 8558 * be followed so that no locking is required for accesses to 8559 * un->un_f_write_cache_enabled: 8560 * do mode select to clear wce 8561 * do synchronize cache to flush cache 8562 * set un->un_f_write_cache_enabled = FALSE 8563 * 8564 * Conversely, an ioctl to enable the write cache should be done 8565 * in this order: 8566 * set un->un_f_write_cache_enabled = TRUE 8567 * do mode select to set wce 8568 */ 8569 8570 static int 8571 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8572 { 8573 struct mode_caching *mode_caching_page; 8574 uchar_t *header; 8575 size_t buflen; 8576 int hdrlen; 8577 int bd_len; 8578 int rval = 0; 8579 8580 ASSERT(un != NULL); 8581 ASSERT(is_enabled != NULL); 8582 8583 /* in case of error, flag as enabled */ 8584 *is_enabled = TRUE; 8585 8586 /* 8587 * Do a test unit ready, otherwise a mode sense may not work if this 8588 * is the first command sent to the device after boot. 8589 */ 8590 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8591 8592 if (un->un_f_cfg_is_atapi == TRUE) { 8593 hdrlen = MODE_HEADER_LENGTH_GRP2; 8594 } else { 8595 hdrlen = MODE_HEADER_LENGTH; 8596 } 8597 8598 /* 8599 * Allocate memory for the retrieved mode page and its headers. Set 8600 * a pointer to the page itself. 8601 */ 8602 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8603 header = kmem_zalloc(buflen, KM_SLEEP); 8604 8605 /* Get the information from the device. */ 8606 if (un->un_f_cfg_is_atapi == TRUE) { 8607 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8608 MODEPAGE_CACHING, SD_PATH_DIRECT); 8609 } else { 8610 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8611 MODEPAGE_CACHING, SD_PATH_DIRECT); 8612 } 8613 if (rval != 0) { 8614 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8615 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8616 kmem_free(header, buflen); 8617 return (rval); 8618 } 8619 8620 /* 8621 * Determine size of Block Descriptors in order to locate 8622 * the mode page data. ATAPI devices return 0, SCSI devices 8623 * should return MODE_BLK_DESC_LENGTH. 
8624 */ 8625 if (un->un_f_cfg_is_atapi == TRUE) { 8626 struct mode_header_grp2 *mhp; 8627 mhp = (struct mode_header_grp2 *)header; 8628 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8629 } else { 8630 bd_len = ((struct mode_header *)header)->bdesc_length; 8631 } 8632 8633 if (bd_len > MODE_BLK_DESC_LENGTH) { 8634 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8635 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8636 "block descriptor length\n"); 8637 kmem_free(header, buflen); 8638 return (EIO); 8639 } 8640 8641 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8642 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8643 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense" 8644 " caching page code mismatch %d\n", 8645 mode_caching_page->mode_page.code); 8646 kmem_free(header, buflen); 8647 return (EIO); 8648 } 8649 *is_enabled = mode_caching_page->wce; 8650 8651 kmem_free(header, buflen); 8652 return (0); 8653 } 8654 8655 /* 8656 * Function: sd_get_nv_sup() 8657 * 8658 * Description: This routine is the driver entry point for 8659 * determining whether non-volatile cache is supported. This 8660 * determination process works as follows: 8661 * 8662 * 1. sd first queries sd.conf to see whether the 8663 * suppress_cache_flush bit is set for this device. 8664 * 8665 * 2. if it is not there, sd then queries the internal disk table. 8666 * 8667 * 3. if either sd.conf or the internal disk table specifies 8668 * that the cache flush be suppressed, we don't bother checking 8669 * the NV_SUP bit. 8670 * 8671 * If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 8672 * the optional INQUIRY VPD page 0x86. If the device 8673 * supports VPD page 0x86, sd examines the NV_SUP 8674 * (non-volatile cache support) bit in the INQUIRY VPD page 8675 * 0x86: 8676 * o If the NV_SUP bit is set, sd assumes the device has a 8677 * non-volatile cache and sets 8678 * un_f_sync_nv_supported to TRUE. 8679 * o Otherwise the cache is not non-volatile, and 8680 * un_f_sync_nv_supported is set to FALSE. 8681 * 8682 * Arguments: un - driver soft state (unit) structure 8683 * 8684 * Return Code: 8685 * 8686 * Context: Kernel Thread 8687 */ 8688 8689 static void 8690 sd_get_nv_sup(struct sd_lun *un) 8691 { 8692 int rval = 0; 8693 uchar_t *inq86 = NULL; 8694 size_t inq86_len = MAX_INQUIRY_SIZE; 8695 size_t inq86_resid = 0; 8696 struct dk_callback *dkc; 8697 8698 ASSERT(un != NULL); 8699 8700 mutex_enter(SD_MUTEX(un)); 8701 8702 /* 8703 * Be conservative about the device's support of the 8704 * SYNC_NV bit: un_f_sync_nv_supported is 8705 * initialized to false. 8706 */ 8707 un->un_f_sync_nv_supported = FALSE; 8708 8709 /* 8710 * If either sd.conf or the internal disk table 8711 * specifies that the cache flush be suppressed, then 8712 * we don't bother checking the NV_SUP bit.
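 *
 * For reference (a sketch of the SCSI semantics involved): the
 * sd_send_scsi_INQUIRY() call below passes 0x01 for the EVPD bit and
 * 0x86 for the page code, i.e. it issues INQUIRY (opcode 0x12) with
 * EVPD=1 and PAGE CODE=0x86 to request the Extended INQUIRY Data VPD
 * page; NV_SUP is then examined in byte 6 of that page via the
 * SD_VPD_NV_SUP mask.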
8713 */ 8714 if (un->un_f_suppress_cache_flush == TRUE) { 8715 mutex_exit(SD_MUTEX(un)); 8716 return; 8717 } 8718 8719 if (sd_check_vpd_page_support(un) == 0 && 8720 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 8721 mutex_exit(SD_MUTEX(un)); 8722 /* collect page 86 data if available */ 8723 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 8724 rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len, 8725 0x01, 0x86, &inq86_resid); 8726 8727 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 8728 SD_TRACE(SD_LOG_COMMON, un, 8729 "sd_get_nv_sup: \ 8730 successfully get VPD page: %x \ 8731 PAGE LENGTH: %x BYTE 6: %x\n", 8732 inq86[1], inq86[3], inq86[6]); 8733 8734 mutex_enter(SD_MUTEX(un)); 8735 /* 8736 * Check the value of the NV_SUP bit: only if the 8737 * device reports the NV_SUP bit as 1 will the 8738 * un_f_sync_nv_supported bit be set to true. 8739 */ 8740 if (inq86[6] & SD_VPD_NV_SUP) { 8741 un->un_f_sync_nv_supported = TRUE; 8742 } 8743 mutex_exit(SD_MUTEX(un)); 8744 } 8745 kmem_free(inq86, inq86_len); 8746 } else { 8747 mutex_exit(SD_MUTEX(un)); 8748 } 8749 8750 /* 8751 * Send a SYNC CACHE command to check whether the 8752 * SYNC_NV bit is supported. By this point 8753 * un_f_sync_nv_supported should hold the correct value. 8754 */ 8755 mutex_enter(SD_MUTEX(un)); 8756 if (un->un_f_sync_nv_supported) { 8757 mutex_exit(SD_MUTEX(un)); 8758 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 8759 dkc->dkc_flag = FLUSH_VOLATILE; 8760 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 8761 8762 /* 8763 * Send a TEST UNIT READY command to the device. This should 8764 * clear any outstanding UNIT ATTENTION that may be present. 8765 */ 8766 (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR); 8767 8768 kmem_free(dkc, sizeof (struct dk_callback)); 8769 } else { 8770 mutex_exit(SD_MUTEX(un)); 8771 } 8772 8773 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 8774 un_f_suppress_cache_flush is set to %d\n", 8775 un->un_f_suppress_cache_flush); 8776 } 8777 8778 /* 8779 * Function: sd_make_device 8780 * 8781 * Description: Utility routine to return the Solaris device number from 8782 * the data in the device's dev_info structure. 8783 * 8784 * Return Code: The Solaris device number 8785 * 8786 * Context: Any 8787 */ 8788 8789 static dev_t 8790 sd_make_device(dev_info_t *devi) 8791 { 8792 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8793 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8794 } 8795 8796 8797 /* 8798 * Function: sd_pm_entry 8799 * 8800 * Description: Called at the start of a new command to manage power 8801 * and busy status of a device. This includes determining whether 8802 * the current power state of the device is sufficient for 8803 * performing the command or whether it must be changed. 8804 * The PM framework is notified appropriately. 8805 * Only with a return status of DDI_SUCCESS will the 8806 * component be marked busy to the framework. 8807 * 8808 * All callers of sd_pm_entry must check the return status 8809 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 8810 * of DDI_FAILURE indicates the device failed to power up. 8811 * In this case un_pm_count has been adjusted so the result 8812 * on exit is still powered down, i.e. count is less than 0. 8813 * Calling sd_pm_exit with this count value hits an ASSERT. 8814 * 8815 * Return Code: DDI_SUCCESS or DDI_FAILURE 8816 * 8817 * Context: Kernel thread context.
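 *
 * A typical caller follows this pattern (sketch; the error handling
 * shown is caller-specific):
 *
 *     if (sd_pm_entry(un) != DDI_SUCCESS) {
 *         return (EIO);    command not issued; do NOT call sd_pm_exit
 *     }
 *     ... issue the command ...
 *     sd_pm_exit(un);      matching exit on the success path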
8818 */ 8819 8820 static int 8821 sd_pm_entry(struct sd_lun *un) 8822 { 8823 int return_status = DDI_SUCCESS; 8824 8825 ASSERT(!mutex_owned(SD_MUTEX(un))); 8826 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8827 8828 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8829 8830 if (un->un_f_pm_is_enabled == FALSE) { 8831 SD_TRACE(SD_LOG_IO_PM, un, 8832 "sd_pm_entry: exiting, PM not enabled\n"); 8833 return (return_status); 8834 } 8835 8836 /* 8837 * Just increment a counter if PM is enabled. On the transition from 8838 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8839 * the count with each IO and mark the device as idle when the count 8840 * hits 0. 8841 * 8842 * If the count is less than 0 the device is powered down. If a powered 8843 * down device is successfully powered up then the count must be 8844 * incremented to reflect the power up. Note that it'll get incremented 8845 * a second time to become busy. 8846 * 8847 * Because the following has the potential to change the device state 8848 * and must release the un_pm_mutex to do so, only one thread can be 8849 * allowed through at a time. 8850 */ 8851 8852 mutex_enter(&un->un_pm_mutex); 8853 while (un->un_pm_busy == TRUE) { 8854 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8855 } 8856 un->un_pm_busy = TRUE; 8857 8858 if (un->un_pm_count < 1) { 8859 8860 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8861 8862 /* 8863 * Indicate we are now busy so the framework won't attempt to 8864 * power down the device. This call will only fail if either 8865 * we passed a bad component number or the device has no 8866 * components. Neither of these should ever happen. 8867 */ 8868 mutex_exit(&un->un_pm_mutex); 8869 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8870 ASSERT(return_status == DDI_SUCCESS); 8871 8872 mutex_enter(&un->un_pm_mutex); 8873 8874 if (un->un_pm_count < 0) { 8875 mutex_exit(&un->un_pm_mutex); 8876 8877 SD_TRACE(SD_LOG_IO_PM, un, 8878 "sd_pm_entry: power up component\n"); 8879 8880 /* 8881 * pm_raise_power will cause sdpower to be called 8882 * which brings the device power level to the 8883 * desired state, ON in this case. If successful, 8884 * un_pm_count and un_power_level will be updated 8885 * appropriately. 8886 */ 8887 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8888 SD_SPINDLE_ON); 8889 8890 mutex_enter(&un->un_pm_mutex); 8891 8892 if (return_status != DDI_SUCCESS) { 8893 /* 8894 * Power up failed. 8895 * Idle the device and adjust the count 8896 * so the result on exit is that we're 8897 * still powered down, ie. count is less than 0. 8898 */ 8899 SD_TRACE(SD_LOG_IO_PM, un, 8900 "sd_pm_entry: power up failed," 8901 " idle the component\n"); 8902 8903 (void) pm_idle_component(SD_DEVINFO(un), 0); 8904 un->un_pm_count--; 8905 } else { 8906 /* 8907 * Device is powered up, verify the 8908 * count is non-negative. 8909 * This is debug only. 8910 */ 8911 ASSERT(un->un_pm_count == 0); 8912 } 8913 } 8914 8915 if (return_status == DDI_SUCCESS) { 8916 /* 8917 * For performance, now that the device has been tagged 8918 * as busy, and it's known to be powered up, update the 8919 * chain types to use jump tables that do not include 8920 * pm. This significantly lowers the overhead and 8921 * therefore improves performance. 
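 *
 * For example (illustrative): a fixed-media disk switches its buf
 * chain to SD_CHAIN_INFO_DISK_NO_PM below, so subsequent bufs run
 * through jump tables that skip sd_pm_iostart()/sd_pm_iodone()
 * while the device is known to be powered up and busy.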
8922 */ 8923 8924 mutex_exit(&un->un_pm_mutex); 8925 mutex_enter(SD_MUTEX(un)); 8926 SD_TRACE(SD_LOG_IO_PM, un, 8927 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8928 un->un_uscsi_chain_type); 8929 8930 if (un->un_f_non_devbsize_supported) { 8931 un->un_buf_chain_type = 8932 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8933 } else { 8934 un->un_buf_chain_type = 8935 SD_CHAIN_INFO_DISK_NO_PM; 8936 } 8937 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8938 8939 SD_TRACE(SD_LOG_IO_PM, un, 8940 " changed uscsi_chain_type to %d\n", 8941 un->un_uscsi_chain_type); 8942 mutex_exit(SD_MUTEX(un)); 8943 mutex_enter(&un->un_pm_mutex); 8944 8945 if (un->un_pm_idle_timeid == NULL) { 8946 /* 300 ms. */ 8947 un->un_pm_idle_timeid = 8948 timeout(sd_pm_idletimeout_handler, un, 8949 (drv_usectohz((clock_t)300000))); 8950 /* 8951 * Include an extra call to busy which keeps the 8952 * device busy with-respect-to the PM layer 8953 * until the timer fires, at which time it'll 8954 * get the extra idle call. 8955 */ 8956 (void) pm_busy_component(SD_DEVINFO(un), 0); 8957 } 8958 } 8959 } 8960 un->un_pm_busy = FALSE; 8961 /* Next... */ 8962 cv_signal(&un->un_pm_busy_cv); 8963 8964 un->un_pm_count++; 8965 8966 SD_TRACE(SD_LOG_IO_PM, un, 8967 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 8968 8969 mutex_exit(&un->un_pm_mutex); 8970 8971 return (return_status); 8972 } 8973 8974 8975 /* 8976 * Function: sd_pm_exit 8977 * 8978 * Description: Called at the completion of a command to manage busy 8979 * status for the device. If the device becomes idle the 8980 * PM framework is notified. 8981 * 8982 * Context: Kernel thread context 8983 */ 8984 8985 static void 8986 sd_pm_exit(struct sd_lun *un) 8987 { 8988 ASSERT(!mutex_owned(SD_MUTEX(un))); 8989 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8990 8991 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 8992 8993 /* 8994 * After attach the following flag is only read, so don't 8995 * take the penalty of acquiring a mutex for it. 8996 */ 8997 if (un->un_f_pm_is_enabled == TRUE) { 8998 8999 mutex_enter(&un->un_pm_mutex); 9000 un->un_pm_count--; 9001 9002 SD_TRACE(SD_LOG_IO_PM, un, 9003 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9004 9005 ASSERT(un->un_pm_count >= 0); 9006 if (un->un_pm_count == 0) { 9007 mutex_exit(&un->un_pm_mutex); 9008 9009 SD_TRACE(SD_LOG_IO_PM, un, 9010 "sd_pm_exit: idle component\n"); 9011 9012 (void) pm_idle_component(SD_DEVINFO(un), 0); 9013 9014 } else { 9015 mutex_exit(&un->un_pm_mutex); 9016 } 9017 } 9018 9019 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9020 } 9021 9022 9023 /* 9024 * Function: sdopen 9025 * 9026 * Description: Driver's open(9e) entry point function. 
9027 * 9028 * Arguments: dev_i - pointer to device number 9029 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9030 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9031 * cred_p - user credential pointer 9032 * 9033 * Return Code: EINVAL 9034 * ENXIO 9035 * EIO 9036 * EROFS 9037 * EBUSY 9038 * 9039 * Context: Kernel thread context 9040 */ 9041 /* ARGSUSED */ 9042 static int 9043 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9044 { 9045 struct sd_lun *un; 9046 int nodelay; 9047 int part; 9048 uint64_t partmask; 9049 int instance; 9050 dev_t dev; 9051 int rval = EIO; 9052 diskaddr_t nblks = 0; 9053 diskaddr_t label_cap; 9054 9055 /* Validate the open type */ 9056 if (otyp >= OTYPCNT) { 9057 return (EINVAL); 9058 } 9059 9060 dev = *dev_p; 9061 instance = SDUNIT(dev); 9062 mutex_enter(&sd_detach_mutex); 9063 9064 /* 9065 * Fail the open if there is no softstate for the instance, or 9066 * if another thread somewhere is trying to detach the instance. 9067 */ 9068 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9069 (un->un_detach_count != 0)) { 9070 mutex_exit(&sd_detach_mutex); 9071 /* 9072 * The probe cache only needs to be cleared when open (9e) fails 9073 * with ENXIO (4238046). 9074 */ 9075 /* 9076 * un-conditionally clearing probe cache is ok with 9077 * separate sd/ssd binaries 9078 * x86 platform can be an issue with both parallel 9079 * and fibre in 1 binary 9080 */ 9081 sd_scsi_clear_probe_cache(); 9082 return (ENXIO); 9083 } 9084 9085 /* 9086 * The un_layer_count is to prevent another thread in specfs from 9087 * trying to detach the instance, which can happen when we are 9088 * called from a higher-layer driver instead of thru specfs. 9089 * This will not be needed when DDI provides a layered driver 9090 * interface that allows specfs to know that an instance is in 9091 * use by a layered driver & should not be detached. 9092 * 9093 * Note: the semantics for layered driver opens are exactly one 9094 * close for every open. 9095 */ 9096 if (otyp == OTYP_LYR) { 9097 un->un_layer_count++; 9098 } 9099 9100 /* 9101 * Keep a count of the current # of opens in progress. This is because 9102 * some layered drivers try to call us as a regular open. This can 9103 * cause problems that we cannot prevent, however by keeping this count 9104 * we can at least keep our open and detach routines from racing against 9105 * each other under such conditions. 9106 */ 9107 un->un_opens_in_progress++; 9108 mutex_exit(&sd_detach_mutex); 9109 9110 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9111 part = SDPART(dev); 9112 partmask = 1 << part; 9113 9114 /* 9115 * We use a semaphore here in order to serialize 9116 * open and close requests on the device. 9117 */ 9118 sema_p(&un->un_semoclose); 9119 9120 mutex_enter(SD_MUTEX(un)); 9121 9122 /* 9123 * All device accesses go thru sdstrategy() where we check 9124 * on suspend status but there could be a scsi_poll command, 9125 * which bypasses sdstrategy(), so we need to check pm 9126 * status. 
9127 */ 9128 9129 if (!nodelay) { 9130 while ((un->un_state == SD_STATE_SUSPENDED) || 9131 (un->un_state == SD_STATE_PM_CHANGING)) { 9132 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9133 } 9134 9135 mutex_exit(SD_MUTEX(un)); 9136 if (sd_pm_entry(un) != DDI_SUCCESS) { 9137 rval = EIO; 9138 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9139 "sdopen: sd_pm_entry failed\n"); 9140 goto open_failed_with_pm; 9141 } 9142 mutex_enter(SD_MUTEX(un)); 9143 } 9144 9145 /* check for previous exclusive open */ 9146 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9147 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9148 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9149 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9150 9151 if (un->un_exclopen & (partmask)) { 9152 goto excl_open_fail; 9153 } 9154 9155 if (flag & FEXCL) { 9156 int i; 9157 if (un->un_ocmap.lyropen[part]) { 9158 goto excl_open_fail; 9159 } 9160 for (i = 0; i < (OTYPCNT - 1); i++) { 9161 if (un->un_ocmap.regopen[i] & (partmask)) { 9162 goto excl_open_fail; 9163 } 9164 } 9165 } 9166 9167 /* 9168 * Check the write permission if this is a removable media device, 9169 * NDELAY has not been set, and writable permission is requested. 9170 * 9171 * Note: If NDELAY was set and this is write-protected media the WRITE 9172 * attempt will fail with EIO as part of the I/O processing. This is a 9173 * more permissive implementation that allows the open to succeed and 9174 * WRITE attempts to fail when appropriate. 9175 */ 9176 if (un->un_f_chk_wp_open) { 9177 if ((flag & FWRITE) && (!nodelay)) { 9178 mutex_exit(SD_MUTEX(un)); 9179 /* 9180 * Defer the check for write permission on writable 9181 * DVD drive till sdstrategy and will not fail open even 9182 * if FWRITE is set as the device can be writable 9183 * depending upon the media and the media can change 9184 * after the call to open(). 9185 */ 9186 if (un->un_f_dvdram_writable_device == FALSE) { 9187 if (ISCD(un) || sr_check_wp(dev)) { 9188 rval = EROFS; 9189 mutex_enter(SD_MUTEX(un)); 9190 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9191 "write to cd or write protected media\n"); 9192 goto open_fail; 9193 } 9194 } 9195 mutex_enter(SD_MUTEX(un)); 9196 } 9197 } 9198 9199 /* 9200 * If opening in NDELAY/NONBLOCK mode, just return. 9201 * Check if disk is ready and has a valid geometry later. 9202 */ 9203 if (!nodelay) { 9204 mutex_exit(SD_MUTEX(un)); 9205 rval = sd_ready_and_valid(un); 9206 mutex_enter(SD_MUTEX(un)); 9207 /* 9208 * Fail if device is not ready or if the number of disk 9209 * blocks is zero or negative for non CD devices. 9210 */ 9211 9212 nblks = 0; 9213 9214 if (rval == SD_READY_VALID && (!ISCD(un))) { 9215 /* if cmlb_partinfo fails, nblks remains 0 */ 9216 mutex_exit(SD_MUTEX(un)); 9217 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9218 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9219 mutex_enter(SD_MUTEX(un)); 9220 } 9221 9222 if ((rval != SD_READY_VALID) || 9223 (!ISCD(un) && nblks <= 0)) { 9224 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9225 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9226 "device not ready or invalid disk block value\n"); 9227 goto open_fail; 9228 } 9229 #if defined(__i386) || defined(__amd64) 9230 } else { 9231 uchar_t *cp; 9232 /* 9233 * x86 requires special nodelay handling, so that p0 is 9234 * always defined and accessible. 9235 * Invalidate geometry only if device is not already open. 
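 *
 * "Not already open" is decided by scanning the open map as raw
 * bytes; a rough equivalent of the loop below (illustrative):
 *
 *	int busy = 0;
 *	for (i = 0; i < OCSIZE; i++)
 *		busy |= un->un_ocmap.chkd[i];
 *	if (busy == 0)
 *		... invalidate the geometry ...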
9236 */ 9237 cp = &un->un_ocmap.chkd[0]; 9238 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9239 if (*cp != (uchar_t)0) { 9240 break; 9241 } 9242 cp++; 9243 } 9244 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9245 mutex_exit(SD_MUTEX(un)); 9246 cmlb_invalidate(un->un_cmlbhandle, 9247 (void *)SD_PATH_DIRECT); 9248 mutex_enter(SD_MUTEX(un)); 9249 } 9250 9251 #endif 9252 } 9253 9254 if (otyp == OTYP_LYR) { 9255 un->un_ocmap.lyropen[part]++; 9256 } else { 9257 un->un_ocmap.regopen[otyp] |= partmask; 9258 } 9259 9260 /* Set up open and exclusive open flags */ 9261 if (flag & FEXCL) { 9262 un->un_exclopen |= (partmask); 9263 } 9264 9265 /* 9266 * If the lun is EFI labeled and lun capacity is greater than the 9267 * capacity contained in the label, log a sys-event to notify the 9268 * interested module. 9269 * To avoid an infinite loop of logging sys-event, we only log the 9270 * event when the lun is not opened in NDELAY mode. The event handler 9271 * should open the lun in NDELAY mode. 9272 */ 9273 if (!(flag & FNDELAY)) { 9274 mutex_exit(SD_MUTEX(un)); 9275 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 9276 (void*)SD_PATH_DIRECT) == 0) { 9277 mutex_enter(SD_MUTEX(un)); 9278 if (un->un_f_blockcount_is_valid && 9279 un->un_blockcount > label_cap) { 9280 mutex_exit(SD_MUTEX(un)); 9281 sd_log_lun_expansion_event(un, 9282 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 9283 mutex_enter(SD_MUTEX(un)); 9284 } 9285 } else { 9286 mutex_enter(SD_MUTEX(un)); 9287 } 9288 } 9289 9290 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9291 "open of part %d type %d\n", part, otyp); 9292 9293 mutex_exit(SD_MUTEX(un)); 9294 if (!nodelay) { 9295 sd_pm_exit(un); 9296 } 9297 9298 sema_v(&un->un_semoclose); 9299 9300 mutex_enter(&sd_detach_mutex); 9301 un->un_opens_in_progress--; 9302 mutex_exit(&sd_detach_mutex); 9303 9304 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9305 return (DDI_SUCCESS); 9306 9307 excl_open_fail: 9308 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9309 rval = EBUSY; 9310 9311 open_fail: 9312 mutex_exit(SD_MUTEX(un)); 9313 9314 /* 9315 * On a failed open we must exit the pm management. 9316 */ 9317 if (!nodelay) { 9318 sd_pm_exit(un); 9319 } 9320 open_failed_with_pm: 9321 sema_v(&un->un_semoclose); 9322 9323 mutex_enter(&sd_detach_mutex); 9324 un->un_opens_in_progress--; 9325 if (otyp == OTYP_LYR) { 9326 un->un_layer_count--; 9327 } 9328 mutex_exit(&sd_detach_mutex); 9329 9330 return (rval); 9331 } 9332 9333 9334 /* 9335 * Function: sdclose 9336 * 9337 * Description: Driver's close(9e) entry point function. 9338 * 9339 * Arguments: dev - device number 9340 * flag - file status flag, informational only 9341 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9342 * cred_p - user credential pointer 9343 * 9344 * Return Code: ENXIO 9345 * 9346 * Context: Kernel thread context 9347 */ 9348 /* ARGSUSED */ 9349 static int 9350 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9351 { 9352 struct sd_lun *un; 9353 uchar_t *cp; 9354 int part; 9355 int nodelay; 9356 int rval = 0; 9357 9358 /* Validate the open type */ 9359 if (otyp >= OTYPCNT) { 9360 return (ENXIO); 9361 } 9362 9363 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9364 return (ENXIO); 9365 } 9366 9367 part = SDPART(dev); 9368 nodelay = flag & (FNDELAY | FNONBLOCK); 9369 9370 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9371 "sdclose: close of part %d type %d\n", part, otyp); 9372 9373 /* 9374 * We use a semaphore here in order to serialize 9375 * open and close requests on the device. 
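 *
 * The same semaphore brackets sdopen(), so neither routine can
 * observe the other half done; the pattern here is simply:
 *
 *	sema_p(&un->un_semoclose);	enter open/close region
 *	...update ocmap, flush, doorlock...
 *	sema_v(&un->un_semoclose);	exit open/close region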
9376 */ 9377 sema_p(&un->un_semoclose); 9378 9379 mutex_enter(SD_MUTEX(un)); 9380 9381 /* Don't proceed if power is being changed. */ 9382 while (un->un_state == SD_STATE_PM_CHANGING) { 9383 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9384 } 9385 9386 if (un->un_exclopen & (1 << part)) { 9387 un->un_exclopen &= ~(1 << part); 9388 } 9389 9390 /* Update the open partition map */ 9391 if (otyp == OTYP_LYR) { 9392 un->un_ocmap.lyropen[part] -= 1; 9393 } else { 9394 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9395 } 9396 9397 cp = &un->un_ocmap.chkd[0]; 9398 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9399 if (*cp != (uchar_t)0) { 9400 break; 9401 } 9402 cp++; 9403 } 9404 9405 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9406 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9407 9408 /* 9409 * We avoid persistence upon the last close, and set 9410 * the throttle back to the maximum. 9411 */ 9412 un->un_throttle = un->un_saved_throttle; 9413 9414 if (un->un_state == SD_STATE_OFFLINE) { 9415 if (un->un_f_is_fibre == FALSE) { 9416 scsi_log(SD_DEVINFO(un), sd_label, 9417 CE_WARN, "offline\n"); 9418 } 9419 mutex_exit(SD_MUTEX(un)); 9420 cmlb_invalidate(un->un_cmlbhandle, 9421 (void *)SD_PATH_DIRECT); 9422 mutex_enter(SD_MUTEX(un)); 9423 9424 } else { 9425 /* 9426 * Flush any outstanding writes in the NVRAM cache. 9427 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9428 * command; it may not work for non-Pluto devices. 9429 * SYNCHRONIZE CACHE is not required for removables, 9430 * except DVD-RAM drives. 9431 * 9432 * Also note: because SYNCHRONIZE CACHE is currently 9433 * the only command issued here that requires the 9434 * drive be powered up, only do the power up before 9435 * sending the Sync Cache command. If additional 9436 * commands are added which require a powered up 9437 * drive, the following sequence may have to change. 9438 * 9439 * And finally, note that parallel SCSI on SPARC 9440 * only issues a Sync Cache to DVD-RAM, a newly 9441 * supported device. 9442 */ 9443 #if defined(__i386) || defined(__amd64) 9444 if (un->un_f_sync_cache_supported || 9445 un->un_f_dvdram_writable_device == TRUE) { 9446 #else 9447 if (un->un_f_dvdram_writable_device == TRUE) { 9448 #endif 9449 mutex_exit(SD_MUTEX(un)); 9450 if (sd_pm_entry(un) == DDI_SUCCESS) { 9451 rval = 9452 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9453 NULL); 9454 /* ignore error if not supported */ 9455 if (rval == ENOTSUP) { 9456 rval = 0; 9457 } else if (rval != 0) { 9458 rval = EIO; 9459 } 9460 sd_pm_exit(un); 9461 } else { 9462 rval = EIO; 9463 } 9464 mutex_enter(SD_MUTEX(un)); 9465 } 9466 9467 /* 9468 * For devices which support DOOR_LOCK, send an ALLOW 9469 * MEDIA REMOVAL command, but don't get upset if it 9470 * fails. We need to raise the power of the drive before 9471 * we can call sd_send_scsi_DOORLOCK(). 9472 */ 9473 if (un->un_f_doorlock_supported) { 9474 mutex_exit(SD_MUTEX(un)); 9475 if (sd_pm_entry(un) == DDI_SUCCESS) { 9476 rval = sd_send_scsi_DOORLOCK(un, 9477 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9478 9479 sd_pm_exit(un); 9480 if (ISCD(un) && (rval != 0) && 9481 (nodelay != 0)) { 9482 rval = ENXIO; 9483 } 9484 } else { 9485 rval = EIO; 9486 } 9487 mutex_enter(SD_MUTEX(un)); 9488 } 9489 9490 /* 9491 * If a device has removable media, invalidate all 9492 * parameters related to media, such as geometry, 9493 * blocksize, and blockcount.
9494 */ 9495 if (un->un_f_has_removable_media) { 9496 sr_ejected(un); 9497 } 9498 9499 /* 9500 * Destroy the cache (if it exists) which was 9501 * allocated for the write maps since this is 9502 * the last close for this media. 9503 */ 9504 if (un->un_wm_cache) { 9505 /* 9506 * Check if there are pending commands; 9507 * if there are, give a warning and 9508 * do not destroy the cache. 9509 */ 9510 if (un->un_ncmds_in_driver > 0) { 9511 scsi_log(SD_DEVINFO(un), 9512 sd_label, CE_WARN, 9513 "Unable to clean up memory " 9514 "because of pending I/O\n"); 9515 } else { 9516 kmem_cache_destroy( 9517 un->un_wm_cache); 9518 un->un_wm_cache = NULL; 9519 } 9520 } 9521 } 9522 } 9523 9524 mutex_exit(SD_MUTEX(un)); 9525 sema_v(&un->un_semoclose); 9526 9527 if (otyp == OTYP_LYR) { 9528 mutex_enter(&sd_detach_mutex); 9529 /* 9530 * The detach routine may run when the layer count 9531 * drops to zero. 9532 */ 9533 un->un_layer_count--; 9534 mutex_exit(&sd_detach_mutex); 9535 } 9536 9537 return (rval); 9538 } 9539 9540 9541 /* 9542 * Function: sd_ready_and_valid 9543 * 9544 * Description: Test if device is ready and has a valid geometry. 9545 * 9546 * Arguments: un - driver soft state (unit) structure 9547 * 9548 * 9549 * Return Code: SD_READY_VALID ready and valid label 9550 * SD_NOT_READY_VALID not ready, no label 9551 * SD_RESERVED_BY_OTHERS reservation conflict 9552 * 9553 * Context: Never called at interrupt context. 9554 */ 9555 9556 static int 9557 sd_ready_and_valid(struct sd_lun *un) 9558 { 9559 struct sd_errstats *stp; 9560 uint64_t capacity; 9561 uint_t lbasize; 9562 int rval = SD_READY_VALID; 9563 char name_str[48]; 9564 int is_valid; 9565 9566 ASSERT(un != NULL); 9567 ASSERT(!mutex_owned(SD_MUTEX(un))); 9568 9569 mutex_enter(SD_MUTEX(un)); 9570 /* 9571 * If a device has removable media, we must check if media is 9572 * ready when checking if this device is ready and valid. 9573 */ 9574 if (un->un_f_has_removable_media) { 9575 mutex_exit(SD_MUTEX(un)); 9576 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9577 rval = SD_NOT_READY_VALID; 9578 mutex_enter(SD_MUTEX(un)); 9579 goto done; 9580 } 9581 9582 is_valid = SD_IS_VALID_LABEL(un); 9583 mutex_enter(SD_MUTEX(un)); 9584 if (!is_valid || 9585 (un->un_f_blockcount_is_valid == FALSE) || 9586 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9587 9588 /* capacity has to be read on every open. */ 9589 mutex_exit(SD_MUTEX(un)); 9590 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9591 &lbasize, SD_PATH_DIRECT) != 0) { 9592 cmlb_invalidate(un->un_cmlbhandle, 9593 (void *)SD_PATH_DIRECT); 9594 mutex_enter(SD_MUTEX(un)); 9595 rval = SD_NOT_READY_VALID; 9596 goto done; 9597 } else { 9598 mutex_enter(SD_MUTEX(un)); 9599 sd_update_block_info(un, lbasize, capacity); 9600 } 9601 } 9602 9603 /* 9604 * Check if the media in the device is writable or not. 9605 */ 9606 if (!is_valid && ISCD(un)) { 9607 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9608 } 9609 9610 } else { 9611 /* 9612 * Do a test unit ready to clear any unit attention from non-cd 9613 * devices. 9614 */ 9615 mutex_exit(SD_MUTEX(un)); 9616 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9617 mutex_enter(SD_MUTEX(un)); 9618 } 9619 9620 9621 /* 9622 * If this is a non-512 block device, allocate space for 9623 * the wmap cache. This is done here since this routine is 9624 * called every time the media is changed, and the block 9625 * size is a function of the media rather than the device.
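 *
 * Example (illustrative): a DVD-RAM reporting 2048-byte target
 * blocks on a 512-byte system block size takes this path; each
 * read-modify-write then tracks its in-flight range with an
 * sd_w_map entry drawn from this per-instance cache.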
9626 */ 9627 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9628 if (!(un->un_wm_cache)) { 9629 (void) snprintf(name_str, sizeof (name_str), 9630 "%s%d_cache", 9631 ddi_driver_name(SD_DEVINFO(un)), 9632 ddi_get_instance(SD_DEVINFO(un))); 9633 un->un_wm_cache = kmem_cache_create( 9634 name_str, sizeof (struct sd_w_map), 9635 8, sd_wm_cache_constructor, 9636 sd_wm_cache_destructor, NULL, 9637 (void *)un, NULL, 0); 9638 if (!(un->un_wm_cache)) { 9639 rval = ENOMEM; 9640 goto done; 9641 } 9642 } 9643 } 9644 9645 if (un->un_state == SD_STATE_NORMAL) { 9646 /* 9647 * If the target is not yet ready here (defined by a TUR 9648 * failure), invalidate the geometry and print an 'offline' 9649 * message. This is a legacy message, as the state of the 9650 * target is not actually changed to SD_STATE_OFFLINE. 9651 * 9652 * If the TUR fails for EACCES (Reservation Conflict), 9653 * SD_RESERVED_BY_OTHERS will be returned to indicate 9654 * reservation conflict. If the TUR fails for other 9655 * reasons, SD_NOT_READY_VALID will be returned. 9656 */ 9657 int err; 9658 9659 mutex_exit(SD_MUTEX(un)); 9660 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9661 mutex_enter(SD_MUTEX(un)); 9662 9663 if (err != 0) { 9664 mutex_exit(SD_MUTEX(un)); 9665 cmlb_invalidate(un->un_cmlbhandle, 9666 (void *)SD_PATH_DIRECT); 9667 mutex_enter(SD_MUTEX(un)); 9668 if (err == EACCES) { 9669 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9670 "reservation conflict\n"); 9671 rval = SD_RESERVED_BY_OTHERS; 9672 } else { 9673 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9674 "drive offline\n"); 9675 rval = SD_NOT_READY_VALID; 9676 } 9677 goto done; 9678 } 9679 } 9680 9681 if (un->un_f_format_in_progress == FALSE) { 9682 mutex_exit(SD_MUTEX(un)); 9683 if (cmlb_validate(un->un_cmlbhandle, 0, 9684 (void *)SD_PATH_DIRECT) != 0) { 9685 rval = SD_NOT_READY_VALID; 9686 mutex_enter(SD_MUTEX(un)); 9687 goto done; 9688 } 9689 if (un->un_f_pkstats_enabled) { 9690 sd_set_pstats(un); 9691 SD_TRACE(SD_LOG_IO_PARTITION, un, 9692 "sd_ready_and_valid: un:0x%p pstats created and " 9693 "set\n", un); 9694 } 9695 mutex_enter(SD_MUTEX(un)); 9696 } 9697 9698 /* 9699 * If this device supports DOOR_LOCK command, try and send 9700 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9701 * if it fails. For a CD, however, it is an error 9702 */ 9703 if (un->un_f_doorlock_supported) { 9704 mutex_exit(SD_MUTEX(un)); 9705 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9706 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9707 rval = SD_NOT_READY_VALID; 9708 mutex_enter(SD_MUTEX(un)); 9709 goto done; 9710 } 9711 mutex_enter(SD_MUTEX(un)); 9712 } 9713 9714 /* The state has changed, inform the media watch routines */ 9715 un->un_mediastate = DKIO_INSERTED; 9716 cv_broadcast(&un->un_state_cv); 9717 rval = SD_READY_VALID; 9718 9719 done: 9720 9721 /* 9722 * Initialize the capacity kstat value, if no media previously 9723 * (capacity kstat is 0) and a media has been inserted 9724 * (un_blockcount > 0). 9725 */ 9726 if (un->un_errstats != NULL) { 9727 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9728 if ((stp->sd_capacity.value.ui64 == 0) && 9729 (un->un_f_blockcount_is_valid == TRUE)) { 9730 stp->sd_capacity.value.ui64 = 9731 (uint64_t)((uint64_t)un->un_blockcount * 9732 un->un_sys_blocksize); 9733 } 9734 } 9735 9736 mutex_exit(SD_MUTEX(un)); 9737 return (rval); 9738 } 9739 9740 9741 /* 9742 * Function: sdmin 9743 * 9744 * Description: Routine to limit the size of a data transfer. Used in 9745 * conjunction with physio(9F). 
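 *		For example (illustrative): a 1 MB request against an
 *		un_max_xfer_size of 256 KB is clamped here, and
 *		physio(9F) carries it out as four successive transfers.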
9746 * 9747 * Arguments: bp - pointer to the indicated buf(9S) struct. 9748 * 9749 * Context: Kernel thread context. 9750 */ 9751 9752 static void 9753 sdmin(struct buf *bp) 9754 { 9755 struct sd_lun *un; 9756 int instance; 9757 9758 instance = SDUNIT(bp->b_edev); 9759 9760 un = ddi_get_soft_state(sd_state, instance); 9761 ASSERT(un != NULL); 9762 9763 if (bp->b_bcount > un->un_max_xfer_size) { 9764 bp->b_bcount = un->un_max_xfer_size; 9765 } 9766 } 9767 9768 9769 /* 9770 * Function: sdread 9771 * 9772 * Description: Driver's read(9e) entry point function. 9773 * 9774 * Arguments: dev - device number 9775 * uio - structure pointer describing where data is to be stored 9776 * in user's space 9777 * cred_p - user credential pointer 9778 * 9779 * Return Code: ENXIO 9780 * EIO 9781 * EINVAL 9782 * value returned by physio 9783 * 9784 * Context: Kernel thread context. 9785 */ 9786 /* ARGSUSED */ 9787 static int 9788 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9789 { 9790 struct sd_lun *un = NULL; 9791 int secmask; 9792 int err; 9793 9794 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9795 return (ENXIO); 9796 } 9797 9798 ASSERT(!mutex_owned(SD_MUTEX(un))); 9799 9800 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9801 mutex_enter(SD_MUTEX(un)); 9802 /* 9803 * Because the call to sd_ready_and_valid will issue I/O we 9804 * must wait here if either the device is suspended or 9805 * if it's power level is changing. 9806 */ 9807 while ((un->un_state == SD_STATE_SUSPENDED) || 9808 (un->un_state == SD_STATE_PM_CHANGING)) { 9809 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9810 } 9811 un->un_ncmds_in_driver++; 9812 mutex_exit(SD_MUTEX(un)); 9813 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9814 mutex_enter(SD_MUTEX(un)); 9815 un->un_ncmds_in_driver--; 9816 ASSERT(un->un_ncmds_in_driver >= 0); 9817 mutex_exit(SD_MUTEX(un)); 9818 return (EIO); 9819 } 9820 mutex_enter(SD_MUTEX(un)); 9821 un->un_ncmds_in_driver--; 9822 ASSERT(un->un_ncmds_in_driver >= 0); 9823 mutex_exit(SD_MUTEX(un)); 9824 } 9825 9826 /* 9827 * Read requests are restricted to multiples of the system block size. 9828 */ 9829 secmask = un->un_sys_blocksize - 1; 9830 9831 if (uio->uio_loffset & ((offset_t)(secmask))) { 9832 SD_ERROR(SD_LOG_READ_WRITE, un, 9833 "sdread: file offset not modulo %d\n", 9834 un->un_sys_blocksize); 9835 err = EINVAL; 9836 } else if (uio->uio_iov->iov_len & (secmask)) { 9837 SD_ERROR(SD_LOG_READ_WRITE, un, 9838 "sdread: transfer length not modulo %d\n", 9839 un->un_sys_blocksize); 9840 err = EINVAL; 9841 } else { 9842 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9843 } 9844 return (err); 9845 } 9846 9847 9848 /* 9849 * Function: sdwrite 9850 * 9851 * Description: Driver's write(9e) entry point function. 9852 * 9853 * Arguments: dev - device number 9854 * uio - structure pointer describing where data is stored in 9855 * user's space 9856 * cred_p - user credential pointer 9857 * 9858 * Return Code: ENXIO 9859 * EIO 9860 * EINVAL 9861 * value returned by physio 9862 * 9863 * Context: Kernel thread context. 
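 *
 * Alignment note (illustrative, assuming 512-byte system blocks,
 * so secmask == 0x1FF): an offset of 0x200 passes the checks
 * below, while an offset of 0x1F3 fails with EINVAL before any
 * I/O is issued.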
9864 */ 9865 /* ARGSUSED */ 9866 static int 9867 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9868 { 9869 struct sd_lun *un = NULL; 9870 int secmask; 9871 int err; 9872 9873 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9874 return (ENXIO); 9875 } 9876 9877 ASSERT(!mutex_owned(SD_MUTEX(un))); 9878 9879 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9880 mutex_enter(SD_MUTEX(un)); 9881 /* 9882 * Because the call to sd_ready_and_valid will issue I/O we 9883 * must wait here if either the device is suspended or 9884 * if it's power level is changing. 9885 */ 9886 while ((un->un_state == SD_STATE_SUSPENDED) || 9887 (un->un_state == SD_STATE_PM_CHANGING)) { 9888 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9889 } 9890 un->un_ncmds_in_driver++; 9891 mutex_exit(SD_MUTEX(un)); 9892 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9893 mutex_enter(SD_MUTEX(un)); 9894 un->un_ncmds_in_driver--; 9895 ASSERT(un->un_ncmds_in_driver >= 0); 9896 mutex_exit(SD_MUTEX(un)); 9897 return (EIO); 9898 } 9899 mutex_enter(SD_MUTEX(un)); 9900 un->un_ncmds_in_driver--; 9901 ASSERT(un->un_ncmds_in_driver >= 0); 9902 mutex_exit(SD_MUTEX(un)); 9903 } 9904 9905 /* 9906 * Write requests are restricted to multiples of the system block size. 9907 */ 9908 secmask = un->un_sys_blocksize - 1; 9909 9910 if (uio->uio_loffset & ((offset_t)(secmask))) { 9911 SD_ERROR(SD_LOG_READ_WRITE, un, 9912 "sdwrite: file offset not modulo %d\n", 9913 un->un_sys_blocksize); 9914 err = EINVAL; 9915 } else if (uio->uio_iov->iov_len & (secmask)) { 9916 SD_ERROR(SD_LOG_READ_WRITE, un, 9917 "sdwrite: transfer length not modulo %d\n", 9918 un->un_sys_blocksize); 9919 err = EINVAL; 9920 } else { 9921 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9922 } 9923 return (err); 9924 } 9925 9926 9927 /* 9928 * Function: sdaread 9929 * 9930 * Description: Driver's aread(9e) entry point function. 9931 * 9932 * Arguments: dev - device number 9933 * aio - structure pointer describing where data is to be stored 9934 * cred_p - user credential pointer 9935 * 9936 * Return Code: ENXIO 9937 * EIO 9938 * EINVAL 9939 * value returned by aphysio 9940 * 9941 * Context: Kernel thread context. 9942 */ 9943 /* ARGSUSED */ 9944 static int 9945 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9946 { 9947 struct sd_lun *un = NULL; 9948 struct uio *uio = aio->aio_uio; 9949 int secmask; 9950 int err; 9951 9952 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9953 return (ENXIO); 9954 } 9955 9956 ASSERT(!mutex_owned(SD_MUTEX(un))); 9957 9958 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9959 mutex_enter(SD_MUTEX(un)); 9960 /* 9961 * Because the call to sd_ready_and_valid will issue I/O we 9962 * must wait here if either the device is suspended or 9963 * if it's power level is changing. 9964 */ 9965 while ((un->un_state == SD_STATE_SUSPENDED) || 9966 (un->un_state == SD_STATE_PM_CHANGING)) { 9967 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9968 } 9969 un->un_ncmds_in_driver++; 9970 mutex_exit(SD_MUTEX(un)); 9971 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9972 mutex_enter(SD_MUTEX(un)); 9973 un->un_ncmds_in_driver--; 9974 ASSERT(un->un_ncmds_in_driver >= 0); 9975 mutex_exit(SD_MUTEX(un)); 9976 return (EIO); 9977 } 9978 mutex_enter(SD_MUTEX(un)); 9979 un->un_ncmds_in_driver--; 9980 ASSERT(un->un_ncmds_in_driver >= 0); 9981 mutex_exit(SD_MUTEX(un)); 9982 } 9983 9984 /* 9985 * Read requests are restricted to multiples of the system block size. 
9986 */ 9987 secmask = un->un_sys_blocksize - 1; 9988 9989 if (uio->uio_loffset & ((offset_t)(secmask))) { 9990 SD_ERROR(SD_LOG_READ_WRITE, un, 9991 "sdaread: file offset not modulo %d\n", 9992 un->un_sys_blocksize); 9993 err = EINVAL; 9994 } else if (uio->uio_iov->iov_len & (secmask)) { 9995 SD_ERROR(SD_LOG_READ_WRITE, un, 9996 "sdaread: transfer length not modulo %d\n", 9997 un->un_sys_blocksize); 9998 err = EINVAL; 9999 } else { 10000 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10001 } 10002 return (err); 10003 } 10004 10005 10006 /* 10007 * Function: sdawrite 10008 * 10009 * Description: Driver's awrite(9e) entry point function. 10010 * 10011 * Arguments: dev - device number 10012 * aio - structure pointer describing where data is stored 10013 * cred_p - user credential pointer 10014 * 10015 * Return Code: ENXIO 10016 * EIO 10017 * EINVAL 10018 * value returned by aphysio 10019 * 10020 * Context: Kernel thread context. 10021 */ 10022 /* ARGSUSED */ 10023 static int 10024 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10025 { 10026 struct sd_lun *un = NULL; 10027 struct uio *uio = aio->aio_uio; 10028 int secmask; 10029 int err; 10030 10031 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10032 return (ENXIO); 10033 } 10034 10035 ASSERT(!mutex_owned(SD_MUTEX(un))); 10036 10037 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10038 mutex_enter(SD_MUTEX(un)); 10039 /* 10040 * Because the call to sd_ready_and_valid will issue I/O we 10041 * must wait here if either the device is suspended or 10042 * if it's power level is changing. 10043 */ 10044 while ((un->un_state == SD_STATE_SUSPENDED) || 10045 (un->un_state == SD_STATE_PM_CHANGING)) { 10046 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10047 } 10048 un->un_ncmds_in_driver++; 10049 mutex_exit(SD_MUTEX(un)); 10050 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10051 mutex_enter(SD_MUTEX(un)); 10052 un->un_ncmds_in_driver--; 10053 ASSERT(un->un_ncmds_in_driver >= 0); 10054 mutex_exit(SD_MUTEX(un)); 10055 return (EIO); 10056 } 10057 mutex_enter(SD_MUTEX(un)); 10058 un->un_ncmds_in_driver--; 10059 ASSERT(un->un_ncmds_in_driver >= 0); 10060 mutex_exit(SD_MUTEX(un)); 10061 } 10062 10063 /* 10064 * Write requests are restricted to multiples of the system block size. 
10065 */ 10066 secmask = un->un_sys_blocksize - 1; 10067 10068 if (uio->uio_loffset & ((offset_t)(secmask))) { 10069 SD_ERROR(SD_LOG_READ_WRITE, un, 10070 "sdawrite: file offset not modulo %d\n", 10071 un->un_sys_blocksize); 10072 err = EINVAL; 10073 } else if (uio->uio_iov->iov_len & (secmask)) { 10074 SD_ERROR(SD_LOG_READ_WRITE, un, 10075 "sdawrite: transfer length not modulo %d\n", 10076 un->un_sys_blocksize); 10077 err = EINVAL; 10078 } else { 10079 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10080 } 10081 return (err); 10082 } 10083 10084 10085 10086 10087 10088 /* 10089 * Driver IO processing follows the following sequence: 10090 * 10091 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10092 * | | ^ 10093 * v v | 10094 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10095 * | | | | 10096 * v | | | 10097 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10098 * | | ^ ^ 10099 * v v | | 10100 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10101 * | | | | 10102 * +---+ | +------------+ +-------+ 10103 * | | | | 10104 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10105 * | v | | 10106 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10107 * | | ^ | 10108 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10109 * | v | | 10110 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10111 * | | ^ | 10112 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10113 * | v | | 10114 * | sd_checksum_iostart() sd_checksum_iodone() | 10115 * | | ^ | 10116 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10117 * | v | | 10118 * | sd_pm_iostart() sd_pm_iodone() | 10119 * | | ^ | 10120 * | | | | 10121 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10122 * | ^ 10123 * v | 10124 * sd_core_iostart() | 10125 * | | 10126 * | +------>(*destroypkt)() 10127 * +-> sd_start_cmds() <-+ | | 10128 * | | | v 10129 * | | | scsi_destroy_pkt(9F) 10130 * | | | 10131 * +->(*initpkt)() +- sdintr() 10132 * | | | | 10133 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10134 * | +-> scsi_setup_cdb(9F) | 10135 * | | 10136 * +--> scsi_transport(9F) | 10137 * | | 10138 * +----> SCSA ---->+ 10139 * 10140 * 10141 * This code is based upon the following presumptions: 10142 * 10143 * - iostart and iodone functions operate on buf(9S) structures. These 10144 * functions perform the necessary operations on the buf(9S) and pass 10145 * them along to the next function in the chain by using the macros 10146 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10147 * (for iodone side functions). 10148 * 10149 * - The iostart side functions may sleep. The iodone side functions 10150 * are called under interrupt context and may NOT sleep. Therefore 10151 * iodone side functions also may not call iostart side functions. 10152 * (NOTE: iostart side functions should NOT sleep for memory, as 10153 * this could result in deadlock.) 10154 * 10155 * - An iostart side function may call its corresponding iodone side 10156 * function directly (if necessary). 10157 * 10158 * - In the event of an error, an iostart side function can return a buf(9S) 10159 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10160 * b_error in the usual way of course). 10161 * 10162 * - The taskq mechanism may be used by the iodone side functions to dispatch 10163 * requests to the iostart side functions. The iostart side functions in 10164 * this case would be called under the context of a taskq thread, so it's 10165 * OK for them to block/sleep/spin in this case. 
10166 * 10167 * - iostart side functions may allocate "shadow" buf(9S) structs and 10168 * pass them along to the next function in the chain. The corresponding 10169 * iodone side functions must coalesce the "shadow" bufs and return 10170 * the "original" buf to the next higher layer. 10171 * 10172 * - The b_private field of the buf(9S) struct holds a pointer to 10173 * an sd_xbuf struct, which contains information needed to 10174 * construct the scsi_pkt for the command. 10175 * 10176 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10177 * layer must acquire & release the SD_MUTEX(un) as needed. 10178 */ 10179 10180 10181 /* 10182 * Create taskq for all targets in the system. This is created at 10183 * _init(9E) and destroyed at _fini(9E). 10184 * 10185 * Note: here we set the minalloc to a reasonably high number to ensure that 10186 * we will have an adequate supply of task entries available at interrupt time. 10187 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10188 * sd_taskq_create(). Since we do not want to sleep for allocations at 10189 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10190 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10191 * requests at any one instant in time. 10192 */ 10193 #define SD_TASKQ_NUMTHREADS 8 10194 #define SD_TASKQ_MINALLOC 256 10195 #define SD_TASKQ_MAXALLOC 256 10196 10197 static taskq_t *sd_tq = NULL; 10198 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10199 10200 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10201 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10202 10203 /* 10204 * The following task queue is created for the write part of 10205 * read-modify-write for non-512 block size devices. 10206 * Limit the number of threads to 1 for now. This number was chosen 10207 * because the queue currently applies only to DVD-RAM and MO drives, 10208 * for which performance is not the main criterion at this stage. 10209 * Note: it remains to be explored whether a single taskq could be used. 10210 */ 10211 #define SD_WMR_TASKQ_NUMTHREADS 1 10212 static taskq_t *sd_wmr_tq = NULL; 10213 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10214 10215 /* 10216 * Function: sd_taskq_create 10217 * 10218 * Description: Create taskq thread(s) and preallocate task entries 10219 * 10220 * Return Code: none; the created taskqs are stored in sd_tq and sd_wmr_tq. 10221 * 10222 * Context: Can sleep. Requires blockable context. 10223 * 10224 * Notes: - The taskq() facility currently is NOT part of the DDI. 10225 * (definitely NOT recommended for 3rd-party drivers!) :-) 10226 * - taskq_create() will block for memory; also, it will panic 10227 * if it cannot create the requested number of threads. 10228 * - Currently taskq_create() creates threads that cannot be 10229 * swapped.
10230 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10231 * supply of taskq entries at interrupt time (ie, so that we 10232 * do not have to sleep for memory) 10233 */ 10234 10235 static void 10236 sd_taskq_create(void) 10237 { 10238 char taskq_name[TASKQ_NAMELEN]; 10239 10240 ASSERT(sd_tq == NULL); 10241 ASSERT(sd_wmr_tq == NULL); 10242 10243 (void) snprintf(taskq_name, sizeof (taskq_name), 10244 "%s_drv_taskq", sd_label); 10245 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10246 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10247 TASKQ_PREPOPULATE)); 10248 10249 (void) snprintf(taskq_name, sizeof (taskq_name), 10250 "%s_rmw_taskq", sd_label); 10251 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10252 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10253 TASKQ_PREPOPULATE)); 10254 } 10255 10256 10257 /* 10258 * Function: sd_taskq_delete 10259 * 10260 * Description: Complementary cleanup routine for sd_taskq_create(). 10261 * 10262 * Context: Kernel thread context. 10263 */ 10264 10265 static void 10266 sd_taskq_delete(void) 10267 { 10268 ASSERT(sd_tq != NULL); 10269 ASSERT(sd_wmr_tq != NULL); 10270 taskq_destroy(sd_tq); 10271 taskq_destroy(sd_wmr_tq); 10272 sd_tq = NULL; 10273 sd_wmr_tq = NULL; 10274 } 10275 10276 10277 /* 10278 * Function: sdstrategy 10279 * 10280 * Description: Driver's strategy (9E) entry point function. 10281 * 10282 * Arguments: bp - pointer to buf(9S) 10283 * 10284 * Return Code: Always returns zero 10285 * 10286 * Context: Kernel thread context. 10287 */ 10288 10289 static int 10290 sdstrategy(struct buf *bp) 10291 { 10292 struct sd_lun *un; 10293 10294 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10295 if (un == NULL) { 10296 bioerror(bp, EIO); 10297 bp->b_resid = bp->b_bcount; 10298 biodone(bp); 10299 return (0); 10300 } 10301 /* As was done in the past, fail new cmds. if state is dumping. */ 10302 if (un->un_state == SD_STATE_DUMPING) { 10303 bioerror(bp, ENXIO); 10304 bp->b_resid = bp->b_bcount; 10305 biodone(bp); 10306 return (0); 10307 } 10308 10309 ASSERT(!mutex_owned(SD_MUTEX(un))); 10310 10311 /* 10312 * Commands may sneak in while we released the mutex in 10313 * DDI_SUSPEND, we should block new commands. However, old 10314 * commands that are still in the driver at this point should 10315 * still be allowed to drain. 10316 */ 10317 mutex_enter(SD_MUTEX(un)); 10318 /* 10319 * Must wait here if either the device is suspended or 10320 * if it's power level is changing. 10321 */ 10322 while ((un->un_state == SD_STATE_SUSPENDED) || 10323 (un->un_state == SD_STATE_PM_CHANGING)) { 10324 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10325 } 10326 10327 un->un_ncmds_in_driver++; 10328 10329 /* 10330 * atapi: Since we are running the CD for now in PIO mode we need to 10331 * call bp_mapin here to avoid bp_mapin called interrupt context under 10332 * the HBA's init_pkt routine. 10333 */ 10334 if (un->un_f_cfg_is_atapi == TRUE) { 10335 mutex_exit(SD_MUTEX(un)); 10336 bp_mapin(bp); 10337 mutex_enter(SD_MUTEX(un)); 10338 } 10339 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10340 un->un_ncmds_in_driver); 10341 10342 mutex_exit(SD_MUTEX(un)); 10343 10344 /* 10345 * This will (eventually) allocate the sd_xbuf area and 10346 * call sd_xbuf_strategy(). We just want to return the 10347 * result of ddi_xbuf_qstrategy so that we have an opt- 10348 * imized tail call which saves us a stack frame. 
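 *
 * (When the xbuf framework later dequeues the buf it calls back
 * into sd_xbuf_strategy(), which is assumed here to have been
 * registered on un_xbuf_attr at attach time, and that starts the
 * iostart chain.)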
10349 */ 10350 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10351 } 10352 10353 10354 /* 10355 * Function: sd_xbuf_strategy 10356 * 10357 * Description: Function for initiating IO operations via the 10358 * ddi_xbuf_qstrategy() mechanism. 10359 * 10360 * Context: Kernel thread context. 10361 */ 10362 10363 static void 10364 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10365 { 10366 struct sd_lun *un = arg; 10367 10368 ASSERT(bp != NULL); 10369 ASSERT(xp != NULL); 10370 ASSERT(un != NULL); 10371 ASSERT(!mutex_owned(SD_MUTEX(un))); 10372 10373 /* 10374 * Initialize the fields in the xbuf and save a pointer to the 10375 * xbuf in bp->b_private. 10376 */ 10377 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10378 10379 /* Send the buf down the iostart chain */ 10380 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10381 } 10382 10383 10384 /* 10385 * Function: sd_xbuf_init 10386 * 10387 * Description: Prepare the given sd_xbuf struct for use. 10388 * 10389 * Arguments: un - ptr to softstate 10390 * bp - ptr to associated buf(9S) 10391 * xp - ptr to associated sd_xbuf 10392 * chain_type - IO chain type to use: 10393 * SD_CHAIN_NULL 10394 * SD_CHAIN_BUFIO 10395 * SD_CHAIN_USCSI 10396 * SD_CHAIN_DIRECT 10397 * SD_CHAIN_DIRECT_PRIORITY 10398 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10399 * initialization; may be NULL if none. 10400 * 10401 * Context: Kernel thread context 10402 */ 10403 10404 static void 10405 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10406 uchar_t chain_type, void *pktinfop) 10407 { 10408 int index; 10409 10410 ASSERT(un != NULL); 10411 ASSERT(bp != NULL); 10412 ASSERT(xp != NULL); 10413 10414 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10415 bp, chain_type); 10416 10417 xp->xb_un = un; 10418 xp->xb_pktp = NULL; 10419 xp->xb_pktinfo = pktinfop; 10420 xp->xb_private = bp->b_private; 10421 xp->xb_blkno = (daddr_t)bp->b_blkno; 10422 10423 /* 10424 * Set up the iostart and iodone chain indexes in the xbuf, based 10425 * upon the specified chain type to use. 10426 */ 10427 switch (chain_type) { 10428 case SD_CHAIN_NULL: 10429 /* 10430 * Fall thru to just use the values for the buf type, even 10431 * tho for the NULL chain these values will never be used. 10432 */ 10433 /* FALLTHRU */ 10434 case SD_CHAIN_BUFIO: 10435 index = un->un_buf_chain_type; 10436 break; 10437 case SD_CHAIN_USCSI: 10438 index = un->un_uscsi_chain_type; 10439 break; 10440 case SD_CHAIN_DIRECT: 10441 index = un->un_direct_chain_type; 10442 break; 10443 case SD_CHAIN_DIRECT_PRIORITY: 10444 index = un->un_priority_chain_type; 10445 break; 10446 default: 10447 /* We're really broken if we ever get here... */ 10448 panic("sd_xbuf_init: illegal chain type!"); 10449 /*NOTREACHED*/ 10450 } 10451 10452 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10453 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10454 10455 /* 10456 * It might be a bit easier to simply bzero the entire xbuf above, 10457 * but it turns out that since we init a fair number of members anyway, 10458 * we save a fair number of cycles by doing explicit assignment of zero.
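 *
 * The rejected alternative, for comparison (illustrative):
 *
 *	bzero(xp, sizeof (struct sd_xbuf));
 *
 * would have to run before the assignments above, since it would
 * also wipe xb_un, xb_pktinfo, and the other members already set.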
10459 */ 10460 xp->xb_pkt_flags = 0; 10461 xp->xb_dma_resid = 0; 10462 xp->xb_retry_count = 0; 10463 xp->xb_victim_retry_count = 0; 10464 xp->xb_ua_retry_count = 0; 10465 xp->xb_nr_retry_count = 0; 10466 xp->xb_sense_bp = NULL; 10467 xp->xb_sense_status = 0; 10468 xp->xb_sense_state = 0; 10469 xp->xb_sense_resid = 0; 10470 10471 bp->b_private = xp; 10472 bp->b_flags &= ~(B_DONE | B_ERROR); 10473 bp->b_resid = 0; 10474 bp->av_forw = NULL; 10475 bp->av_back = NULL; 10476 bioerror(bp, 0); 10477 10478 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10479 } 10480 10481 10482 /* 10483 * Function: sd_uscsi_strategy 10484 * 10485 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10486 * 10487 * Arguments: bp - buf struct ptr 10488 * 10489 * Return Code: Always returns 0 10490 * 10491 * Context: Kernel thread context 10492 */ 10493 10494 static int 10495 sd_uscsi_strategy(struct buf *bp) 10496 { 10497 struct sd_lun *un; 10498 struct sd_uscsi_info *uip; 10499 struct sd_xbuf *xp; 10500 uchar_t chain_type; 10501 10502 ASSERT(bp != NULL); 10503 10504 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10505 if (un == NULL) { 10506 bioerror(bp, EIO); 10507 bp->b_resid = bp->b_bcount; 10508 biodone(bp); 10509 return (0); 10510 } 10511 10512 ASSERT(!mutex_owned(SD_MUTEX(un))); 10513 10514 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10515 10516 mutex_enter(SD_MUTEX(un)); 10517 /* 10518 * atapi: Since we are running the CD for now in PIO mode we need to 10519 * call bp_mapin here to avoid bp_mapin called interrupt context under 10520 * the HBA's init_pkt routine. 10521 */ 10522 if (un->un_f_cfg_is_atapi == TRUE) { 10523 mutex_exit(SD_MUTEX(un)); 10524 bp_mapin(bp); 10525 mutex_enter(SD_MUTEX(un)); 10526 } 10527 un->un_ncmds_in_driver++; 10528 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10529 un->un_ncmds_in_driver); 10530 mutex_exit(SD_MUTEX(un)); 10531 10532 /* 10533 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10534 */ 10535 ASSERT(bp->b_private != NULL); 10536 uip = (struct sd_uscsi_info *)bp->b_private; 10537 10538 switch (uip->ui_flags) { 10539 case SD_PATH_DIRECT: 10540 chain_type = SD_CHAIN_DIRECT; 10541 break; 10542 case SD_PATH_DIRECT_PRIORITY: 10543 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10544 break; 10545 default: 10546 chain_type = SD_CHAIN_USCSI; 10547 break; 10548 } 10549 10550 /* 10551 * We may allocate extra buf for external USCSI commands. If the 10552 * application asks for bigger than 20-byte sense data via USCSI, 10553 * SCSA layer will allocate 252 bytes sense buf for that command. 
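 *
 * Sizing arithmetic (illustrative, taking SENSE_LENGTH as the
 * 20-byte default and MAX_SENSE_LENGTH as the 252 bytes noted
 * above): the xbuf allocated below grows by 252 - 20 = 232 bytes,
 * so the larger sense data lives in the same allocation.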
10554 */ 10555 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 10556 SENSE_LENGTH) { 10557 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 10558 MAX_SENSE_LENGTH, KM_SLEEP); 10559 } else { 10560 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 10561 } 10562 10563 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10564 10565 /* Use the index obtained within xbuf_init */ 10566 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10567 10568 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10569 10570 return (0); 10571 } 10572 10573 /* 10574 * Function: sd_send_scsi_cmd 10575 * 10576 * Description: Runs a USCSI command for user (when called thru sdioctl), 10577 * or for the driver 10578 * 10579 * Arguments: dev - the dev_t for the device 10580 * incmd - ptr to a valid uscsi_cmd struct 10581 * flag - bit flag, indicating open settings, 32/64 bit type 10582 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10583 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10584 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10585 * to use the USCSI "direct" chain and bypass the normal 10586 * command waitq. 10587 * 10588 * Return Code: 0 - successful completion of the given command 10589 * EIO - scsi_uscsi_handle_command() failed 10590 * ENXIO - soft state not found for specified dev 10591 * EINVAL 10592 * EFAULT - copyin/copyout error 10593 * return code of scsi_uscsi_handle_command(): 10594 * EIO 10595 * ENXIO 10596 * EACCES 10597 * 10598 * Context: Waits for command to complete. Can sleep. 10599 */ 10600 10601 static int 10602 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10603 enum uio_seg dataspace, int path_flag) 10604 { 10605 struct sd_uscsi_info *uip; 10606 struct uscsi_cmd *uscmd; 10607 struct sd_lun *un; 10608 int format = 0; 10609 int rval; 10610 10611 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10612 if (un == NULL) { 10613 return (ENXIO); 10614 } 10615 10616 ASSERT(!mutex_owned(SD_MUTEX(un))); 10617 10618 #ifdef SDDEBUG 10619 switch (dataspace) { 10620 case UIO_USERSPACE: 10621 SD_TRACE(SD_LOG_IO, un, 10622 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10623 break; 10624 case UIO_SYSSPACE: 10625 SD_TRACE(SD_LOG_IO, un, 10626 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10627 break; 10628 default: 10629 SD_TRACE(SD_LOG_IO, un, 10630 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10631 break; 10632 } 10633 #endif 10634 10635 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10636 SD_ADDRESS(un), &uscmd); 10637 if (rval != 0) { 10638 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 10639 "scsi_uscsi_alloc_and_copyin failed\n", un); 10640 return (rval); 10641 } 10642 10643 if ((uscmd->uscsi_cdb != NULL) && 10644 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10645 mutex_enter(SD_MUTEX(un)); 10646 un->un_f_format_in_progress = TRUE; 10647 mutex_exit(SD_MUTEX(un)); 10648 format = 1; 10649 } 10650 10651 /* 10652 * Allocate an sd_uscsi_info struct and fill it with the info 10653 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10654 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10655 * since we allocate the buf here in this function, we do not 10656 * need to preserve the prior contents of b_private. 
10657 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10658 */ 10659 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10660 uip->ui_flags = path_flag; 10661 uip->ui_cmdp = uscmd; 10662 10663 /* 10664 * Commands sent with priority are intended for error recovery 10665 * situations, and do not have retries performed. 10666 */ 10667 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10668 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10669 } 10670 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10671 10672 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10673 sd_uscsi_strategy, NULL, uip); 10674 10675 #ifdef SDDEBUG 10676 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10677 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10678 uscmd->uscsi_status, uscmd->uscsi_resid); 10679 if (uscmd->uscsi_bufaddr != NULL) { 10680 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10681 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10682 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10683 if (dataspace == UIO_SYSSPACE) { 10684 SD_DUMP_MEMORY(un, SD_LOG_IO, 10685 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10686 uscmd->uscsi_buflen, SD_LOG_HEX); 10687 } 10688 } 10689 #endif 10690 10691 if (format == 1) { 10692 mutex_enter(SD_MUTEX(un)); 10693 un->un_f_format_in_progress = FALSE; 10694 mutex_exit(SD_MUTEX(un)); 10695 } 10696 10697 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10698 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10699 10700 return (rval); 10701 } 10702 10703 10704 /* 10705 * Function: sd_buf_iodone 10706 * 10707 * Description: Frees the sd_xbuf & returns the buf to its originator. 10708 * 10709 * Context: May be called from interrupt context. 10710 */ 10711 /* ARGSUSED */ 10712 static void 10713 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10714 { 10715 struct sd_xbuf *xp; 10716 10717 ASSERT(un != NULL); 10718 ASSERT(bp != NULL); 10719 ASSERT(!mutex_owned(SD_MUTEX(un))); 10720 10721 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10722 10723 xp = SD_GET_XBUF(bp); 10724 ASSERT(xp != NULL); 10725 10726 mutex_enter(SD_MUTEX(un)); 10727 10728 /* 10729 * Grab time when the cmd completed. 10730 * This is used for determining if the system has been 10731 * idle long enough to make it idle to the PM framework. 10732 * This is for lowering the overhead, and therefore improving 10733 * performance per I/O operation. 10734 */ 10735 un->un_pm_idle_time = ddi_get_time(); 10736 10737 un->un_ncmds_in_driver--; 10738 ASSERT(un->un_ncmds_in_driver >= 0); 10739 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10740 un->un_ncmds_in_driver); 10741 10742 mutex_exit(SD_MUTEX(un)); 10743 10744 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10745 biodone(bp); /* bp is gone after this */ 10746 10747 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10748 } 10749 10750 10751 /* 10752 * Function: sd_uscsi_iodone 10753 * 10754 * Description: Frees the sd_xbuf & returns the buf to its originator. 10755 * 10756 * Context: May be called from interrupt context. 
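 *
 * Note: the kmem_free() below must mirror the sizing logic in
 * sd_uscsi_strategy(); an xbuf that was grown for a large sense
 * request (uscsi_rqlen > SENSE_LENGTH) is freed with that same
 * enlarged size.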
10757 */ 10758 /* ARGSUSED */ 10759 static void 10760 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10761 { 10762 struct sd_xbuf *xp; 10763 10764 ASSERT(un != NULL); 10765 ASSERT(bp != NULL); 10766 10767 xp = SD_GET_XBUF(bp); 10768 ASSERT(xp != NULL); 10769 ASSERT(!mutex_owned(SD_MUTEX(un))); 10770 10771 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10772 10773 bp->b_private = xp->xb_private; 10774 10775 mutex_enter(SD_MUTEX(un)); 10776 10777 /* 10778 * Grab time when the cmd completed. 10779 * This is used for determining if the system has been 10780 * idle long enough to make it idle to the PM framework. 10781 * This is for lowering the overhead, and therefore improving 10782 * performance per I/O operation. 10783 */ 10784 un->un_pm_idle_time = ddi_get_time(); 10785 10786 un->un_ncmds_in_driver--; 10787 ASSERT(un->un_ncmds_in_driver >= 0); 10788 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10789 un->un_ncmds_in_driver); 10790 10791 mutex_exit(SD_MUTEX(un)); 10792 10793 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 10794 SENSE_LENGTH) { 10795 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 10796 MAX_SENSE_LENGTH); 10797 } else { 10798 kmem_free(xp, sizeof (struct sd_xbuf)); 10799 } 10800 10801 biodone(bp); 10802 10803 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10804 } 10805 10806 10807 /* 10808 * Function: sd_mapblockaddr_iostart 10809 * 10810 * Description: Verify request lies within the partition limits for 10811 * the indicated minor device. Issue "overrun" buf if 10812 * request would exceed partition range. Converts 10813 * partition-relative block address to absolute. 10814 * 10815 * Context: Can sleep 10816 * 10817 * Issues: This follows what the old code did, in terms of accessing 10818 * some of the partition info in the unit struct without holding 10819 * the mutext. This is a general issue, if the partition info 10820 * can be altered while IO is in progress... as soon as we send 10821 * a buf, its partitioning can be invalid before it gets to the 10822 * device. Probably the right fix is to move partitioning out 10823 * of the driver entirely. 10824 */ 10825 10826 static void 10827 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10828 { 10829 diskaddr_t nblocks; /* #blocks in the given partition */ 10830 daddr_t blocknum; /* Block number specified by the buf */ 10831 size_t requested_nblocks; 10832 size_t available_nblocks; 10833 int partition; 10834 diskaddr_t partition_offset; 10835 struct sd_xbuf *xp; 10836 10837 10838 ASSERT(un != NULL); 10839 ASSERT(bp != NULL); 10840 ASSERT(!mutex_owned(SD_MUTEX(un))); 10841 10842 SD_TRACE(SD_LOG_IO_PARTITION, un, 10843 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10844 10845 xp = SD_GET_XBUF(bp); 10846 ASSERT(xp != NULL); 10847 10848 /* 10849 * If the geometry is not indicated as valid, attempt to access 10850 * the unit & verify the geometry/label. This can be the case for 10851 * removable-media devices, of if the device was opened in 10852 * NDELAY/NONBLOCK mode. 10853 */ 10854 if (!SD_IS_VALID_LABEL(un) && 10855 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10856 /* 10857 * For removable devices it is possible to start an I/O 10858 * without a media by opening the device in nodelay mode. 10859 * Also for writable CDs there can be many scenarios where 10860 * there is no geometry yet but volume manager is trying to 10861 * issue a read() just because it can see TOC on the CD. So 10862 * do not print a message for removables. 
10863 */ 10864 if (!un->un_f_has_removable_media) { 10865 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10866 "i/o to invalid geometry\n"); 10867 } 10868 bioerror(bp, EIO); 10869 bp->b_resid = bp->b_bcount; 10870 SD_BEGIN_IODONE(index, un, bp); 10871 return; 10872 } 10873 10874 partition = SDPART(bp->b_edev); 10875 10876 nblocks = 0; 10877 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10878 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10879 10880 /* 10881 * blocknum is the starting block number of the request. At this 10882 * point it is still relative to the start of the minor device. 10883 */ 10884 blocknum = xp->xb_blkno; 10885 10886 /* 10887 * Legacy: If the starting block number is one past the last block 10888 * in the partition, do not set B_ERROR in the buf. 10889 */ 10890 if (blocknum == nblocks) { 10891 goto error_exit; 10892 } 10893 10894 /* 10895 * Confirm that the first block of the request lies within the 10896 * partition limits. Also the requested number of bytes must be 10897 * a multiple of the system block size. 10898 */ 10899 if ((blocknum < 0) || (blocknum >= nblocks) || 10900 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10901 bp->b_flags |= B_ERROR; 10902 goto error_exit; 10903 } 10904 10905 /* 10906 * If the requsted # blocks exceeds the available # blocks, that 10907 * is an overrun of the partition. 10908 */ 10909 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10910 available_nblocks = (size_t)(nblocks - blocknum); 10911 ASSERT(nblocks >= blocknum); 10912 10913 if (requested_nblocks > available_nblocks) { 10914 /* 10915 * Allocate an "overrun" buf to allow the request to proceed 10916 * for the amount of space available in the partition. The 10917 * amount not transferred will be added into the b_resid 10918 * when the operation is complete. The overrun buf 10919 * replaces the original buf here, and the original buf 10920 * is saved inside the overrun buf, for later use. 10921 */ 10922 size_t resid = SD_SYSBLOCKS2BYTES(un, 10923 (offset_t)(requested_nblocks - available_nblocks)); 10924 size_t count = bp->b_bcount - resid; 10925 /* 10926 * Note: count is an unsigned entity thus it'll NEVER 10927 * be less than 0 so ASSERT the original values are 10928 * correct. 10929 */ 10930 ASSERT(bp->b_bcount >= resid); 10931 10932 bp = sd_bioclone_alloc(bp, count, blocknum, 10933 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10934 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10935 ASSERT(xp != NULL); 10936 } 10937 10938 /* At this point there should be no residual for this buf. */ 10939 ASSERT(bp->b_resid == 0); 10940 10941 /* Convert the block number to an absolute address. */ 10942 xp->xb_blkno += partition_offset; 10943 10944 SD_NEXT_IOSTART(index, un, bp); 10945 10946 SD_TRACE(SD_LOG_IO_PARTITION, un, 10947 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10948 10949 return; 10950 10951 error_exit: 10952 bp->b_resid = bp->b_bcount; 10953 SD_BEGIN_IODONE(index, un, bp); 10954 SD_TRACE(SD_LOG_IO_PARTITION, un, 10955 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10956 } 10957 10958 10959 /* 10960 * Function: sd_mapblockaddr_iodone 10961 * 10962 * Description: Completion-side processing for partition management. 10963 * 10964 * Context: May be called under interrupt context 10965 */ 10966 10967 static void 10968 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10969 { 10970 /* int partition; */ /* Not used, see below. 
10971 	ASSERT(un != NULL);
10972 	ASSERT(bp != NULL);
10973 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10974 
10975 	SD_TRACE(SD_LOG_IO_PARTITION, un,
10976 	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
10977 
10978 	if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
10979 		/*
10980 		 * We have an "overrun" buf to deal with...
10981 		 */
10982 		struct sd_xbuf	*xp;
10983 		struct buf	*obp;	/* ptr to the original buf */
10984 
10985 		xp = SD_GET_XBUF(bp);
10986 		ASSERT(xp != NULL);
10987 
10988 		/* Retrieve the pointer to the original buf */
10989 		obp = (struct buf *)xp->xb_private;
10990 		ASSERT(obp != NULL);
10991 
10992 		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
10993 		bioerror(obp, bp->b_error);
10994 
10995 		sd_bioclone_free(bp);
10996 
10997 		/*
10998 		 * Get back the original buf.
10999 		 * Note that since the restoration of xb_blkno below
11000 		 * was removed, the sd_xbuf is not needed.
11001 		 */
11002 		bp = obp;
11003 		/*
11004 		 * xp = SD_GET_XBUF(bp);
11005 		 * ASSERT(xp != NULL);
11006 		 */
11007 	}
11008 
11009 	/*
11010 	 * Convert xp->xb_blkno back to a minor-device relative value.
11011 	 * Note: this has been commented out, as it is not needed in the
11012 	 * current implementation of the driver (ie, since this function
11013 	 * is at the top of the layering chains, so the info will be
11014 	 * discarded) and it is in the "hot" IO path.
11015 	 *
11016 	 * partition = getminor(bp->b_edev) & SDPART_MASK;
11017 	 * xp->xb_blkno -= un->un_offset[partition];
11018 	 */
11019 
11020 	SD_NEXT_IODONE(index, un, bp);
11021 
11022 	SD_TRACE(SD_LOG_IO_PARTITION, un,
11023 	    "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
11024 }
11025 
11026 
11027 /*
11028  * Function: sd_mapblocksize_iostart
11029  *
11030  * Description: Convert between system block size (un->un_sys_blocksize)
11031  *		and target block size (un->un_tgt_blocksize).
11032  *
11033  * Context: Can sleep to allocate resources.
11034  *
11035  * Assumptions: A higher layer has already performed any partition validation,
11036  *		and converted the xp->xb_blkno to an absolute value relative
11037  *		to the start of the device.
11038  *
11039  *		It is also assumed that the higher layer has implemented
11040  *		an "overrun" mechanism for the case where the request would
11041  *		read/write beyond the end of a partition.  In this case we
11042  *		assume (and ASSERT) that bp->b_resid == 0.
11043  *
11044  *		Note: The implementation for this routine assumes the target
11045  *		block size remains constant between allocation and transport.
11046  */
11047 
11048 static void
11049 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
11050 {
11051 	struct sd_mapblocksize_info	*bsp;
11052 	struct sd_xbuf			*xp;
11053 	offset_t first_byte;
11054 	daddr_t	start_block, end_block;
11055 	daddr_t	request_bytes;
11056 	ushort_t is_aligned = FALSE;
11057 
11058 	ASSERT(un != NULL);
11059 	ASSERT(bp != NULL);
11060 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11061 	ASSERT(bp->b_resid == 0);
11062 
11063 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
11064 	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
11065 
11066 	/*
11067 	 * For a non-writable CD, a write request is an error
11068 	 */
11069 	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
11070 	    (un->un_f_mmc_writable_media == FALSE)) {
11071 		bioerror(bp, EIO);
11072 		bp->b_resid = bp->b_bcount;
11073 		SD_BEGIN_IODONE(index, un, bp);
11074 		return;
11075 	}
11076 
11077 	/*
11078 	 * We do not need a shadow buf if the device is using
11079 	 * un->un_sys_blocksize as its block size or if bcount == 0.
11080 	 * In this case there is no layer-private data block allocated.
11081 	 */
11082 	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
11083 	    (bp->b_bcount == 0)) {
11084 		goto done;
11085 	}
11086 
11087 #if defined(__i386) || defined(__amd64)
11088 	/* We do not support non-block-aligned transfers for ROD devices */
11089 	ASSERT(!ISROD(un));
11090 #endif
11091 
11092 	xp = SD_GET_XBUF(bp);
11093 	ASSERT(xp != NULL);
11094 
11095 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
11096 	    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
11097 	    un->un_tgt_blocksize, un->un_sys_blocksize);
11098 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
11099 	    "request start block:0x%x\n", xp->xb_blkno);
11100 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
11101 	    "request len:0x%x\n", bp->b_bcount);
11102 
11103 	/*
11104 	 * Allocate the layer-private data area for the mapblocksize layer.
11105 	 * Layers are allowed to use the xb_private member of the sd_xbuf
11106 	 * struct to store the pointer to their layer-private data block, but
11107 	 * each layer also has the responsibility of restoring the prior
11108 	 * contents of xb_private before returning the buf/xbuf to the
11109 	 * higher layer that sent it.
11110 	 *
11111 	 * Here we save the prior contents of xp->xb_private into the
11112 	 * bsp->mbs_oprivate field of our layer-private data area. This value
11113 	 * is restored by sd_mapblocksize_iodone() just prior to freeing up
11114 	 * the layer-private area and returning the buf/xbuf to the layer
11115 	 * that sent it.
11116 	 *
11117 	 * Note that here we use kmem_zalloc for the allocation as there are
11118 	 * parts of the mapblocksize code that expect certain fields to be
11119 	 * zero unless explicitly set to a required value.
11120 	 */
11121 	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
11122 	bsp->mbs_oprivate = xp->xb_private;
11123 	xp->xb_private = bsp;
11124 
11125 	/*
11126 	 * This treats the data on the disk (target) as an array of bytes.
11127 	 * first_byte is the byte offset, from the beginning of the device,
11128 	 * to the location of the request. This is converted from a
11129 	 * un->un_sys_blocksize block address to a byte offset, and then back
11130 	 * to a block address based upon a un->un_tgt_blocksize block size.
11131 	 *
11132 	 * xp->xb_blkno should be absolute upon entry into this function,
11133 	 * but it is based upon partitions that use the "system"
11134 	 * block size. It must be adjusted to reflect the block size of
11135 	 * the target.
11136 	 *
11137 	 * Note that end_block is actually the block that follows the last
11138 	 * block of the request, but that's what is needed for the computation.
11139 	 */
11140 	first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
11141 	start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
11142 	end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) /
11143 	    un->un_tgt_blocksize;
11144 
11145 	/* request_bytes is rounded up to a multiple of the target block size */
11146 	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
11147 
11148 	/*
11149 	 * See if the starting address of the request and the request
11150 	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
11151 	 * then we do not need to allocate a shadow buf to handle the request.
11152 	 */
11153 	if (((first_byte % un->un_tgt_blocksize) == 0) &&
11154 	    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
11155 		is_aligned = TRUE;
11156 	}
11157 
11158 	if ((bp->b_flags & B_READ) == 0) {
11159 		/*
11160 		 * Lock the range for a write operation.
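		 * For example (illustrative numbers, not from the original
		 * source): with un_sys_blocksize = 512 and un_tgt_blocksize
		 * = 2048, a 1024-byte request starting at system block 3
		 * gives first_byte = 1536, start_block = 0, end_block = 2,
		 * and request_bytes = 4096.  first_byte is not a multiple
		 * of 2048, so the request is unaligned and a write must be
		 * locked as a read-modify-write.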
11161 		 * An aligned request is considered a simple write;
11162 		 * otherwise the request must be a read-modify-write.
11163 		 */
11164 		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
11165 		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
11166 	}
11167 
11168 	/*
11169 	 * Alloc a shadow buf if the request is not aligned. Also, this is
11170 	 * where the READ command is generated for a read-modify-write. (The
11171 	 * write phase is deferred until after the read completes.)
11172 	 */
11173 	if (is_aligned == FALSE) {
11174 
11175 		struct sd_mapblocksize_info	*shadow_bsp;
11176 		struct sd_xbuf	*shadow_xp;
11177 		struct buf	*shadow_bp;
11178 
11179 		/*
11180 		 * Allocate the shadow buf and its associated xbuf. Note that
11181 		 * after this call the xb_blkno value in both the original
11182 		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
11183 		 * same: absolute, relative to the start of the device, and
11184 		 * adjusted for the target block size. The b_blkno in the
11185 		 * shadow buf will also be set to this value. We should never
11186 		 * change b_blkno in the original bp however.
11187 		 *
11188 		 * Note also that the shadow buf will always need to be a
11189 		 * READ command, regardless of whether the incoming command
11190 		 * is a READ or a WRITE.
11191 		 */
11192 		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
11193 		    xp->xb_blkno,
11194 		    (int (*)(struct buf *)) sd_mapblocksize_iodone);
11195 
11196 		shadow_xp = SD_GET_XBUF(shadow_bp);
11197 
11198 		/*
11199 		 * Allocate the layer-private data for the shadow buf.
11200 		 * (No need to preserve xb_private in the shadow xbuf.)
11201 		 */
11202 		shadow_xp->xb_private = shadow_bsp =
11203 		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
11204 
11205 		/*
11206 		 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
11207 		 * to figure out where the start of the user data is (based upon
11208 		 * the system block size) in the data returned by the READ
11209 		 * command (which will be based upon the target blocksize). Note
11210 		 * that this is only really used if the request is unaligned.
11211 		 */
11212 		bsp->mbs_copy_offset = (ssize_t)(first_byte -
11213 		    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
11214 		ASSERT((bsp->mbs_copy_offset >= 0) &&
11215 		    (bsp->mbs_copy_offset < un->un_tgt_blocksize));
11216 
11217 		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
11218 
11219 		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
11220 
11221 		/* Transfer the wmap (if any) to the shadow buf */
11222 		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
11223 		bsp->mbs_wmp = NULL;
11224 
11225 		/*
11226 		 * The shadow buf goes on from here in place of the
11227 		 * original buf.
11228 		 */
11229 		shadow_bsp->mbs_orig_bp = bp;
11230 		bp = shadow_bp;
11231 	}
11232 
11233 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
11234 	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
11235 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
11236 	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
11237 	    request_bytes);
11238 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
11239 	    "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);
11240 
11241 done:
11242 	SD_NEXT_IOSTART(index, un, bp);
11243 
11244 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
11245 	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
11246 }
11247 
11248 
11249 /*
11250  * Function: sd_mapblocksize_iodone
11251  *
11252  * Description: Completion-side processing for block-size mapping.
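 *
 *		Worked example (illustrative numbers only, continuing the
 *		sketch in sd_mapblocksize_iostart): a 1024-byte request at
 *		byte offset 1536 of a 2048-byte-block target is served by a
 *		shadow READ of bytes 0-4095, with copy_offset = 1536, so
 *		request_end = 0 + 1536 + 1024 = 2560.  If the READ returns
 *		everything (b_resid = 0) then shadow_end = 4096 >= 2560 and
 *		the original buf completes with b_resid = 0.  If only one
 *		target block arrives (b_resid = 2048) then shadow_end =
 *		2048, the shortfall is 2560 - 2048 = 512, and only
 *		1024 - 512 = 512 bytes are copied back.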
11253  *
11254  * Context: May be called under interrupt context
11255  */
11256 
11257 static void
11258 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
11259 {
11260 	struct sd_mapblocksize_info	*bsp;
11261 	struct sd_xbuf	*xp;
11262 	struct sd_xbuf	*orig_xp;	/* sd_xbuf for the original buf */
11263 	struct buf	*orig_bp;	/* ptr to the original buf */
11264 	offset_t	shadow_end;
11265 	offset_t	request_end;
11266 	offset_t	shadow_start;
11267 	ssize_t		copy_offset;
11268 	size_t		copy_length;
11269 	size_t		shortfall;
11270 	uint_t		is_write;	/* TRUE if this bp is a WRITE */
11271 	uint_t		has_wmap;	/* TRUE if this bp has a wmap */
11272 
11273 	ASSERT(un != NULL);
11274 	ASSERT(bp != NULL);
11275 
11276 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
11277 	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
11278 
11279 	/*
11280 	 * There is no shadow buf or layer-private data if the target is
11281 	 * using un->un_sys_blocksize as its block size or if bcount == 0.
11282 	 */
11283 	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
11284 	    (bp->b_bcount == 0)) {
11285 		goto exit;
11286 	}
11287 
11288 	xp = SD_GET_XBUF(bp);
11289 	ASSERT(xp != NULL);
11290 
11291 	/* Retrieve the pointer to the layer-private data area from the xbuf. */
11292 	bsp = xp->xb_private;
11293 
11294 	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
11295 	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
11296 
11297 	if (is_write) {
11298 		/*
11299 		 * For a WRITE request we must free up the block range that
11300 		 * we have locked up.  This holds regardless of whether this is
11301 		 * an aligned write request or a read-modify-write request.
11302 		 */
11303 		sd_range_unlock(un, bsp->mbs_wmp);
11304 		bsp->mbs_wmp = NULL;
11305 	}
11306 
11307 	if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
11308 		/*
11309 		 * An aligned read or write command will have no shadow buf;
11310 		 * there is not much else to do with it.
11311 		 */
11312 		goto done;
11313 	}
11314 
11315 	orig_bp = bsp->mbs_orig_bp;
11316 	ASSERT(orig_bp != NULL);
11317 	orig_xp = SD_GET_XBUF(orig_bp);
11318 	ASSERT(orig_xp != NULL);
11319 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11320 
11321 	if (!is_write && has_wmap) {
11322 		/*
11323 		 * A READ with a wmap means this is the READ phase of a
11324 		 * read-modify-write. If an error occurred on the READ then
11325 		 * we do not proceed with the WRITE phase or copy any data.
11326 		 * Just release the write maps and return with an error.
11327 		 */
11328 		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
11329 			orig_bp->b_resid = orig_bp->b_bcount;
11330 			bioerror(orig_bp, bp->b_error);
11331 			sd_range_unlock(un, bsp->mbs_wmp);
11332 			goto freebuf_done;
11333 		}
11334 	}
11335 
11336 	/*
11337 	 * Here is where we set up to copy the data from the shadow buf
11338 	 * into the space associated with the original buf.
11339 	 *
11340 	 * To deal with the conversion between block sizes, these
11341 	 * computations treat the data as an array of bytes, with the
11342 	 * first byte (byte 0) corresponding to the first byte in the
11343 	 * first block on the disk.
11344 	 */
11345 
11346 	/*
11347 	 * shadow_start and shadow_end delimit the location and size of
11348 	 * the data returned with the shadow IO request.
11349 	 */
11350 	shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
11351 	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
11352 
11353 	/*
11354 	 * copy_offset gives the offset (in bytes) from the start of the first
11355 	 * block of the READ request to the beginning of the data.
11356 	 * We retrieve this value from the mbs_copy_offset field of the
11357 	 * layer-private data, where it was saved by sd_mapblocksize_iostart().
11358 	 * copy_length gives the amount of data to be copied (in bytes).
11359 	 */
11360 	copy_offset = bsp->mbs_copy_offset;
11361 	ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
11362 	copy_length = orig_bp->b_bcount;
11363 	request_end = shadow_start + copy_offset + orig_bp->b_bcount;
11364 
11365 	/*
11366 	 * Set up the resid and error fields of orig_bp as appropriate.
11367 	 */
11368 	if (shadow_end >= request_end) {
11369 		/* We got all the requested data; set resid to zero */
11370 		orig_bp->b_resid = 0;
11371 	} else {
11372 		/*
11373 		 * We failed to get enough data to fully satisfy the original
11374 		 * request. Just copy back whatever data we got and set
11375 		 * up the residual and error code as required.
11376 		 *
11377 		 * 'shortfall' is the amount by which the data received with the
11378 		 * shadow buf has "fallen short" of the requested amount.
11379 		 */
11380 		shortfall = (size_t)(request_end - shadow_end);
11381 
11382 		if (shortfall > orig_bp->b_bcount) {
11383 			/*
11384 			 * We did not get enough data to even partially
11385 			 * fulfill the original request.  The residual is
11386 			 * equal to the amount requested.
11387 			 */
11388 			orig_bp->b_resid = orig_bp->b_bcount;
11389 		} else {
11390 			/*
11391 			 * We did not get all the data that we requested
11392 			 * from the device, but we will try to return what
11393 			 * portion we did get.
11394 			 */
11395 			orig_bp->b_resid = shortfall;
11396 		}
11397 		ASSERT(copy_length >= orig_bp->b_resid);
11398 		copy_length -= orig_bp->b_resid;
11399 	}
11400 
11401 	/* Propagate the error code from the shadow buf to the original buf */
11402 	bioerror(orig_bp, bp->b_error);
11403 
11404 	if (is_write) {
11405 		goto freebuf_done;	/* No data copying for a WRITE */
11406 	}
11407 
11408 	if (has_wmap) {
11409 		/*
11410 		 * This is a READ command from the READ phase of a
11411 		 * read-modify-write request. We have to copy the data given
11412 		 * by the user OVER the data returned by the READ command,
11413 		 * then convert the command from a READ to a WRITE and send
11414 		 * it back to the target.
11415 		 */
11416 		bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
11417 		    copy_length);
11418 
11419 		bp->b_flags &= ~((int)B_READ);	/* Convert to a WRITE */
11420 
11421 		/*
11422 		 * Dispatch the WRITE command to the taskq thread, which
11423 		 * will in turn send the command to the target. When the
11424 		 * WRITE command completes, we (sd_mapblocksize_iodone())
11425 		 * will get called again as part of the iodone chain
11426 		 * processing for it. Note that we will still be dealing
11427 		 * with the shadow buf at that point.
11428 		 */
11429 		if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
11430 		    KM_NOSLEEP) != 0) {
11431 			/*
11432 			 * Dispatch was successful so we are done. Return
11433 			 * without going any higher up the iodone chain. Do
11434 			 * not free up any layer-private data until after the
11435 			 * WRITE completes.
11436 			 */
11437 			return;
11438 		}
11439 
11440 		/*
11441 		 * Dispatch of the WRITE command failed; set up the error
11442 		 * condition and send this IO back up the iodone chain.
11443 		 */
11444 		bioerror(orig_bp, EIO);
11445 		orig_bp->b_resid = orig_bp->b_bcount;
11446 
11447 	} else {
11448 		/*
11449 		 * This is a regular READ request (ie, not a RMW). Copy the
11450 		 * data from the shadow buf into the original buf.
The 11451 * copy_offset compensates for any "misalignment" between the 11452 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11453 * original buf (with its un->un_sys_blocksize blocks). 11454 */ 11455 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11456 copy_length); 11457 } 11458 11459 freebuf_done: 11460 11461 /* 11462 * At this point we still have both the shadow buf AND the original 11463 * buf to deal with, as well as the layer-private data area in each. 11464 * Local variables are as follows: 11465 * 11466 * bp -- points to shadow buf 11467 * xp -- points to xbuf of shadow buf 11468 * bsp -- points to layer-private data area of shadow buf 11469 * orig_bp -- points to original buf 11470 * 11471 * First free the shadow buf and its associated xbuf, then free the 11472 * layer-private data area from the shadow buf. There is no need to 11473 * restore xb_private in the shadow xbuf. 11474 */ 11475 sd_shadow_buf_free(bp); 11476 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11477 11478 /* 11479 * Now update the local variables to point to the original buf, xbuf, 11480 * and layer-private area. 11481 */ 11482 bp = orig_bp; 11483 xp = SD_GET_XBUF(bp); 11484 ASSERT(xp != NULL); 11485 ASSERT(xp == orig_xp); 11486 bsp = xp->xb_private; 11487 ASSERT(bsp != NULL); 11488 11489 done: 11490 /* 11491 * Restore xb_private to whatever it was set to by the next higher 11492 * layer in the chain, then free the layer-private data area. 11493 */ 11494 xp->xb_private = bsp->mbs_oprivate; 11495 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11496 11497 exit: 11498 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11499 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11500 11501 SD_NEXT_IODONE(index, un, bp); 11502 } 11503 11504 11505 /* 11506 * Function: sd_checksum_iostart 11507 * 11508 * Description: A stub function for a layer that's currently not used. 11509 * For now just a placeholder. 11510 * 11511 * Context: Kernel thread context 11512 */ 11513 11514 static void 11515 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11516 { 11517 ASSERT(un != NULL); 11518 ASSERT(bp != NULL); 11519 ASSERT(!mutex_owned(SD_MUTEX(un))); 11520 SD_NEXT_IOSTART(index, un, bp); 11521 } 11522 11523 11524 /* 11525 * Function: sd_checksum_iodone 11526 * 11527 * Description: A stub function for a layer that's currently not used. 11528 * For now just a placeholder. 11529 * 11530 * Context: May be called under interrupt context 11531 */ 11532 11533 static void 11534 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11535 { 11536 ASSERT(un != NULL); 11537 ASSERT(bp != NULL); 11538 ASSERT(!mutex_owned(SD_MUTEX(un))); 11539 SD_NEXT_IODONE(index, un, bp); 11540 } 11541 11542 11543 /* 11544 * Function: sd_checksum_uscsi_iostart 11545 * 11546 * Description: A stub function for a layer that's currently not used. 11547 * For now just a placeholder. 11548 * 11549 * Context: Kernel thread context 11550 */ 11551 11552 static void 11553 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11554 { 11555 ASSERT(un != NULL); 11556 ASSERT(bp != NULL); 11557 ASSERT(!mutex_owned(SD_MUTEX(un))); 11558 SD_NEXT_IOSTART(index, un, bp); 11559 } 11560 11561 11562 /* 11563 * Function: sd_checksum_uscsi_iodone 11564 * 11565 * Description: A stub function for a layer that's currently not used. 11566 * For now just a placeholder. 
11567  *
11568  * Context: May be called under interrupt context
11569  */
11570 
11571 static void
11572 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
11573 {
11574 	ASSERT(un != NULL);
11575 	ASSERT(bp != NULL);
11576 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11577 	SD_NEXT_IODONE(index, un, bp);
11578 }
11579 
11580 
11581 /*
11582  * Function: sd_pm_iostart
11583  *
11584  * Description: iostart-side routine for power management.
11585  *
11586  * Context: Kernel thread context
11587  */
11588 
11589 static void
11590 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
11591 {
11592 	ASSERT(un != NULL);
11593 	ASSERT(bp != NULL);
11594 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11595 	ASSERT(!mutex_owned(&un->un_pm_mutex));
11596 
11597 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
11598 
11599 	if (sd_pm_entry(un) != DDI_SUCCESS) {
11600 		/*
11601 		 * Set up to return the failed buf back up the 'iodone'
11602 		 * side of the calling chain.
11603 		 */
11604 		bioerror(bp, EIO);
11605 		bp->b_resid = bp->b_bcount;
11606 
11607 		SD_BEGIN_IODONE(index, un, bp);
11608 
11609 		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
11610 		return;
11611 	}
11612 
11613 	SD_NEXT_IOSTART(index, un, bp);
11614 
11615 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
11616 }
11617 
11618 
11619 /*
11620  * Function: sd_pm_iodone
11621  *
11622  * Description: iodone-side routine for power management.
11623  *
11624  * Context: may be called from interrupt context
11625  */
11626 
11627 static void
11628 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
11629 {
11630 	ASSERT(un != NULL);
11631 	ASSERT(bp != NULL);
11632 	ASSERT(!mutex_owned(&un->un_pm_mutex));
11633 
11634 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
11635 
11636 	/*
11637 	 * After attach the following flag is only read, so don't
11638 	 * take the penalty of acquiring a mutex for it.
11639 	 */
11640 	if (un->un_f_pm_is_enabled == TRUE) {
11641 		sd_pm_exit(un);
11642 	}
11643 
11644 	SD_NEXT_IODONE(index, un, bp);
11645 
11646 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
11647 }
11648 
11649 
11650 /*
11651  * Function: sd_core_iostart
11652  *
11653  * Description: Primary driver function for enqueuing buf(9S) structs from
11654  *		the system and initiating IO to the target device
11655  *
11656  * Context: Kernel thread context. Can sleep.
11657  *
11658  * Assumptions: - The given xp->xb_blkno is absolute
11659  *		  (ie, relative to the start of the device).
11660  *		- The IO is to be done using the native blocksize of
11661  *		  the device, as specified in un->un_tgt_blocksize.
11662  */
11663 /* ARGSUSED */
11664 static void
11665 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
11666 {
11667 	struct sd_xbuf *xp;
11668 
11669 	ASSERT(un != NULL);
11670 	ASSERT(bp != NULL);
11671 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11672 	ASSERT(bp->b_resid == 0);
11673 
11674 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
11675 
11676 	xp = SD_GET_XBUF(bp);
11677 	ASSERT(xp != NULL);
11678 
11679 	mutex_enter(SD_MUTEX(un));
11680 
11681 	/*
11682 	 * If we are currently in the failfast state, fail any new IO
11683 	 * that has B_FAILFAST set, then return.
11684 	 */
11685 	if ((bp->b_flags & B_FAILFAST) &&
11686 	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
11687 		mutex_exit(SD_MUTEX(un));
11688 		bioerror(bp, EIO);
11689 		bp->b_resid = bp->b_bcount;
11690 		SD_BEGIN_IODONE(index, un, bp);
11691 		return;
11692 	}
11693 
11694 	if (SD_IS_DIRECT_PRIORITY(xp)) {
11695 		/*
11696 		 * Priority command -- transport it immediately.
11697 * 11698 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11699 * because all direct priority commands should be associated 11700 * with error recovery actions which we don't want to retry. 11701 */ 11702 sd_start_cmds(un, bp); 11703 } else { 11704 /* 11705 * Normal command -- add it to the wait queue, then start 11706 * transporting commands from the wait queue. 11707 */ 11708 sd_add_buf_to_waitq(un, bp); 11709 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11710 sd_start_cmds(un, NULL); 11711 } 11712 11713 mutex_exit(SD_MUTEX(un)); 11714 11715 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11716 } 11717 11718 11719 /* 11720 * Function: sd_init_cdb_limits 11721 * 11722 * Description: This is to handle scsi_pkt initialization differences 11723 * between the driver platforms. 11724 * 11725 * Legacy behaviors: 11726 * 11727 * If the block number or the sector count exceeds the 11728 * capabilities of a Group 0 command, shift over to a 11729 * Group 1 command. We don't blindly use Group 1 11730 * commands because a) some drives (CDC Wren IVs) get a 11731 * bit confused, and b) there is probably a fair amount 11732 * of speed difference for a target to receive and decode 11733 * a 10 byte command instead of a 6 byte command. 11734 * 11735 * The xfer time difference of 6 vs 10 byte CDBs is 11736 * still significant so this code is still worthwhile. 11737 * 10 byte CDBs are very inefficient with the fas HBA driver 11738 * and older disks. Each CDB byte took 1 usec with some 11739 * popular disks. 11740 * 11741 * Context: Must be called at attach time 11742 */ 11743 11744 static void 11745 sd_init_cdb_limits(struct sd_lun *un) 11746 { 11747 int hba_cdb_limit; 11748 11749 /* 11750 * Use CDB_GROUP1 commands for most devices except for 11751 * parallel SCSI fixed drives in which case we get better 11752 * performance using CDB_GROUP0 commands (where applicable). 11753 */ 11754 un->un_mincdb = SD_CDB_GROUP1; 11755 #if !defined(__fibre) 11756 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11757 !un->un_f_has_removable_media) { 11758 un->un_mincdb = SD_CDB_GROUP0; 11759 } 11760 #endif 11761 11762 /* 11763 * Try to read the max-cdb-length supported by HBA. 11764 */ 11765 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11766 if (0 >= un->un_max_hba_cdb) { 11767 un->un_max_hba_cdb = CDB_GROUP4; 11768 hba_cdb_limit = SD_CDB_GROUP4; 11769 } else if (0 < un->un_max_hba_cdb && 11770 un->un_max_hba_cdb < CDB_GROUP1) { 11771 hba_cdb_limit = SD_CDB_GROUP0; 11772 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11773 un->un_max_hba_cdb < CDB_GROUP5) { 11774 hba_cdb_limit = SD_CDB_GROUP1; 11775 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11776 un->un_max_hba_cdb < CDB_GROUP4) { 11777 hba_cdb_limit = SD_CDB_GROUP5; 11778 } else { 11779 hba_cdb_limit = SD_CDB_GROUP4; 11780 } 11781 11782 /* 11783 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11784 * commands for fixed disks unless we are building for a 32 bit 11785 * kernel. 11786 */ 11787 #ifdef _LP64 11788 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11789 min(hba_cdb_limit, SD_CDB_GROUP4); 11790 #else 11791 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11792 min(hba_cdb_limit, SD_CDB_GROUP1); 11793 #endif 11794 11795 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11796 ? sizeof (struct scsi_arq_status) : 1); 11797 un->un_cmd_timeout = (ushort_t)sd_io_time; 11798 un->un_uscsi_timeout = ((ISCD(un)) ? 
    2 : 1) * un->un_cmd_timeout;
11799 }
11800 
11801 
11802 /*
11803  * Function: sd_initpkt_for_buf
11804  *
11805  * Description: Allocate and initialize for transport a scsi_pkt struct,
11806  *		based upon the info specified in the given buf struct.
11807  *
11808  *		Assumes the xb_blkno in the request is absolute (ie,
11809  *		relative to the start of the device, NOT the partition!).
11810  *		Also assumes that the request is using the native block
11811  *		size of the device (as returned by the READ CAPACITY
11812  *		command).
11813  *
11814  * Return Code: SD_PKT_ALLOC_SUCCESS
11815  *		SD_PKT_ALLOC_FAILURE
11816  *		SD_PKT_ALLOC_FAILURE_NO_DMA
11817  *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
11818  *
11819  * Context: Kernel thread and may be called from software interrupt context
11820  *		as part of a sdrunout callback. This function may not block or
11821  *		call routines that block
11822  */
11823 
11824 static int
11825 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
11826 {
11827 	struct sd_xbuf	*xp;
11828 	struct scsi_pkt *pktp = NULL;
11829 	struct sd_lun	*un;
11830 	size_t		blockcount;
11831 	daddr_t		startblock;
11832 	int		rval;
11833 	int		cmd_flags;
11834 
11835 	ASSERT(bp != NULL);
11836 	ASSERT(pktpp != NULL);
11837 	xp = SD_GET_XBUF(bp);
11838 	ASSERT(xp != NULL);
11839 	un = SD_GET_UN(bp);
11840 	ASSERT(un != NULL);
11841 	ASSERT(mutex_owned(SD_MUTEX(un)));
11842 	ASSERT(bp->b_resid == 0);
11843 
11844 	SD_TRACE(SD_LOG_IO_CORE, un,
11845 	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
11846 
11847 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
11848 	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
11849 		/*
11850 		 * Already have a scsi_pkt -- just need DMA resources.
11851 		 * We must recompute the CDB in case the mapping returns
11852 		 * a nonzero pkt_resid.
11853 		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
11854 		 * that is being retried, the unmap/remap of the DMA resources
11855 		 * will result in the entire transfer starting over again
11856 		 * from the very first block.
11857 		 */
11858 		ASSERT(xp->xb_pktp != NULL);
11859 		pktp = xp->xb_pktp;
11860 	} else {
11861 		pktp = NULL;
11862 	}
11863 #endif /* __i386 || __amd64 */
11864 
11865 	startblock = xp->xb_blkno;	/* Absolute block num. */
11866 	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
11867 
11868 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
11869 
11870 	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
11871 
11872 #else
11873 
11874 	cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags;
11875 
11876 #endif
11877 
11878 	/*
11879 	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
11880 	 * call scsi_init_pkt, and build the CDB.
11881 	 */
11882 	rval = sd_setup_rw_pkt(un, &pktp, bp,
11883 	    cmd_flags, sdrunout, (caddr_t)un,
11884 	    startblock, blockcount);
11885 
11886 	if (rval == 0) {
11887 		/*
11888 		 * Success.
11889 		 *
11890 		 * If partial DMA is being used and required for this transfer,
11891 		 * set it up here.
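		 * For example (illustrative numbers only): if b_bcount is
		 * 1 MB but the HBA can map only 256 KB per window,
		 * scsi_init_pkt() returns pkt_resid = 768 KB.  That
		 * residual is parked in xp->xb_dma_resid below and is
		 * worked off by later sd_setup_next_rw_pkt() calls, one
		 * DMA window at a time.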
11892 		 */
11893 		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
11894 		    (pktp->pkt_resid != 0)) {
11895 
11896 			/*
11897 			 * Save the pkt_resid for the
11898 			 * next xfer
11899 			 */
11900 			xp->xb_dma_resid = pktp->pkt_resid;
11901 
11902 			/* rezero resid */
11903 			pktp->pkt_resid = 0;
11904 
11905 		} else {
11906 			xp->xb_dma_resid = 0;
11907 		}
11908 
11909 		pktp->pkt_flags = un->un_tagflags;
11910 		pktp->pkt_time  = un->un_cmd_timeout;
11911 		pktp->pkt_comp  = sdintr;
11912 
11913 		pktp->pkt_private = bp;
11914 		*pktpp = pktp;
11915 
11916 		SD_TRACE(SD_LOG_IO_CORE, un,
11917 		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
11918 
11919 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
11920 		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
11921 #endif
11922 
11923 		return (SD_PKT_ALLOC_SUCCESS);
11924 
11925 	}
11926 
11927 	/*
11928 	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
11929 	 * from sd_setup_rw_pkt.
11930 	 */
11931 	ASSERT(rval == SD_PKT_ALLOC_FAILURE);
11932 
11933 	if (rval == SD_PKT_ALLOC_FAILURE) {
11934 		*pktpp = NULL;
11935 		/*
11936 		 * Set the driver state to RWAIT to indicate the driver
11937 		 * is waiting on resource allocations. The driver will not
11938 		 * suspend, pm_suspend, or detach while the state is RWAIT.
11939 		 */
11940 		New_state(un, SD_STATE_RWAIT);
11941 
11942 		SD_ERROR(SD_LOG_IO_CORE, un,
11943 		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
11944 
11945 		if ((bp->b_flags & B_ERROR) != 0) {
11946 			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
11947 		}
11948 		return (SD_PKT_ALLOC_FAILURE);
11949 	} else {
11950 		/*
11951 		 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
11952 		 *
11953 		 * This should never happen.  Maybe someone messed with the
11954 		 * kernel's minphys?
11955 		 */
11956 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
11957 		    "Request rejected: too large for CDB: "
11958 		    "lba:0x%08lx  len:0x%08lx\n", startblock, blockcount);
11959 		SD_ERROR(SD_LOG_IO_CORE, un,
11960 		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
11961 		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
11962 
11963 	}
11964 }
11965 
11966 
11967 /*
11968  * Function: sd_destroypkt_for_buf
11969  *
11970  * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
11971  *
11972  * Context: Kernel thread or interrupt context
11973  */
11974 
11975 static void
11976 sd_destroypkt_for_buf(struct buf *bp)
11977 {
11978 	ASSERT(bp != NULL);
11979 	ASSERT(SD_GET_UN(bp) != NULL);
11980 
11981 	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
11982 	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
11983 
11984 	ASSERT(SD_GET_PKTP(bp) != NULL);
11985 	scsi_destroy_pkt(SD_GET_PKTP(bp));
11986 
11987 	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
11988 	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
11989 }
11990 
11991 /*
11992  * Function: sd_setup_rw_pkt
11993  *
11994  * Description: Determines appropriate CDB group for the requested LBA
11995  *		and transfer length, calls scsi_init_pkt, and builds
11996  *		the CDB.  Do not use for partial DMA transfers except
11997  *		for the initial transfer since the CDB size must
11998  *		remain constant.
11999  *
12000  * Context: Kernel thread and may be called from software interrupt
12001  *		context as part of a sdrunout callback. This function may not
12002  *		block or call routines that block
12003  */
12004 
12005 
12006 int
12007 sd_setup_rw_pkt(struct sd_lun *un,
12008     struct scsi_pkt **pktpp, struct buf *bp, int flags,
12009     int (*callback)(caddr_t), caddr_t callback_arg,
12010     diskaddr_t lba, uint32_t blockcount)
12011 {
12012 	struct scsi_pkt *return_pktp;
12013 	union scsi_cdb *cdbp;
12014 	struct sd_cdbinfo *cp = NULL;
12015 	int i;
12016 
12017 	/*
12018 	 * See which size CDB to use, based upon the request.
12019 	 */
12020 	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
12021 
12022 		/*
12023 		 * Check lba and block count against sd_cdbtab limits.
12024 		 * In the partial DMA case, we have to use the same size
12025 		 * CDB for all the transfers.  Check lba + blockcount
12026 		 * against the max LBA so we know that segment of the
12027 		 * transfer can use the CDB we select.
12028 		 */
12029 		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
12030 		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {
12031 
12032 			/*
12033 			 * The command will fit into the CDB type
12034 			 * specified by sd_cdbtab[i].
12035 			 */
12036 			cp = sd_cdbtab + i;
12037 
12038 			/*
12039 			 * Call scsi_init_pkt so we can fill in the
12040 			 * CDB.
12041 			 */
12042 			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
12043 			    bp, cp->sc_grpcode, un->un_status_len, 0,
12044 			    flags, callback, callback_arg);
12045 
12046 			if (return_pktp != NULL) {
12047 
12048 				/*
12049 				 * Return new value of pkt
12050 				 */
12051 				*pktpp = return_pktp;
12052 
12053 				/*
12054 				 * To be safe, zero the CDB, ensuring there is
12055 				 * no leftover data from a previous command.
12056 				 */
12057 				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
12058 
12059 				/*
12060 				 * Handle partial DMA mapping
12061 				 */
12062 				if (return_pktp->pkt_resid != 0) {
12063 
12064 					/*
12065 					 * Not going to xfer as many blocks as
12066 					 * originally expected
12067 					 */
12068 					blockcount -=
12069 					    SD_BYTES2TGTBLOCKS(un,
12070 					    return_pktp->pkt_resid);
12071 				}
12072 
12073 				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
12074 
12075 				/*
12076 				 * Set command byte based on the CDB
12077 				 * type we matched.
12078 				 */
12079 				cdbp->scc_cmd = cp->sc_grpmask |
12080 				    ((bp->b_flags & B_READ) ?
12081 				    SCMD_READ : SCMD_WRITE);
12082 
12083 				SD_FILL_SCSI1_LUN(un, return_pktp);
12084 
12085 				/*
12086 				 * Fill in LBA and length
12087 				 */
12088 				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
12089 				    (cp->sc_grpcode == CDB_GROUP4) ||
12090 				    (cp->sc_grpcode == CDB_GROUP0) ||
12091 				    (cp->sc_grpcode == CDB_GROUP5));
12092 
12093 				if (cp->sc_grpcode == CDB_GROUP1) {
12094 					FORMG1ADDR(cdbp, lba);
12095 					FORMG1COUNT(cdbp, blockcount);
12096 					return (0);
12097 				} else if (cp->sc_grpcode == CDB_GROUP4) {
12098 					FORMG4LONGADDR(cdbp, lba);
12099 					FORMG4COUNT(cdbp, blockcount);
12100 					return (0);
12101 				} else if (cp->sc_grpcode == CDB_GROUP0) {
12102 					FORMG0ADDR(cdbp, lba);
12103 					FORMG0COUNT(cdbp, blockcount);
12104 					return (0);
12105 				} else if (cp->sc_grpcode == CDB_GROUP5) {
12106 					FORMG5ADDR(cdbp, lba);
12107 					FORMG5COUNT(cdbp, blockcount);
12108 					return (0);
12109 				}
12110 
12111 				/*
12112 				 * It should be impossible to not match one
12113 				 * of the CDB types above, so we should never
12114 				 * reach this point.  Set the CDB command byte
12115 				 * to test-unit-ready to avoid writing
12116 				 * to somewhere we don't intend.
12117 				 */
12118 				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
12119 				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
12120 			} else {
12121 				/*
12122 				 * Couldn't get scsi_pkt
12123 				 */
12124 				return (SD_PKT_ALLOC_FAILURE);
12125 			}
12126 		}
12127 	}
12128 
12129 	/*
12130 	 * None of the available CDB types were suitable.
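	 * (For reference, a summary added here and not taken from the
	 * original comments: 6-byte Group 0 CDBs typically carry a 21-bit
	 * LBA and up to 256 blocks; 10-byte Group 1 CDBs a 32-bit LBA and
	 * a 16-bit block count; 12-byte Group 5 and 16-byte Group 4 CDBs
	 * a 32-bit and a 64-bit LBA respectively, each with a 32-bit
	 * count.  These are the limits sd_cdbtab is presumed to encode.)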
This really 12131 * should never happen: on a 64 bit system we support 12132 * READ16/WRITE16 which will hold an entire 64 bit disk address 12133 * and on a 32 bit system we will refuse to bind to a device 12134 * larger than 2TB so addresses will never be larger than 32 bits. 12135 */ 12136 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12137 } 12138 12139 /* 12140 * Function: sd_setup_next_rw_pkt 12141 * 12142 * Description: Setup packet for partial DMA transfers, except for the 12143 * initial transfer. sd_setup_rw_pkt should be used for 12144 * the initial transfer. 12145 * 12146 * Context: Kernel thread and may be called from interrupt context. 12147 */ 12148 12149 int 12150 sd_setup_next_rw_pkt(struct sd_lun *un, 12151 struct scsi_pkt *pktp, struct buf *bp, 12152 diskaddr_t lba, uint32_t blockcount) 12153 { 12154 uchar_t com; 12155 union scsi_cdb *cdbp; 12156 uchar_t cdb_group_id; 12157 12158 ASSERT(pktp != NULL); 12159 ASSERT(pktp->pkt_cdbp != NULL); 12160 12161 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 12162 com = cdbp->scc_cmd; 12163 cdb_group_id = CDB_GROUPID(com); 12164 12165 ASSERT((cdb_group_id == CDB_GROUPID_0) || 12166 (cdb_group_id == CDB_GROUPID_1) || 12167 (cdb_group_id == CDB_GROUPID_4) || 12168 (cdb_group_id == CDB_GROUPID_5)); 12169 12170 /* 12171 * Move pkt to the next portion of the xfer. 12172 * func is NULL_FUNC so we do not have to release 12173 * the disk mutex here. 12174 */ 12175 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 12176 NULL_FUNC, NULL) == pktp) { 12177 /* Success. Handle partial DMA */ 12178 if (pktp->pkt_resid != 0) { 12179 blockcount -= 12180 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 12181 } 12182 12183 cdbp->scc_cmd = com; 12184 SD_FILL_SCSI1_LUN(un, pktp); 12185 if (cdb_group_id == CDB_GROUPID_1) { 12186 FORMG1ADDR(cdbp, lba); 12187 FORMG1COUNT(cdbp, blockcount); 12188 return (0); 12189 } else if (cdb_group_id == CDB_GROUPID_4) { 12190 FORMG4LONGADDR(cdbp, lba); 12191 FORMG4COUNT(cdbp, blockcount); 12192 return (0); 12193 } else if (cdb_group_id == CDB_GROUPID_0) { 12194 FORMG0ADDR(cdbp, lba); 12195 FORMG0COUNT(cdbp, blockcount); 12196 return (0); 12197 } else if (cdb_group_id == CDB_GROUPID_5) { 12198 FORMG5ADDR(cdbp, lba); 12199 FORMG5COUNT(cdbp, blockcount); 12200 return (0); 12201 } 12202 12203 /* Unreachable */ 12204 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12205 } 12206 12207 /* 12208 * Error setting up next portion of cmd transfer. 12209 * Something is definitely very wrong and this 12210 * should not happen. 12211 */ 12212 return (SD_PKT_ALLOC_FAILURE); 12213 } 12214 12215 /* 12216 * Function: sd_initpkt_for_uscsi 12217 * 12218 * Description: Allocate and initialize for transport a scsi_pkt struct, 12219 * based upon the info specified in the given uscsi_cmd struct. 12220 * 12221 * Return Code: SD_PKT_ALLOC_SUCCESS 12222 * SD_PKT_ALLOC_FAILURE 12223 * SD_PKT_ALLOC_FAILURE_NO_DMA 12224 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12225 * 12226 * Context: Kernel thread and may be called from software interrupt context 12227 * as part of a sdrunout callback. 
This function may not block or
12228  *		call routines that block
12229  */
12230 
12231 static int
12232 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
12233 {
12234 	struct uscsi_cmd *uscmd;
12235 	struct sd_xbuf	*xp;
12236 	struct scsi_pkt	*pktp;
12237 	struct sd_lun	*un;
12238 	uint32_t	flags = 0;
12239 
12240 	ASSERT(bp != NULL);
12241 	ASSERT(pktpp != NULL);
12242 	xp = SD_GET_XBUF(bp);
12243 	ASSERT(xp != NULL);
12244 	un = SD_GET_UN(bp);
12245 	ASSERT(un != NULL);
12246 	ASSERT(mutex_owned(SD_MUTEX(un)));
12247 
12248 	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
12249 	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
12250 	ASSERT(uscmd != NULL);
12251 
12252 	SD_TRACE(SD_LOG_IO_CORE, un,
12253 	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
12254 
12255 	/*
12256 	 * Allocate the scsi_pkt for the command.
12257 	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
12258 	 *	 during scsi_init_pkt time and will continue to use the
12259 	 *	 same path as long as the same scsi_pkt is used without
12260 	 *	 intervening scsi_dmafree().  Since a uscsi command does
12261 	 *	 not call scsi_dmafree() before retrying a failed command,
12262 	 *	 it is necessary to make sure the PKT_DMA_PARTIAL flag is
12263 	 *	 NOT set, so that scsi_vhci can use another available path
12264 	 *	 for retry.  Besides, a uscsi command does not allow DMA
12265 	 *	 breakup, so there is no need to set PKT_DMA_PARTIAL.
12266 	 */
12267 	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
12268 		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
12269 		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
12270 		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
12271 		    - sizeof (struct scsi_extended_sense)), 0,
12272 		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
12273 		    sdrunout, (caddr_t)un);
12274 	} else {
12275 		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
12276 		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
12277 		    sizeof (struct scsi_arq_status), 0,
12278 		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
12279 		    sdrunout, (caddr_t)un);
12280 	}
12281 
12282 	if (pktp == NULL) {
12283 		*pktpp = NULL;
12284 		/*
12285 		 * Set the driver state to RWAIT to indicate the driver
12286 		 * is waiting on resource allocations. The driver will not
12287 		 * suspend, pm_suspend, or detach while the state is RWAIT.
12288 		 */
12289 		New_state(un, SD_STATE_RWAIT);
12290 
12291 		SD_ERROR(SD_LOG_IO_CORE, un,
12292 		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
12293 
12294 		if ((bp->b_flags & B_ERROR) != 0) {
12295 			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
12296 		}
12297 		return (SD_PKT_ALLOC_FAILURE);
12298 	}
12299 
12300 	/*
12301 	 * We do not do DMA breakup for USCSI commands, so return failure
12302 	 * here if all the needed DMA resources were not allocated.
12303 	 */
12304 	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
12305 	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
12306 		scsi_destroy_pkt(pktp);
12307 		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
12308 		    "No partial DMA for USCSI.  exit: buf:0x%p\n", bp);
12309 		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
12310 	}
12311 
12312 	/* Init the cdb from the given uscsi struct */
12313 	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
12314 	    uscmd->uscsi_cdb[0], 0, 0, 0);
12315 
12316 	SD_FILL_SCSI1_LUN(un, pktp);
12317 
12318 	/*
12319 	 * Set up the optional USCSI flags. See the uscsi (7I) man page
12320 	 * for listing of the supported flags.
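	 *
	 * As illustration (a hypothetical userland sketch, not part of
	 * this driver): commands normally reach this path via the
	 * USCSIOCMD ioctl, along these lines:
	 *
	 *	#include <sys/scsi/impl/uscsi.h>
	 *	#include <fcntl.h>
	 *	#include <unistd.h>
	 *
	 *	char cdb[6] = { 0 };		(a TEST UNIT READY CDB)
	 *	char rqbuf[255];
	 *	struct uscsi_cmd ucmd = { 0 };
	 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);
	 *
	 *	ucmd.uscsi_cdb = cdb;
	 *	ucmd.uscsi_cdblen = 6;
	 *	ucmd.uscsi_flags = USCSI_SILENT | USCSI_RQENABLE;
	 *	ucmd.uscsi_rqbuf = rqbuf;
	 *	ucmd.uscsi_rqlen = sizeof (rqbuf);
	 *	ucmd.uscsi_timeout = 30;
	 *	(void) ioctl(fd, USCSIOCMD, &ucmd);
	 *
	 * The device path is only an example.  The uscsi_flags set by the
	 * caller are translated below into the corresponding FLAG_* pkt
	 * flags.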
12321 	 */
12322 
12323 	if (uscmd->uscsi_flags & USCSI_SILENT) {
12324 		flags |= FLAG_SILENT;
12325 	}
12326 
12327 	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
12328 		flags |= FLAG_DIAGNOSE;
12329 	}
12330 
12331 	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
12332 		flags |= FLAG_ISOLATE;
12333 	}
12334 
12335 	if (un->un_f_is_fibre == FALSE) {
12336 		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
12337 			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
12338 		}
12339 	}
12340 
12341 	/*
12342 	 * Set the pkt flags here so we save time later.
12343 	 * Note: These flags are NOT in the uscsi man page!!!
12344 	 */
12345 	if (uscmd->uscsi_flags & USCSI_HEAD) {
12346 		flags |= FLAG_HEAD;
12347 	}
12348 
12349 	if (uscmd->uscsi_flags & USCSI_NOINTR) {
12350 		flags |= FLAG_NOINTR;
12351 	}
12352 
12353 	/*
12354 	 * For tagged queueing, things get a bit complicated.
12355 	 * Check first for head of queue and last for ordered queue.
12356 	 * If neither head nor order, use the default driver tag flags.
12357 	 */
12358 	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
12359 		if (uscmd->uscsi_flags & USCSI_HTAG) {
12360 			flags |= FLAG_HTAG;
12361 		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
12362 			flags |= FLAG_OTAG;
12363 		} else {
12364 			flags |= un->un_tagflags & FLAG_TAGMASK;
12365 		}
12366 	}
12367 
12368 	if (uscmd->uscsi_flags & USCSI_NODISCON) {
12369 		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
12370 	}
12371 
12372 	pktp->pkt_flags = flags;
12373 
12374 	/* Transfer uscsi information to scsi_pkt */
12375 	(void) scsi_uscsi_pktinit(uscmd, pktp);
12376 
12377 	/* Copy the caller's CDB into the pkt... */
12378 	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
12379 
12380 	if (uscmd->uscsi_timeout == 0) {
12381 		pktp->pkt_time = un->un_uscsi_timeout;
12382 	} else {
12383 		pktp->pkt_time = uscmd->uscsi_timeout;
12384 	}
12385 
12386 	/* need it later to identify USCSI request in sdintr */
12387 	xp->xb_pkt_flags |= SD_XB_USCSICMD;
12388 
12389 	xp->xb_sense_resid = uscmd->uscsi_rqresid;
12390 
12391 	pktp->pkt_private = bp;
12392 	pktp->pkt_comp = sdintr;
12393 	*pktpp = pktp;
12394 
12395 	SD_TRACE(SD_LOG_IO_CORE, un,
12396 	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
12397 
12398 	return (SD_PKT_ALLOC_SUCCESS);
12399 }
12400 
12401 
12402 /*
12403  * Function: sd_destroypkt_for_uscsi
12404  *
12405  * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
12406  *		IOs. Also saves relevant info into the associated uscsi_cmd
12407  *		struct.
12408  *
12409  * Context: May be called under interrupt context
12410  */
12411 
12412 static void
12413 sd_destroypkt_for_uscsi(struct buf *bp)
12414 {
12415 	struct uscsi_cmd *uscmd;
12416 	struct sd_xbuf	*xp;
12417 	struct scsi_pkt	*pktp;
12418 	struct sd_lun	*un;
12419 
12420 	ASSERT(bp != NULL);
12421 	xp = SD_GET_XBUF(bp);
12422 	ASSERT(xp != NULL);
12423 	un = SD_GET_UN(bp);
12424 	ASSERT(un != NULL);
12425 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12426 	pktp = SD_GET_PKTP(bp);
12427 	ASSERT(pktp != NULL);
12428 
12429 	SD_TRACE(SD_LOG_IO_CORE, un,
12430 	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
12431 
12432 	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
12433 	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
12434 	ASSERT(uscmd != NULL);
12435 
12436 	/* Save the status and the residual into the uscsi_cmd struct */
12437 	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
12438 	uscmd->uscsi_resid  = bp->b_resid;
12439 
12440 	/* Transfer scsi_pkt information to uscsi */
12441 	(void) scsi_uscsi_pktfini(pktp, uscmd);
12442 
12443 	/*
12444 	 * If enabled, copy any saved sense data into the area specified
12445 	 * by the uscsi command.
12446 	 */
12447 	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
12448 	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
12449 		/*
12450 		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
12451 		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
12452 		 */
12453 		uscmd->uscsi_rqstatus = xp->xb_sense_status;
12454 		uscmd->uscsi_rqresid  = xp->xb_sense_resid;
12455 		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
12456 			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
12457 			    MAX_SENSE_LENGTH);
12458 		} else {
12459 			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
12460 			    SENSE_LENGTH);
12461 		}
12462 	}
12463 
12464 	/* We are done with the scsi_pkt; free it now */
12465 	ASSERT(SD_GET_PKTP(bp) != NULL);
12466 	scsi_destroy_pkt(SD_GET_PKTP(bp));
12467 
12468 	SD_TRACE(SD_LOG_IO_CORE, un,
12469 	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
12470 }
12471 
12472 
12473 /*
12474  * Function: sd_bioclone_alloc
12475  *
12476  * Description: Allocate a buf(9S) and init it as per the given buf
12477  *		and the various arguments.  The associated sd_xbuf
12478  *		struct is (nearly) duplicated.  The struct buf *bp
12479  *		argument is saved in new_xp->xb_private.
12480  *
12481  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
12482  *		datalen - size of data area for the shadow bp
12483  *		blkno - starting LBA
12484  *		func - function pointer for b_iodone in the shadow buf. (May
12485  *		be NULL if none.)
12486  *
12487  * Return Code: Pointer to the allocated buf(9S) struct
12488  *
12489  * Context: Can sleep.
12490  */
12491 
12492 static struct buf *
12493 sd_bioclone_alloc(struct buf *bp, size_t datalen,
12494     daddr_t blkno, int (*func)(struct buf *))
12495 {
12496 	struct sd_lun	*un;
12497 	struct sd_xbuf	*xp;
12498 	struct sd_xbuf	*new_xp;
12499 	struct buf	*new_bp;
12500 
12501 	ASSERT(bp != NULL);
12502 	xp = SD_GET_XBUF(bp);
12503 	ASSERT(xp != NULL);
12504 	un = SD_GET_UN(bp);
12505 	ASSERT(un != NULL);
12506 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12507 
12508 	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
12509 	    NULL, KM_SLEEP);
12510 
12511 	new_bp->b_lblkno = blkno;
12512 
12513 	/*
12514 	 * Allocate an xbuf for the shadow bp and copy the contents of the
12515 	 * original xbuf into it.
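	 * On the completion side a layer finds its way back to the
	 * original request through this saved pointer, along the lines of
	 * this sketch (mirroring sd_mapblockaddr_iodone() above):
	 *
	 *	struct sd_xbuf *sxp = SD_GET_XBUF(shadow_bp);
	 *	struct buf *obp = (struct buf *)sxp->xb_private;
	 *
	 *	obp->b_resid = obp->b_bcount -
	 *	    (shadow_bp->b_bcount - shadow_bp->b_resid);
	 *	bioerror(obp, shadow_bp->b_error);
	 *	sd_bioclone_free(shadow_bp);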
12516 	 */
12517 	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
12518 	bcopy(xp, new_xp, sizeof (struct sd_xbuf));
12519 
12520 	/*
12521 	 * The given bp is automatically saved in the xb_private member
12522 	 * of the new xbuf.  Callers are allowed to depend on this.
12523 	 */
12524 	new_xp->xb_private = bp;
12525 
12526 	new_bp->b_private  = new_xp;
12527 
12528 	return (new_bp);
12529 }
12530 
12531 /*
12532  * Function: sd_shadow_buf_alloc
12533  *
12534  * Description: Allocate a buf(9S) and init it as per the given buf
12535  *		and the various arguments.  The associated sd_xbuf
12536  *		struct is (nearly) duplicated.  The struct buf *bp
12537  *		argument is saved in new_xp->xb_private.
12538  *
12539  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
12540  *		datalen - size of data area for the shadow bp
12541  *		bflags - B_READ or B_WRITE (pseudo flag)
12542  *		blkno - starting LBA
12543  *		func - function pointer for b_iodone in the shadow buf. (May
12544  *		be NULL if none.)
12545  *
12546  * Return Code: Pointer to the allocated buf(9S) struct
12547  *
12548  * Context: Can sleep.
12549  */
12550 
12551 static struct buf *
12552 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
12553     daddr_t blkno, int (*func)(struct buf *))
12554 {
12555 	struct	sd_lun	*un;
12556 	struct	sd_xbuf	*xp;
12557 	struct	sd_xbuf	*new_xp;
12558 	struct	buf	*new_bp;
12559 
12560 	ASSERT(bp != NULL);
12561 	xp = SD_GET_XBUF(bp);
12562 	ASSERT(xp != NULL);
12563 	un = SD_GET_UN(bp);
12564 	ASSERT(un != NULL);
12565 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12566 
12567 	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
12568 		bp_mapin(bp);
12569 	}
12570 
12571 	bflags &= (B_READ | B_WRITE);
12572 #if defined(__i386) || defined(__amd64)
12573 	new_bp = getrbuf(KM_SLEEP);
12574 	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
12575 	new_bp->b_bcount = datalen;
12576 	new_bp->b_flags = bflags |
12577 	    (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
12578 #else
12579 	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
12580 	    datalen, bflags, SLEEP_FUNC, NULL);
12581 #endif
12582 	new_bp->av_forw	= NULL;
12583 	new_bp->av_back	= NULL;
12584 	new_bp->b_dev	= bp->b_dev;
12585 	new_bp->b_blkno	= blkno;
12586 	new_bp->b_iodone = func;
12587 	new_bp->b_edev	= bp->b_edev;
12588 	new_bp->b_resid	= 0;
12589 
12590 	/* We need to preserve the B_FAILFAST flag */
12591 	if (bp->b_flags & B_FAILFAST) {
12592 		new_bp->b_flags |= B_FAILFAST;
12593 	}
12594 
12595 	/*
12596 	 * Allocate an xbuf for the shadow bp and copy the contents of the
12597 	 * original xbuf into it.
12598 	 */
12599 	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
12600 	bcopy(xp, new_xp, sizeof (struct sd_xbuf));
12601 
12602 	/* Need later to copy data between the shadow buf & original buf! */
12603 	new_xp->xb_pkt_flags |= PKT_CONSISTENT;
12604 
12605 	/*
12606 	 * The given bp is automatically saved in the xb_private member
12607 	 * of the new xbuf.  Callers are allowed to depend on this.
12608 	 */
12609 	new_xp->xb_private = bp;
12610 
12611 	new_bp->b_private  = new_xp;
12612 
12613 	return (new_bp);
12614 }
12615 
12616 /*
12617  * Function: sd_bioclone_free
12618  *
12619  * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
12620  *		in the larger than partition operation.
12621 * 12622 * Context: May be called under interrupt context 12623 */ 12624 12625 static void 12626 sd_bioclone_free(struct buf *bp) 12627 { 12628 struct sd_xbuf *xp; 12629 12630 ASSERT(bp != NULL); 12631 xp = SD_GET_XBUF(bp); 12632 ASSERT(xp != NULL); 12633 12634 /* 12635 * Call bp_mapout() before freeing the buf, in case a lower 12636 * layer or HBA had done a bp_mapin(). We must do this here 12637 * as we are the "originator" of the shadow buf. 12638 */ 12639 bp_mapout(bp); 12640 12641 /* 12642 * Null out b_iodone before freeing the bp, to ensure that the driver 12643 * never gets confused by a stale value in this field. (Just a little 12644 * extra defensiveness here.) 12645 */ 12646 bp->b_iodone = NULL; 12647 12648 freerbuf(bp); 12649 12650 kmem_free(xp, sizeof (struct sd_xbuf)); 12651 } 12652 12653 /* 12654 * Function: sd_shadow_buf_free 12655 * 12656 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12657 * 12658 * Context: May be called under interrupt context 12659 */ 12660 12661 static void 12662 sd_shadow_buf_free(struct buf *bp) 12663 { 12664 struct sd_xbuf *xp; 12665 12666 ASSERT(bp != NULL); 12667 xp = SD_GET_XBUF(bp); 12668 ASSERT(xp != NULL); 12669 12670 #if defined(__sparc) 12671 /* 12672 * Call bp_mapout() before freeing the buf, in case a lower 12673 * layer or HBA had done a bp_mapin(). We must do this here 12674 * as we are the "originator" of the shadow buf. 12675 */ 12676 bp_mapout(bp); 12677 #endif 12678 12679 /* 12680 * Null out b_iodone before freeing the bp, to ensure that the driver 12681 * never gets confused by a stale value in this field. (Just a little 12682 * extra defensiveness here.) 12683 */ 12684 bp->b_iodone = NULL; 12685 12686 #if defined(__i386) || defined(__amd64) 12687 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12688 freerbuf(bp); 12689 #else 12690 scsi_free_consistent_buf(bp); 12691 #endif 12692 12693 kmem_free(xp, sizeof (struct sd_xbuf)); 12694 } 12695 12696 12697 /* 12698 * Function: sd_print_transport_rejected_message 12699 * 12700 * Description: This implements the ludicrously complex rules for printing 12701 * a "transport rejected" message. This is to address the 12702 * specific problem of having a flood of this error message 12703 * produced when a failover occurs. 12704 * 12705 * Context: Any. 12706 */ 12707 12708 static void 12709 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12710 int code) 12711 { 12712 ASSERT(un != NULL); 12713 ASSERT(mutex_owned(SD_MUTEX(un))); 12714 ASSERT(xp != NULL); 12715 12716 /* 12717 * Print the "transport rejected" message under the following 12718 * conditions: 12719 * 12720 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12721 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12722 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12723 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12724 * scsi_transport(9F) (which indicates that the target might have 12725 * gone off-line). This uses the un->un_tran_fatal_count 12726 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12727 * received, and reset to zero whenever a TRAN_ACCEPT is returned 12728 * from scsi_transport(). 12729 * 12730 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12731 * the preceding cases in order for the message to be printed.
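 *
 * As a single predicate, the test below amounts to (sketch):
 *
 *     print = ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
 *         ((sd_level_mask & SD_LOGMASK_DIAG) ||
 *         (code != TRAN_FATAL_ERROR) ||
 *         (un->un_tran_fatal_count == 1));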
12732 */ 12733 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12734 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12735 (code != TRAN_FATAL_ERROR) || 12736 (un->un_tran_fatal_count == 1)) { 12737 switch (code) { 12738 case TRAN_BADPKT: 12739 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12740 "transport rejected bad packet\n"); 12741 break; 12742 case TRAN_FATAL_ERROR: 12743 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12744 "transport rejected fatal error\n"); 12745 break; 12746 default: 12747 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12748 "transport rejected (%d)\n", code); 12749 break; 12750 } 12751 } 12752 } 12753 } 12754 12755 12756 /* 12757 * Function: sd_add_buf_to_waitq 12758 * 12759 * Description: Add the given buf(9S) struct to the wait queue for the 12760 * instance. If sorting is enabled, then the buf is added 12761 * to the queue via an elevator sort algorithm (a la 12762 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12763 * If sorting is not enabled, then the buf is just added 12764 * to the end of the wait queue. 12765 * 12766 * Return Code: void 12767 * 12768 * Context: Does not sleep/block, therefore technically can be called 12769 * from any context. However, if sorting is enabled, then the 12770 * execution time is indeterminate, and may take a long time if 12771 * the wait queue grows large. 12772 */ 12773 12774 static void 12775 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12776 { 12777 struct buf *ap; 12778 12779 ASSERT(bp != NULL); 12780 ASSERT(un != NULL); 12781 ASSERT(mutex_owned(SD_MUTEX(un))); 12782 12783 /* If the queue is empty, add the buf as the only entry & return. */ 12784 if (un->un_waitq_headp == NULL) { 12785 ASSERT(un->un_waitq_tailp == NULL); 12786 un->un_waitq_headp = un->un_waitq_tailp = bp; 12787 bp->av_forw = NULL; 12788 return; 12789 } 12790 12791 ASSERT(un->un_waitq_tailp != NULL); 12792 12793 /* 12794 * If sorting is disabled, just add the buf to the tail end of 12795 * the wait queue and return. 12796 */ 12797 if (un->un_f_disksort_disabled) { 12798 un->un_waitq_tailp->av_forw = bp; 12799 un->un_waitq_tailp = bp; 12800 bp->av_forw = NULL; 12801 return; 12802 } 12803 12804 /* 12805 * Sort thru the list of requests currently on the wait queue 12806 * and add the new buf request at the appropriate position. 12807 * 12808 * The un->un_waitq_headp is an activity chain pointer on which 12809 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12810 * first queue holds those requests which are positioned after 12811 * the current SD_GET_BLKNO() (in the first request); the second holds 12812 * requests which came in after their SD_GET_BLKNO() number was passed. 12813 * Thus we implement a one way scan, retracting after reaching 12814 * the end of the drive to the first request on the second 12815 * queue, at which time it becomes the first queue. 12816 * A one-way scan is natural because of the way UNIX read-ahead 12817 * blocks are allocated. 12818 * 12819 * If we lie after the first request, then we must locate the 12820 * second request list and add ourselves to it. 12821 */ 12822 ap = un->un_waitq_headp; 12823 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12824 while (ap->av_forw != NULL) { 12825 /* 12826 * Look for an "inversion" in the (normally 12827 * ascending) block numbers. This indicates 12828 * the start of the second request list. 12829 */ 12830 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12831 /* 12832 * Search the second request list for the 12833 * first request at a larger block number.
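 * (A worked example, for illustration: suppose the chain reads
 * 40 -> 70 -> 10 -> 30, with the inversion at 70 -> 10 marking
 * the start of the second list, and a request for block 20
 * arrives. Since 20 < 40, we scan forward to the inversion, then
 * walk the second list and stop at 30, the first entry with a
 * larger block number.)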
12834 * We go before that; however, if there is 12835 * no such request, we go at the end. 12836 */ 12837 do { 12838 if (SD_GET_BLKNO(bp) < 12839 SD_GET_BLKNO(ap->av_forw)) { 12840 goto insert; 12841 } 12842 ap = ap->av_forw; 12843 } while (ap->av_forw != NULL); 12844 goto insert; /* after last */ 12845 } 12846 ap = ap->av_forw; 12847 } 12848 12849 /* 12850 * No inversions... we will go after the last, and 12851 * be the first request in the second request list. 12852 */ 12853 goto insert; 12854 } 12855 12856 /* 12857 * Request is at/after the current request... 12858 * sort in the first request list. 12859 */ 12860 while (ap->av_forw != NULL) { 12861 /* 12862 * We want to go after the current request (1) if 12863 * there is an inversion after it (i.e. it is the end 12864 * of the first request list), or (2) if the next 12865 * request is a larger block no. than our request. 12866 */ 12867 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12868 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12869 goto insert; 12870 } 12871 ap = ap->av_forw; 12872 } 12873 12874 /* 12875 * Neither a second list nor a larger request, therefore 12876 * we go at the end of the first list (which is the same 12877 * as the end of the whole shebang). 12878 */ 12879 insert: 12880 bp->av_forw = ap->av_forw; 12881 ap->av_forw = bp; 12882 12883 /* 12884 * If we inserted onto the tail end of the waitq, make sure the 12885 * tail pointer is updated. 12886 */ 12887 if (ap == un->un_waitq_tailp) { 12888 un->un_waitq_tailp = bp; 12889 } 12890 } 12891 12892 12893 /* 12894 * Function: sd_start_cmds 12895 * 12896 * Description: Remove and transport cmds from the driver queues. 12897 * 12898 * Arguments: un - pointer to the unit (soft state) struct for the target. 12899 * 12900 * immed_bp - ptr to a buf to be transported immediately. Only 12901 * the immed_bp is transported; bufs on the waitq are not 12902 * processed and the un_retry_bp is not checked. If immed_bp is 12903 * NULL, then normal queue processing is performed. 12904 * 12905 * Context: May be called from kernel thread context, interrupt context, 12906 * or runout callback context. This function may not block or 12907 * call routines that block. 12908 */ 12909 12910 static void 12911 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12912 { 12913 struct sd_xbuf *xp; 12914 struct buf *bp; 12915 void (*statp)(kstat_io_t *); 12916 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12917 void (*saved_statp)(kstat_io_t *); 12918 #endif 12919 int rval; 12920 12921 ASSERT(un != NULL); 12922 ASSERT(mutex_owned(SD_MUTEX(un))); 12923 ASSERT(un->un_ncmds_in_transport >= 0); 12924 ASSERT(un->un_throttle >= 0); 12925 12926 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12927 12928 do { 12929 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12930 saved_statp = NULL; 12931 #endif 12932 12933 /* 12934 * If we are syncing or dumping, fail the command to 12935 * avoid recursively calling back into scsi_transport(). 12936 * The dump I/O itself uses a separate code path so this 12937 * only prevents non-dump I/O from being sent while dumping. 12938 * File system sync takes place before dumping begins. 12939 * During panic, filesystem I/O is allowed provided 12940 * un_in_callback is <= 1. This is to prevent recursion 12941 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12942 * sd_start_cmds and so on. See panic.c for more information 12943 * about the states the system can be in during panic.
12944 */ 12945 if ((un->un_state == SD_STATE_DUMPING) || 12946 (ddi_in_panic() && (un->un_in_callback > 1))) { 12947 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12948 "sd_start_cmds: panicking\n"); 12949 goto exit; 12950 } 12951 12952 if ((bp = immed_bp) != NULL) { 12953 /* 12954 * We have a bp that must be transported immediately. 12955 * It's OK to transport the immed_bp here without doing 12956 * the throttle limit check because the immed_bp is 12957 * always used in a retry/recovery case. This means 12958 * that we know we are not at the throttle limit by 12959 * virtue of the fact that to get here we must have 12960 * already gotten a command back via sdintr(). This also 12961 * relies on (1) the command on un_retry_bp preventing 12962 * further commands from the waitq from being issued; 12963 * and (2) the code in sd_retry_command checking the 12964 * throttle limit before issuing a delayed or immediate 12965 * retry. This holds even if the throttle limit is 12966 * currently ratcheted down from its maximum value. 12967 */ 12968 statp = kstat_runq_enter; 12969 if (bp == un->un_retry_bp) { 12970 ASSERT((un->un_retry_statp == NULL) || 12971 (un->un_retry_statp == kstat_waitq_enter) || 12972 (un->un_retry_statp == 12973 kstat_runq_back_to_waitq)); 12974 /* 12975 * If the waitq kstat was incremented when 12976 * sd_set_retry_bp() queued this bp for a retry, 12977 * then we must set up statp so that the waitq 12978 * count will get decremented correctly below. 12979 * Also we must clear un->un_retry_statp to 12980 * ensure that we do not act on a stale value 12981 * in this field. 12982 */ 12983 if ((un->un_retry_statp == kstat_waitq_enter) || 12984 (un->un_retry_statp == 12985 kstat_runq_back_to_waitq)) { 12986 statp = kstat_waitq_to_runq; 12987 } 12988 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12989 saved_statp = un->un_retry_statp; 12990 #endif 12991 un->un_retry_statp = NULL; 12992 12993 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12994 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12995 "un_throttle:%d un_ncmds_in_transport:%d\n", 12996 un, un->un_retry_bp, un->un_throttle, 12997 un->un_ncmds_in_transport); 12998 } else { 12999 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 13000 "processing priority bp:0x%p\n", bp); 13001 } 13002 13003 } else if ((bp = un->un_waitq_headp) != NULL) { 13004 /* 13005 * A command on the waitq is ready to go, but do not 13006 * send it if: 13007 * 13008 * (1) the throttle limit has been reached, or 13009 * (2) a retry is pending, or 13010 * (3) a START_STOP_UNIT callback pending, or 13011 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 13012 * command is pending. 13013 * 13014 * For all of these conditions, IO processing will 13015 * restart after the condition is cleared. 13016 */ 13017 if (un->un_ncmds_in_transport >= un->un_throttle) { 13018 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13019 "sd_start_cmds: exiting, " 13020 "throttle limit reached!\n"); 13021 goto exit; 13022 } 13023 if (un->un_retry_bp != NULL) { 13024 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13025 "sd_start_cmds: exiting, retry pending!\n"); 13026 goto exit; 13027 } 13028 if (un->un_startstop_timeid != NULL) { 13029 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13030 "sd_start_cmds: exiting, " 13031 "START_STOP pending!\n"); 13032 goto exit; 13033 } 13034 if (un->un_direct_priority_timeid != NULL) { 13035 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13036 "sd_start_cmds: exiting, " 13037 "SD_PATH_DIRECT_PRIORITY cmd. 
pending!\n"); 13038 goto exit; 13039 } 13040 13041 /* Dequeue the command */ 13042 un->un_waitq_headp = bp->av_forw; 13043 if (un->un_waitq_headp == NULL) { 13044 un->un_waitq_tailp = NULL; 13045 } 13046 bp->av_forw = NULL; 13047 statp = kstat_waitq_to_runq; 13048 SD_TRACE(SD_LOG_IO_CORE, un, 13049 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 13050 13051 } else { 13052 /* No work to do so bail out now */ 13053 SD_TRACE(SD_LOG_IO_CORE, un, 13054 "sd_start_cmds: no more work, exiting!\n"); 13055 goto exit; 13056 } 13057 13058 /* 13059 * Reset the state to normal. This is the mechanism by which 13060 * the state transitions from either SD_STATE_RWAIT or 13061 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 13062 * If state is SD_STATE_PM_CHANGING then this command is 13063 * part of the device power control and the state must 13064 * not be put back to normal. Doing so would 13065 * allow new commands to proceed when they shouldn't, since 13066 * the device may be going off. 13067 */ 13068 if ((un->un_state != SD_STATE_SUSPENDED) && 13069 (un->un_state != SD_STATE_PM_CHANGING)) { 13070 New_state(un, SD_STATE_NORMAL); 13071 } 13072 13073 xp = SD_GET_XBUF(bp); 13074 ASSERT(xp != NULL); 13075 13076 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13077 /* 13078 * Allocate the scsi_pkt if we need one, or attach DMA 13079 * resources if we have a scsi_pkt that needs them. The 13080 * latter should only occur for commands that are being 13081 * retried. 13082 */ 13083 if ((xp->xb_pktp == NULL) || 13084 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 13085 #else 13086 if (xp->xb_pktp == NULL) { 13087 #endif 13088 /* 13089 * There is no scsi_pkt allocated for this buf. Call 13090 * the initpkt function to allocate & init one. 13091 * 13092 * The scsi_init_pkt runout callback functionality is 13093 * implemented as follows: 13094 * 13095 * 1) The initpkt function always calls 13096 * scsi_init_pkt(9F) with sdrunout specified as the 13097 * callback routine. 13098 * 2) A successful packet allocation is initialized and 13099 * the I/O is transported. 13100 * 3) The I/O associated with an allocation resource 13101 * failure is left on its queue to be retried via 13102 * runout or the next I/O. 13103 * 4) The I/O associated with a DMA error is removed 13104 * from the queue and failed with EIO. Processing of 13105 * the transport queues is also halted to be 13106 * restarted via runout or the next I/O. 13107 * 5) The I/O associated with a CDB size or packet 13108 * size error is removed from the queue and failed 13109 * with EIO. Processing of the transport queues is 13110 * continued. 13111 * 13112 * Note: there is no interface for canceling a runout 13113 * callback. To prevent the driver from detaching or 13114 * suspending while a runout is pending, the driver 13115 * state is set to SD_STATE_RWAIT. 13116 * 13117 * Note: using the scsi_init_pkt callback facility can 13118 * result in an I/O request persisting at the head of 13119 * the list which cannot be satisfied even after 13120 * multiple retries. In the future the driver may 13121 * implement some kind of maximum runout count before 13122 * failing an I/O. 13123 * 13124 * Note: the use of funcp below may seem superfluous, 13125 * but it helps warlock figure out the correct 13126 * initpkt function calls (see [s]sd.wlcmd).
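 *
 * In brief, the return codes of the initpkt call are handled as
 * follows (sketch of the switch below):
 *
 *     SD_PKT_ALLOC_SUCCESS                transport the I/O
 *     SD_PKT_ALLOC_FAILURE                requeue a waitq bp for runout;
 *                                         fail or reschedule an immed_bp
 *     SD_PKT_ALLOC_FAILURE_NO_DMA         fail with EIO, keep queues moving
 *     SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL  fail with EIO, keep queues moving
 *     SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL  fail with EIO, keep queues moving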
13127 */ 13128 struct scsi_pkt *pktp; 13129 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 13130 13131 ASSERT(bp != un->un_rqs_bp); 13132 13133 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 13134 switch ((*funcp)(bp, &pktp)) { 13135 case SD_PKT_ALLOC_SUCCESS: 13136 xp->xb_pktp = pktp; 13137 SD_TRACE(SD_LOG_IO_CORE, un, 13138 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 13139 pktp); 13140 goto got_pkt; 13141 13142 case SD_PKT_ALLOC_FAILURE: 13143 /* 13144 * Temporary (hopefully) resource depletion. 13145 * Since retries and RQS commands always have a 13146 * scsi_pkt allocated, these cases should never 13147 * get here. So the only cases this needs to 13148 * handle is a bp from the waitq (which we put 13149 * back onto the waitq for sdrunout), or a bp 13150 * sent as an immed_bp (which we just fail). 13151 */ 13152 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13153 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 13154 13155 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13156 13157 if (bp == immed_bp) { 13158 /* 13159 * If SD_XB_DMA_FREED is clear, then 13160 * this is a failure to allocate a 13161 * scsi_pkt, and we must fail the 13162 * command. 13163 */ 13164 if ((xp->xb_pkt_flags & 13165 SD_XB_DMA_FREED) == 0) { 13166 break; 13167 } 13168 13169 /* 13170 * If this immediate command is NOT our 13171 * un_retry_bp, then we must fail it. 13172 */ 13173 if (bp != un->un_retry_bp) { 13174 break; 13175 } 13176 13177 /* 13178 * We get here if this cmd is our 13179 * un_retry_bp that was DMAFREED, but 13180 * scsi_init_pkt() failed to reallocate 13181 * DMA resources when we attempted to 13182 * retry it. This can happen when an 13183 * mpxio failover is in progress, but 13184 * we don't want to just fail the 13185 * command in this case. 13186 * 13187 * Use timeout(9F) to restart it after 13188 * a 100ms delay. We don't want to 13189 * let sdrunout() restart it, because 13190 * sdrunout() is just supposed to start 13191 * commands that are sitting on the 13192 * wait queue. The un_retry_bp stays 13193 * set until the command completes, but 13194 * sdrunout can be called many times 13195 * before that happens. Since sdrunout 13196 * cannot tell if the un_retry_bp is 13197 * already in the transport, it could 13198 * end up calling scsi_transport() for 13199 * the un_retry_bp multiple times. 13200 * 13201 * Also: don't schedule the callback 13202 * if some other callback is already 13203 * pending. 13204 */ 13205 if (un->un_retry_statp == NULL) { 13206 /* 13207 * restore the kstat pointer to 13208 * keep kstat counts coherent 13209 * when we do retry the command. 13210 */ 13211 un->un_retry_statp = 13212 saved_statp; 13213 } 13214 13215 if ((un->un_startstop_timeid == NULL) && 13216 (un->un_retry_timeid == NULL) && 13217 (un->un_direct_priority_timeid == 13218 NULL)) { 13219 13220 un->un_retry_timeid = 13221 timeout( 13222 sd_start_retry_command, 13223 un, SD_RESTART_TIMEOUT); 13224 } 13225 goto exit; 13226 } 13227 13228 #else 13229 if (bp == immed_bp) { 13230 break; /* Just fail the command */ 13231 } 13232 #endif 13233 13234 /* Add the buf back to the head of the waitq */ 13235 bp->av_forw = un->un_waitq_headp; 13236 un->un_waitq_headp = bp; 13237 if (un->un_waitq_tailp == NULL) { 13238 un->un_waitq_tailp = bp; 13239 } 13240 goto exit; 13241 13242 case SD_PKT_ALLOC_FAILURE_NO_DMA: 13243 /* 13244 * HBA DMA resource failure. Fail the command 13245 * and continue processing of the queues. 
13246 */ 13247 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13248 "sd_start_cmds: " 13249 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 13250 break; 13251 13252 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 13253 /* 13254 * Note:x86: Partial DMA mapping not supported 13255 * for USCSI commands, and all the needed DMA 13256 * resources were not allocated. 13257 */ 13258 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13259 "sd_start_cmds: " 13260 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 13261 break; 13262 13263 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 13264 /* 13265 * Note:x86: Request cannot fit into CDB based 13266 * on lba and len. 13267 */ 13268 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13269 "sd_start_cmds: " 13270 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13271 break; 13272 13273 default: 13274 /* Should NEVER get here! */ 13275 panic("scsi_initpkt error"); 13276 /*NOTREACHED*/ 13277 } 13278 13279 /* 13280 * Fatal error in allocating a scsi_pkt for this buf. 13281 * Update kstats & return the buf with an error code. 13282 * We must use sd_return_failed_command_no_restart() to 13283 * avoid a recursive call back into sd_start_cmds(). 13284 * However this also means that we must keep processing 13285 * the waitq here in order to avoid stalling. 13286 */ 13287 if (statp == kstat_waitq_to_runq) { 13288 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13289 } 13290 sd_return_failed_command_no_restart(un, bp, EIO); 13291 if (bp == immed_bp) { 13292 /* immed_bp is gone by now, so clear this */ 13293 immed_bp = NULL; 13294 } 13295 continue; 13296 } 13297 got_pkt: 13298 if (bp == immed_bp) { 13299 /* goto the head of the class.... */ 13300 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13301 } 13302 13303 un->un_ncmds_in_transport++; 13304 SD_UPDATE_KSTATS(un, statp, bp); 13305 13306 /* 13307 * Call scsi_transport() to send the command to the target. 13308 * According to SCSA architecture, we must drop the mutex here 13309 * before calling scsi_transport() in order to avoid deadlock. 13310 * Note that the scsi_pkt's completion routine can be executed 13311 * (from interrupt context) even before the call to 13312 * scsi_transport() returns. 13313 */ 13314 SD_TRACE(SD_LOG_IO_CORE, un, 13315 "sd_start_cmds: calling scsi_transport()\n"); 13316 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13317 13318 mutex_exit(SD_MUTEX(un)); 13319 rval = scsi_transport(xp->xb_pktp); 13320 mutex_enter(SD_MUTEX(un)); 13321 13322 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13323 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13324 13325 switch (rval) { 13326 case TRAN_ACCEPT: 13327 /* Clear this with every pkt accepted by the HBA */ 13328 un->un_tran_fatal_count = 0; 13329 break; /* Success; try the next cmd (if any) */ 13330 13331 case TRAN_BUSY: 13332 un->un_ncmds_in_transport--; 13333 ASSERT(un->un_ncmds_in_transport >= 0); 13334 13335 /* 13336 * Don't retry request sense, the sense data 13337 * is lost when another request is sent. 13338 * Free up the rqs buf and retry 13339 * the original failed cmd. Update kstat. 13340 */ 13341 if (bp == un->un_rqs_bp) { 13342 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13343 bp = sd_mark_rqs_idle(un, xp); 13344 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13345 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13346 kstat_waitq_enter); 13347 goto exit; 13348 } 13349 13350 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13351 /* 13352 * Free the DMA resources for the scsi_pkt. 
This will 13353 * allow mpxio to select another path the next time 13354 * we call scsi_transport() with this scsi_pkt. 13355 * See sdintr() for the rationalization behind this. 13356 */ 13357 if ((un->un_f_is_fibre == TRUE) && 13358 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13359 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13360 scsi_dmafree(xp->xb_pktp); 13361 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13362 } 13363 #endif 13364 13365 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13366 /* 13367 * Commands that are SD_PATH_DIRECT_PRIORITY 13368 * are for error recovery situations. These do 13369 * not use the normal command waitq, so if they 13370 * get a TRAN_BUSY we cannot put them back onto 13371 * the waitq for later retry. One possible 13372 * problem is that there could already be some 13373 * other command on un_retry_bp that is waiting 13374 * for this one to complete, so we would be 13375 * deadlocked if we put this command back onto 13376 * the waitq for later retry (since un_retry_bp 13377 * must complete before the driver gets back to 13378 * commands on the waitq). 13379 * 13380 * To avoid deadlock we must schedule a callback 13381 * that will restart this command after a set 13382 * interval. This should keep retrying for as 13383 * long as the underlying transport keeps 13384 * returning TRAN_BUSY (just like for other 13385 * commands). Use the same timeout interval as 13386 * for the ordinary TRAN_BUSY retry. 13387 */ 13388 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13389 "sd_start_cmds: scsi_transport() returned " 13390 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13391 13392 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13393 un->un_direct_priority_timeid = 13394 timeout(sd_start_direct_priority_command, 13395 bp, SD_BSY_TIMEOUT / 500); 13396 13397 goto exit; 13398 } 13399 13400 /* 13401 * For TRAN_BUSY, we want to reduce the throttle value, 13402 * unless we are retrying a command. 13403 */ 13404 if (bp != un->un_retry_bp) { 13405 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13406 } 13407 13408 /* 13409 * Set up the bp to be tried again 10 ms later. 13410 * Note:x86: Is there a timeout value in the sd_lun 13411 * for this condition? 13412 */ 13413 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13414 kstat_runq_back_to_waitq); 13415 goto exit; 13416 13417 case TRAN_FATAL_ERROR: 13418 un->un_tran_fatal_count++; 13419 /* FALLTHRU */ 13420 13421 case TRAN_BADPKT: 13422 default: 13423 un->un_ncmds_in_transport--; 13424 ASSERT(un->un_ncmds_in_transport >= 0); 13425 13426 /* 13427 * If this is our REQUEST SENSE command with a 13428 * transport error, we must get back the pointers 13429 * to the original buf, and mark the REQUEST 13430 * SENSE command as "available". 13431 */ 13432 if (bp == un->un_rqs_bp) { 13433 bp = sd_mark_rqs_idle(un, xp); 13434 xp = SD_GET_XBUF(bp); 13435 } else { 13436 /* 13437 * Legacy behavior: do not update transport 13438 * error count for request sense commands. 13439 */ 13440 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13441 } 13442 13443 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13444 sd_print_transport_rejected_message(un, xp, rval); 13445 13446 /* 13447 * We must use sd_return_failed_command_no_restart() to 13448 * avoid a recursive call back into sd_start_cmds(). 13449 * However this also means that we must keep processing 13450 * the waitq here in order to avoid stalling. 
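 *
 * (sd_return_failed_command() funnels into sd_return_command(),
 * whose default case itself calls sd_start_cmds(); hence the
 * _no_restart variant here.)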
13451 */ 13452 sd_return_failed_command_no_restart(un, bp, EIO); 13453 13454 /* 13455 * Notify any threads waiting in sd_ddi_suspend() that 13456 * a command completion has occurred. 13457 */ 13458 if (un->un_state == SD_STATE_SUSPENDED) { 13459 cv_broadcast(&un->un_disk_busy_cv); 13460 } 13461 13462 if (bp == immed_bp) { 13463 /* immed_bp is gone by now, so clear this */ 13464 immed_bp = NULL; 13465 } 13466 break; 13467 } 13468 13469 } while (immed_bp == NULL); 13470 13471 exit: 13472 ASSERT(mutex_owned(SD_MUTEX(un))); 13473 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13474 } 13475 13476 13477 /* 13478 * Function: sd_return_command 13479 * 13480 * Description: Returns a command to its originator (with or without an 13481 * error). Also starts commands waiting to be transported 13482 * to the target. 13483 * 13484 * Context: May be called from interrupt, kernel, or timeout context 13485 */ 13486 13487 static void 13488 sd_return_command(struct sd_lun *un, struct buf *bp) 13489 { 13490 struct sd_xbuf *xp; 13491 struct scsi_pkt *pktp; 13492 13493 ASSERT(bp != NULL); 13494 ASSERT(un != NULL); 13495 ASSERT(mutex_owned(SD_MUTEX(un))); 13496 ASSERT(bp != un->un_rqs_bp); 13497 xp = SD_GET_XBUF(bp); 13498 ASSERT(xp != NULL); 13499 13500 pktp = SD_GET_PKTP(bp); 13501 13502 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13503 13504 /* 13505 * Note: check for the "sdrestart failed" case. 13506 */ 13507 if ((un->un_partial_dma_supported == 1) && 13508 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13509 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13510 (xp->xb_pktp->pkt_resid == 0)) { 13511 13512 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13513 /* 13514 * Successfully set up next portion of cmd 13515 * transfer, try sending it 13516 */ 13517 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13518 NULL, NULL, 0, (clock_t)0, NULL); 13519 sd_start_cmds(un, NULL); 13520 return; /* Note:x86: need a return here? */ 13521 } 13522 } 13523 13524 /* 13525 * If this is the failfast bp, clear it from un_failfast_bp. This 13526 * can happen if upon being re-tried the failfast bp either 13527 * succeeded or encountered another error (possibly even a different 13528 * error than the one that precipitated the failfast state, but in 13529 * that case it would have had to exhaust retries as well). Regardless, 13530 * this should not occur whenever the instance is in the active 13531 * failfast state. 13532 */ 13533 if (bp == un->un_failfast_bp) { 13534 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13535 un->un_failfast_bp = NULL; 13536 } 13537 13538 /* 13539 * Clear the failfast state upon successful completion of ANY cmd. 13540 */ 13541 if (bp->b_error == 0) { 13542 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13543 } 13544 13545 /* 13546 * This is used if the command was retried one or more times. Show that 13547 * we are done with it, and allow processing of the waitq to resume. 13548 */ 13549 if (bp == un->un_retry_bp) { 13550 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13551 "sd_return_command: un:0x%p: " 13552 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13553 un->un_retry_bp = NULL; 13554 un->un_retry_statp = NULL; 13555 } 13556 13557 SD_UPDATE_RDWR_STATS(un, bp); 13558 SD_UPDATE_PARTITION_STATS(un, bp); 13559 13560 switch (un->un_state) { 13561 case SD_STATE_SUSPENDED: 13562 /* 13563 * Notify any threads waiting in sd_ddi_suspend() that 13564 * a command completion has occurred. 
13565 */ 13566 cv_broadcast(&un->un_disk_busy_cv); 13567 break; 13568 default: 13569 sd_start_cmds(un, NULL); 13570 break; 13571 } 13572 13573 /* Return this command up the iodone chain to its originator. */ 13574 mutex_exit(SD_MUTEX(un)); 13575 13576 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13577 xp->xb_pktp = NULL; 13578 13579 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13580 13581 ASSERT(!mutex_owned(SD_MUTEX(un))); 13582 mutex_enter(SD_MUTEX(un)); 13583 13584 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13585 } 13586 13587 13588 /* 13589 * Function: sd_return_failed_command 13590 * 13591 * Description: Command completion when an error occurred. 13592 * 13593 * Context: May be called from interrupt context 13594 */ 13595 13596 static void 13597 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 13598 { 13599 ASSERT(bp != NULL); 13600 ASSERT(un != NULL); 13601 ASSERT(mutex_owned(SD_MUTEX(un))); 13602 13603 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13604 "sd_return_failed_command: entry\n"); 13605 13606 /* 13607 * b_resid could already be nonzero due to a partial data 13608 * transfer, so do not change it here. 13609 */ 13610 SD_BIOERROR(bp, errcode); 13611 13612 sd_return_command(un, bp); 13613 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13614 "sd_return_failed_command: exit\n"); 13615 } 13616 13617 13618 /* 13619 * Function: sd_return_failed_command_no_restart 13620 * 13621 * Description: Same as sd_return_failed_command, but ensures that no 13622 * call back into sd_start_cmds will be issued. 13623 * 13624 * Context: May be called from interrupt context 13625 */ 13626 13627 static void 13628 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 13629 int errcode) 13630 { 13631 struct sd_xbuf *xp; 13632 13633 ASSERT(bp != NULL); 13634 ASSERT(un != NULL); 13635 ASSERT(mutex_owned(SD_MUTEX(un))); 13636 xp = SD_GET_XBUF(bp); 13637 ASSERT(xp != NULL); 13638 ASSERT(errcode != 0); 13639 13640 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13641 "sd_return_failed_command_no_restart: entry\n"); 13642 13643 /* 13644 * b_resid could already be nonzero due to a partial data 13645 * transfer, so do not change it here. 13646 */ 13647 SD_BIOERROR(bp, errcode); 13648 13649 /* 13650 * If this is the failfast bp, clear it. This can happen if the 13651 * failfast bp encountered a fatal error when we attempted to 13652 * re-try it (such as a scsi_transport(9F) failure). However, 13653 * we should NOT be in an active failfast state if the failfast 13654 * bp is not NULL. 13655 */ 13656 if (bp == un->un_failfast_bp) { 13657 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13658 un->un_failfast_bp = NULL; 13659 } 13660 13661 if (bp == un->un_retry_bp) { 13662 /* 13663 * This command was retried one or more times. Show that we are 13664 * done with it, and allow processing of the waitq to resume.
13665 */ 13666 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13667 "sd_return_failed_command_no_restart: " 13668 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13669 un->un_retry_bp = NULL; 13670 un->un_retry_statp = NULL; 13671 } 13672 13673 SD_UPDATE_RDWR_STATS(un, bp); 13674 SD_UPDATE_PARTITION_STATS(un, bp); 13675 13676 mutex_exit(SD_MUTEX(un)); 13677 13678 if (xp->xb_pktp != NULL) { 13679 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13680 xp->xb_pktp = NULL; 13681 } 13682 13683 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13684 13685 mutex_enter(SD_MUTEX(un)); 13686 13687 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13688 "sd_return_failed_command_no_restart: exit\n"); 13689 } 13690 13691 13692 /* 13693 * Function: sd_retry_command 13694 * 13695 * Description: Queue up a command for retry, or (optionally) fail it 13696 * if retry counts are exhausted. 13697 * 13698 * Arguments: un - Pointer to the sd_lun struct for the target. 13699 * 13700 * bp - Pointer to the buf for the command to be retried. 13701 * 13702 * retry_check_flag - Flag to see which (if any) of the retry 13703 * counts should be decremented/checked. If the indicated 13704 * retry count is exhausted, then the command will not be 13705 * retried; it will be failed instead. This should use a 13706 * value equal to one of the following: 13707 * 13708 * SD_RETRIES_NOCHECK 13709 * SD_RETRIES_STANDARD 13710 * SD_RETRIES_VICTIM 13711 * 13712 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 13713 * if the check should be made to see if FLAG_ISOLATE is set 13714 * in the pkt. If FLAG_ISOLATE is set, then the command is 13715 * not retried; it is simply failed. 13716 * 13717 * user_funcp - Ptr to function to call before dispatching the 13718 * command. May be NULL if no action needs to be performed. 13719 * (Primarily intended for printing messages.) 13720 * 13721 * user_arg - Optional argument to be passed along to 13722 * the user_funcp call. 13723 * 13724 * failure_code - errno return code to set in the bp if the 13725 * command is going to be failed. 13726 * 13727 * retry_delay - Retry delay interval in (clock_t) units. May 13728 * be zero, which indicates that the command should be retried 13729 * immediately (i.e., without an intervening delay). 13730 * 13731 * statp - Ptr to kstat function to be updated if the command 13732 * is queued for a delayed retry. May be NULL if no kstat 13733 * update is desired. 13734 * 13735 * Context: May be called from interrupt context. 13736 */ 13737 13738 static void 13739 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 13740 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 13741 code), void *user_arg, int failure_code, clock_t retry_delay, 13742 void (*statp)(kstat_io_t *)) 13743 { 13744 struct sd_xbuf *xp; 13745 struct scsi_pkt *pktp; 13746 13747 ASSERT(un != NULL); 13748 ASSERT(mutex_owned(SD_MUTEX(un))); 13749 ASSERT(bp != NULL); 13750 xp = SD_GET_XBUF(bp); 13751 ASSERT(xp != NULL); 13752 pktp = SD_GET_PKTP(bp); 13753 ASSERT(pktp != NULL); 13754 13755 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13756 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 13757 13758 /* 13759 * If we are syncing or dumping, fail the command to avoid 13760 * recursively calling back into scsi_transport(). 13761 */ 13762 if (ddi_in_panic()) { 13763 goto fail_command_no_log; 13764 } 13765 13766 /* 13767 * We should never be retrying a command with FLAG_DIAGNOSE set, so 13768 * log an error and fail the command.
13769 */ 13770 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 13771 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 13772 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 13773 sd_dump_memory(un, SD_LOG_IO, "CDB", 13774 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 13775 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 13776 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 13777 goto fail_command; 13778 } 13779 13780 /* 13781 * If we are suspended, then put the command onto the head of the 13782 * wait queue since we don't want to start more commands, and 13783 * clear un_retry_bp. The command will be handled from the wait 13784 * queue when we are resumed. 13785 */ 13786 switch (un->un_state) { 13787 case SD_STATE_SUSPENDED: 13788 case SD_STATE_DUMPING: 13789 bp->av_forw = un->un_waitq_headp; 13790 un->un_waitq_headp = bp; 13791 if (un->un_waitq_tailp == NULL) { 13792 un->un_waitq_tailp = bp; 13793 } 13794 if (bp == un->un_retry_bp) { 13795 un->un_retry_bp = NULL; 13796 un->un_retry_statp = NULL; 13797 } 13798 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13799 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 13800 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 13801 return; 13802 default: 13803 break; 13804 } 13805 13806 /* 13807 * If the caller wants us to check FLAG_ISOLATE, then see if that 13808 * is set; if it is then we do not want to retry the command. 13809 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 13810 */ 13811 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 13812 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 13813 goto fail_command; 13814 } 13815 } 13816 13817 13818 /* 13819 * If SD_RETRIES_FAILFAST is set, it indicates that either a 13820 * command timeout or a selection timeout has occurred. This means 13821 * that we were unable to establish any kind of communication with 13822 * the target, and subsequent retries and/or commands are likely 13823 * to encounter similar results and take a long time to complete. 13824 * 13825 * If this is a failfast error condition, we need to update the 13826 * failfast state, even if this bp does not have B_FAILFAST set. 13827 */ 13828 if (retry_check_flag & SD_RETRIES_FAILFAST) { 13829 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 13830 ASSERT(un->un_failfast_bp == NULL); 13831 /* 13832 * If we are already in the active failfast state, and 13833 * another failfast error condition has been detected, 13834 * then fail this command if it has B_FAILFAST set. 13835 * If B_FAILFAST is clear, then maintain the legacy 13836 * behavior of retrying heroically, even though this will 13837 * take a lot more time to fail the command. 13838 */ 13839 if (bp->b_flags & B_FAILFAST) { 13840 goto fail_command; 13841 } 13842 } else { 13843 /* 13844 * We're not in the active failfast state, but we 13845 * have a failfast error condition, so we must begin 13846 * transition to the next state. We do this regardless 13847 * of whether or not this bp has B_FAILFAST set. 13848 */ 13849 if (un->un_failfast_bp == NULL) { 13850 /* 13851 * This is the first bp to meet a failfast 13852 * condition so save it on un_failfast_bp & 13853 * do normal retry processing. Do not enter 13854 * active failfast state yet. This marks 13855 * entry into the "failfast pending" state.
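 *
 * Failfast transitions, as implemented by this routine (sketch):
 *
 *     INACTIVE --failfast error, un_failfast_bp == NULL--> "pending"
 *         (this bp is saved in un_failfast_bp; normal retries continue)
 *     "pending" --failfast error on the same bp----------> ACTIVE
 *         (queues flushed via sd_failfast_flushq())
 *     any state --SD_RETRIES_FAILFAST clear--------------> INACTIVE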
13856 */ 13857 un->un_failfast_bp = bp; 13858 13859 } else if (un->un_failfast_bp == bp) { 13860 /* 13861 * This is the second time *this* bp has 13862 * encountered a failfast error condition, 13863 * so enter active failfast state & flush 13864 * queues as appropriate. 13865 */ 13866 un->un_failfast_state = SD_FAILFAST_ACTIVE; 13867 un->un_failfast_bp = NULL; 13868 sd_failfast_flushq(un); 13869 13870 /* 13871 * Fail this bp now if B_FAILFAST set; 13872 * otherwise continue with retries. (It would 13873 * be pretty ironic if this bp succeeded on a 13874 * subsequent retry after we just flushed all 13875 * the queues). 13876 */ 13877 if (bp->b_flags & B_FAILFAST) { 13878 goto fail_command; 13879 } 13880 13881 #if !defined(lint) && !defined(__lint) 13882 } else { 13883 /* 13884 * If neither of the preceding conditionals 13885 * was true, it means that there is some 13886 * *other* bp that has met an initial failfast 13887 * condition and is currently either being 13888 * retried or is waiting to be retried. In 13889 * that case we should perform normal retry 13890 * processing on *this* bp, since there is a 13891 * chance that the current failfast condition 13892 * is transient and recoverable. If that does 13893 * not turn out to be the case, then retries 13894 * will be cleared when the wait queue is 13895 * flushed anyway. 13896 */ 13897 #endif 13898 } 13899 } 13900 } else { 13901 /* 13902 * SD_RETRIES_FAILFAST is clear, which indicates that we 13903 * likely were able to at least establish some level of 13904 * communication with the target and subsequent commands 13905 * and/or retries are likely to get through to the target. 13906 * In this case we want to be aggressive about clearing 13907 * the failfast state. Note that this does not affect 13908 * the "failfast pending" condition. 13909 */ 13910 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13911 } 13912 13913 13914 /* 13915 * Check the specified retry count to see if we can still do 13916 * any retries with this pkt before we should fail it. 13917 */ 13918 switch (retry_check_flag & SD_RETRIES_MASK) { 13919 case SD_RETRIES_VICTIM: 13920 /* 13921 * Check the victim retry count. If exhausted, then fall 13922 * thru & check against the standard retry count. 13923 */ 13924 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 13925 /* Increment count & proceed with the retry */ 13926 xp->xb_victim_retry_count++; 13927 break; 13928 } 13929 /* Victim retries exhausted, fall back to std. retries... */ 13930 /* FALLTHRU */ 13931 13932 case SD_RETRIES_STANDARD: 13933 if (xp->xb_retry_count >= un->un_retry_count) { 13934 /* Retries exhausted, fail the command */ 13935 SD_TRACE(SD_LOG_IO_CORE, un, 13936 "sd_retry_command: retries exhausted!\n"); 13937 /* 13938 * update b_resid for failed SCMD_READ & SCMD_WRITE 13939 * commands with nonzero pkt_resid.
13940 */ 13941 if ((pktp->pkt_reason == CMD_CMPLT) && 13942 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 13943 (pktp->pkt_resid != 0)) { 13944 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 13945 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 13946 SD_UPDATE_B_RESID(bp, pktp); 13947 } 13948 } 13949 goto fail_command; 13950 } 13951 xp->xb_retry_count++; 13952 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13953 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13954 break; 13955 13956 case SD_RETRIES_UA: 13957 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 13958 /* Retries exhausted, fail the command */ 13959 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13960 "Unit Attention retries exhausted. " 13961 "Check the target.\n"); 13962 goto fail_command; 13963 } 13964 xp->xb_ua_retry_count++; 13965 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13966 "sd_retry_command: retry count:%d\n", 13967 xp->xb_ua_retry_count); 13968 break; 13969 13970 case SD_RETRIES_BUSY: 13971 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13972 /* Retries exhausted, fail the command */ 13973 SD_TRACE(SD_LOG_IO_CORE, un, 13974 "sd_retry_command: retries exhausted!\n"); 13975 goto fail_command; 13976 } 13977 xp->xb_retry_count++; 13978 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13979 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13980 break; 13981 13982 case SD_RETRIES_NOCHECK: 13983 default: 13984 /* No retry count to check. Just proceed with the retry */ 13985 break; 13986 } 13987 13988 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13989 13990 /* 13991 * If we were given a zero timeout, we must attempt to retry the 13992 * command immediately (ie, without a delay). 13993 */ 13994 if (retry_delay == 0) { 13995 /* 13996 * Check some limiting conditions to see if we can actually 13997 * do the immediate retry. If we cannot, then we must 13998 * fall back to queueing up a delayed retry. 13999 */ 14000 if (un->un_ncmds_in_transport >= un->un_throttle) { 14001 /* 14002 * We are at the throttle limit for the target, 14003 * fall back to delayed retry. 14004 */ 14005 retry_delay = SD_BSY_TIMEOUT; 14006 statp = kstat_waitq_enter; 14007 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14008 "sd_retry_command: immed. retry hit " 14009 "throttle!\n"); 14010 } else { 14011 /* 14012 * We're clear to proceed with the immediate retry. 14013 * First call the user-provided function (if any) 14014 */ 14015 if (user_funcp != NULL) { 14016 (*user_funcp)(un, bp, user_arg, 14017 SD_IMMEDIATE_RETRY_ISSUED); 14018 #ifdef __lock_lint 14019 sd_print_incomplete_msg(un, bp, user_arg, 14020 SD_IMMEDIATE_RETRY_ISSUED); 14021 sd_print_cmd_incomplete_msg(un, bp, user_arg, 14022 SD_IMMEDIATE_RETRY_ISSUED); 14023 sd_print_sense_failed_msg(un, bp, user_arg, 14024 SD_IMMEDIATE_RETRY_ISSUED); 14025 #endif 14026 } 14027 14028 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14029 "sd_retry_command: issuing immediate retry\n"); 14030 14031 /* 14032 * Call sd_start_cmds() to transport the command to 14033 * the target. 14034 */ 14035 sd_start_cmds(un, bp); 14036 14037 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14038 "sd_retry_command exit\n"); 14039 return; 14040 } 14041 } 14042 14043 /* 14044 * Set up to retry the command after a delay. 
14045 * First call the user-provided function (if any) 14046 */ 14047 if (user_funcp != NULL) { 14048 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 14049 } 14050 14051 sd_set_retry_bp(un, bp, retry_delay, statp); 14052 14053 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14054 return; 14055 14056 fail_command: 14057 14058 if (user_funcp != NULL) { 14059 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 14060 } 14061 14062 fail_command_no_log: 14063 14064 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14065 "sd_retry_command: returning failed command\n"); 14066 14067 sd_return_failed_command(un, bp, failure_code); 14068 14069 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14070 } 14071 14072 14073 /* 14074 * Function: sd_set_retry_bp 14075 * 14076 * Description: Set up the given bp for retry. 14077 * 14078 * Arguments: un - ptr to associated softstate 14079 * bp - ptr to buf(9S) for the command 14080 * retry_delay - time interval before issuing retry (may be 0) 14081 * statp - optional pointer to kstat function 14082 * 14083 * Context: May be called under interrupt context 14084 */ 14085 14086 static void 14087 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 14088 void (*statp)(kstat_io_t *)) 14089 { 14090 ASSERT(un != NULL); 14091 ASSERT(mutex_owned(SD_MUTEX(un))); 14092 ASSERT(bp != NULL); 14093 14094 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14095 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 14096 14097 /* 14098 * Indicate that the command is being retried. This will not allow any 14099 * other commands on the wait queue to be transported to the target 14100 * until this command has been completed (success or failure). The 14101 * "retry command" is not transported to the target until the given 14102 * time delay expires, unless the user specified a 0 retry_delay. 14103 * 14104 * Note: the timeout(9F) callback routine is what actually calls 14105 * sd_start_cmds() to transport the command, with the exception of a 14106 * zero retry_delay. The only current implementor of a zero retry delay 14107 * is the case where a START_STOP_UNIT is sent to spin-up a device. 14108 */ 14109 if (un->un_retry_bp == NULL) { 14110 ASSERT(un->un_retry_statp == NULL); 14111 un->un_retry_bp = bp; 14112 14113 /* 14114 * If the user has not specified a delay the command should 14115 * be queued and no timeout should be scheduled. 14116 */ 14117 if (retry_delay == 0) { 14118 /* 14119 * Save the kstat pointer that will be used in the 14120 * call to SD_UPDATE_KSTATS() below, so that 14121 * sd_start_cmds() can correctly decrement the waitq 14122 * count when it is time to transport this command. 14123 */ 14124 un->un_retry_statp = statp; 14125 goto done; 14126 } 14127 } 14128 14129 if (un->un_retry_bp == bp) { 14130 /* 14131 * Save the kstat pointer that will be used in the call to 14132 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 14133 * correctly decrement the waitq count when it is time to 14134 * transport this command. 14135 */ 14136 un->un_retry_statp = statp; 14137 14138 /* 14139 * Schedule a timeout if: 14140 * 1) The user has specified a delay. 14141 * 2) There is not a START_STOP_UNIT callback pending. 14142 * 14143 * If no delay has been specified, then it is up to the caller 14144 * to ensure that IO processing continues without stalling. 14145 * Effectively, this means that the caller will issue the 14146 * required call to sd_start_cmds(). 
The START_STOP_UNIT 14147 * callback does this after the START STOP UNIT command has 14148 * completed. In either of these cases we should not schedule 14149 * a timeout callback here. Also don't schedule the timeout if 14150 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 14151 */ 14152 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 14153 (un->un_direct_priority_timeid == NULL)) { 14154 un->un_retry_timeid = 14155 timeout(sd_start_retry_command, un, retry_delay); 14156 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14157 "sd_set_retry_bp: setting timeout: un: 0x%p" 14158 " bp:0x%p un_retry_timeid:0x%p\n", 14159 un, bp, un->un_retry_timeid); 14160 } 14161 } else { 14162 /* 14163 * We only get in here if there is already another command 14164 * waiting to be retried. In this case, we just put the 14165 * given command onto the wait queue, so it can be transported 14166 * after the current retry command has completed. 14167 * 14168 * Also we have to make sure that if the command at the head 14169 * of the wait queue is the un_failfast_bp, that we do not 14170 * put ahead of it any other commands that are to be retried. 14171 */ 14172 if ((un->un_failfast_bp != NULL) && 14173 (un->un_failfast_bp == un->un_waitq_headp)) { 14174 /* 14175 * Enqueue this command AFTER the first command on 14176 * the wait queue (which is also un_failfast_bp). 14177 */ 14178 bp->av_forw = un->un_waitq_headp->av_forw; 14179 un->un_waitq_headp->av_forw = bp; 14180 if (un->un_waitq_headp == un->un_waitq_tailp) { 14181 un->un_waitq_tailp = bp; 14182 } 14183 } else { 14184 /* Enqueue this command at the head of the waitq. */ 14185 bp->av_forw = un->un_waitq_headp; 14186 un->un_waitq_headp = bp; 14187 if (un->un_waitq_tailp == NULL) { 14188 un->un_waitq_tailp = bp; 14189 } 14190 } 14191 14192 if (statp == NULL) { 14193 statp = kstat_waitq_enter; 14194 } 14195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14196 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 14197 } 14198 14199 done: 14200 if (statp != NULL) { 14201 SD_UPDATE_KSTATS(un, statp, bp); 14202 } 14203 14204 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14205 "sd_set_retry_bp: exit un:0x%p\n", un); 14206 } 14207 14208 14209 /* 14210 * Function: sd_start_retry_command 14211 * 14212 * Description: Start the command that has been waiting on the target's 14213 * retry queue. Called from timeout(9F) context after the 14214 * retry delay interval has expired. 14215 * 14216 * Arguments: arg - pointer to associated softstate for the device. 14217 * 14218 * Context: timeout(9F) thread context. May not sleep. 
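 *
 * (Typically armed in sd_set_retry_bp() via
 * timeout(sd_start_retry_command, un, retry_delay); the timeout id
 * is kept in un->un_retry_timeid and is cleared below before the
 * retry is restarted.)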
14219 */ 14220 14221 static void 14222 sd_start_retry_command(void *arg) 14223 { 14224 struct sd_lun *un = arg; 14225 14226 ASSERT(un != NULL); 14227 ASSERT(!mutex_owned(SD_MUTEX(un))); 14228 14229 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14230 "sd_start_retry_command: entry\n"); 14231 14232 mutex_enter(SD_MUTEX(un)); 14233 14234 un->un_retry_timeid = NULL; 14235 14236 if (un->un_retry_bp != NULL) { 14237 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14238 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 14239 un, un->un_retry_bp); 14240 sd_start_cmds(un, un->un_retry_bp); 14241 } 14242 14243 mutex_exit(SD_MUTEX(un)); 14244 14245 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14246 "sd_start_retry_command: exit\n"); 14247 } 14248 14249 14250 /* 14251 * Function: sd_start_direct_priority_command 14252 * 14253 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 14254 * received TRAN_BUSY when we called scsi_transport() to send it 14255 * to the underlying HBA. This function is called from timeout(9F) 14256 * context after the delay interval has expired. 14257 * 14258 * Arguments: arg - pointer to associated buf(9S) to be restarted. 14259 * 14260 * Context: timeout(9F) thread context. May not sleep. 14261 */ 14262 14263 static void 14264 sd_start_direct_priority_command(void *arg) 14265 { 14266 struct buf *priority_bp = arg; 14267 struct sd_lun *un; 14268 14269 ASSERT(priority_bp != NULL); 14270 un = SD_GET_UN(priority_bp); 14271 ASSERT(un != NULL); 14272 ASSERT(!mutex_owned(SD_MUTEX(un))); 14273 14274 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14275 "sd_start_direct_priority_command: entry\n"); 14276 14277 mutex_enter(SD_MUTEX(un)); 14278 un->un_direct_priority_timeid = NULL; 14279 sd_start_cmds(un, priority_bp); 14280 mutex_exit(SD_MUTEX(un)); 14281 14282 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14283 "sd_start_direct_priority_command: exit\n"); 14284 } 14285 14286 14287 /* 14288 * Function: sd_send_request_sense_command 14289 * 14290 * Description: Sends a REQUEST SENSE command to the target 14291 * 14292 * Context: May be called from interrupt context. 14293 */ 14294 14295 static void 14296 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14297 struct scsi_pkt *pktp) 14298 { 14299 ASSERT(bp != NULL); 14300 ASSERT(un != NULL); 14301 ASSERT(mutex_owned(SD_MUTEX(un))); 14302 14303 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14304 "entry: buf:0x%p\n", bp); 14305 14306 /* 14307 * If we are syncing or dumping, then fail the command to avoid a 14308 * recursive callback into scsi_transport(). Also fail the command 14309 * if we are suspended (legacy behavior). 14310 */ 14311 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14312 (un->un_state == SD_STATE_DUMPING)) { 14313 sd_return_failed_command(un, bp, EIO); 14314 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14315 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14316 return; 14317 } 14318 14319 /* 14320 * Retry the failed command and don't issue the request sense if: 14321 * 1) the sense buf is busy 14322 * 2) we have 1 or more outstanding commands on the target 14323 * (the sense data will be cleared or invalidated anyway) 14324 * 14325 * Note: There could be an issue with not checking a retry limit here; 14326 * the problem is determining which retry limit to check.
14327 */ 14328 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14329 /* Don't retry if the command is flagged as non-retryable */ 14330 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14331 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14332 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 14333 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14334 "sd_send_request_sense_command: " 14335 "at full throttle, retrying exit\n"); 14336 } else { 14337 sd_return_failed_command(un, bp, EIO); 14338 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14339 "sd_send_request_sense_command: " 14340 "at full throttle, non-retryable exit\n"); 14341 } 14342 return; 14343 } 14344 14345 sd_mark_rqs_busy(un, bp); 14346 sd_start_cmds(un, un->un_rqs_bp); 14347 14348 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14349 "sd_send_request_sense_command: exit\n"); 14350 } 14351 14352 14353 /* 14354 * Function: sd_mark_rqs_busy 14355 * 14356 * Description: Indicate that the request sense bp for this instance is 14357 * in use. 14358 * 14359 * Context: May be called under interrupt context 14360 */ 14361 14362 static void 14363 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14364 { 14365 struct sd_xbuf *sense_xp; 14366 14367 ASSERT(un != NULL); 14368 ASSERT(bp != NULL); 14369 ASSERT(mutex_owned(SD_MUTEX(un))); 14370 ASSERT(un->un_sense_isbusy == 0); 14371 14372 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14373 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14374 14375 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14376 ASSERT(sense_xp != NULL); 14377 14378 SD_INFO(SD_LOG_IO, un, 14379 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14380 14381 ASSERT(sense_xp->xb_pktp != NULL); 14382 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14383 == (FLAG_SENSING | FLAG_HEAD)); 14384 14385 un->un_sense_isbusy = 1; 14386 un->un_rqs_bp->b_resid = 0; 14387 sense_xp->xb_pktp->pkt_resid = 0; 14388 sense_xp->xb_pktp->pkt_reason = 0; 14389 14390 /* So we can get back the bp at interrupt time! */ 14391 sense_xp->xb_sense_bp = bp; 14392 14393 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14394 14395 /* 14396 * Mark this buf as awaiting sense data. (This is already set in 14397 * the pkt_flags for the RQS packet.) 14398 */ 14399 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14400 14401 sense_xp->xb_retry_count = 0; 14402 sense_xp->xb_victim_retry_count = 0; 14403 sense_xp->xb_ua_retry_count = 0; 14404 sense_xp->xb_nr_retry_count = 0; 14405 sense_xp->xb_dma_resid = 0; 14406 14407 /* Clean up the fields for auto-request sense */ 14408 sense_xp->xb_sense_status = 0; 14409 sense_xp->xb_sense_state = 0; 14410 sense_xp->xb_sense_resid = 0; 14411 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14412 14413 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14414 } 14415 14416 14417 /* 14418 * Function: sd_mark_rqs_idle 14419 * 14420 * Description: SD_MUTEX must be held continuously through this routine 14421 * to prevent reuse of the rqs struct before the caller can 14422 * complete its processing.
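 *
 * (Counterpart to sd_mark_rqs_busy() above: that routine saved the
 * original bp in xb_sense_bp and set FLAG_SENSING in that bp's pkt;
 * this routine clears both and returns the saved bp.)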
14423 * 14424 * Return Code: Pointer to the RQS buf 14425 * 14426 * Context: May be called under interrupt context 14427 */ 14428 14429 static struct buf * 14430 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14431 { 14432 struct buf *bp; 14433 ASSERT(un != NULL); 14434 ASSERT(sense_xp != NULL); 14435 ASSERT(mutex_owned(SD_MUTEX(un))); 14436 ASSERT(un->un_sense_isbusy != 0); 14437 14438 un->un_sense_isbusy = 0; 14439 bp = sense_xp->xb_sense_bp; 14440 sense_xp->xb_sense_bp = NULL; 14441 14442 /* This pkt is no longer interested in getting sense data */ 14443 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14444 14445 return (bp); 14446 } 14447 14448 14449 14450 /* 14451 * Function: sd_alloc_rqs 14452 * 14453 * Description: Set up the unit to receive auto request sense data 14454 * 14455 * Return Code: DDI_SUCCESS or DDI_FAILURE 14456 * 14457 * Context: Called under attach(9E) context 14458 */ 14459 14460 static int 14461 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14462 { 14463 struct sd_xbuf *xp; 14464 14465 ASSERT(un != NULL); 14466 ASSERT(!mutex_owned(SD_MUTEX(un))); 14467 ASSERT(un->un_rqs_bp == NULL); 14468 ASSERT(un->un_rqs_pktp == NULL); 14469 14470 /* 14471 * First allocate the required buf and scsi_pkt structs, then set up 14472 * the CDB in the scsi_pkt for a REQUEST SENSE command. 14473 */ 14474 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14475 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14476 if (un->un_rqs_bp == NULL) { 14477 return (DDI_FAILURE); 14478 } 14479 14480 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14481 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14482 14483 if (un->un_rqs_pktp == NULL) { 14484 sd_free_rqs(un); 14485 return (DDI_FAILURE); 14486 } 14487 14488 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14489 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14490 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 14491 14492 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14493 14494 /* Set up the other needed members in the ARQ scsi_pkt. */ 14495 un->un_rqs_pktp->pkt_comp = sdintr; 14496 un->un_rqs_pktp->pkt_time = sd_io_time; 14497 un->un_rqs_pktp->pkt_flags |= 14498 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14499 14500 /* 14501 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14502 * provide any intpkt, destroypkt routines as we take care of 14503 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14504 */ 14505 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14506 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14507 xp->xb_pktp = un->un_rqs_pktp; 14508 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14509 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14510 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14511 14512 /* 14513 * Save the pointer to the request sense private bp so it can 14514 * be retrieved in sdintr. 14515 */ 14516 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14517 ASSERT(un->un_rqs_bp->b_private == xp); 14518 14519 /* 14520 * See if the HBA supports auto-request sense for the specified 14521 * target/lun. If it does, then try to enable it (if not already 14522 * enabled). 14523 * 14524 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14525 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14526 * return success. However, in both of these cases ARQ is always 14527 * enabled and scsi_ifgetcap will always return true. 
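 *
 * As a hedged sketch of the probe order recommended next (the switch
 * statement below is the authoritative implementation), using the
 * scsi_ifgetcap(9F)/scsi_ifsetcap(9F) interfaces:
 *
 *	if (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1) == 1)
 *		...ARQ already enabled...
 *	else if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) == 1)
 *		...ARQ enabled now...
 *	else
 *		...ARQ unavailable...
 *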
The best approach 14528 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14529 * 14530 * The third case is an HBA (adp) that always returns enabled on 14531 * scsi_ifgetcap even when ARQ is not enabled; there the best approach 14532 * is to issue a scsi_ifsetcap() followed by a scsi_ifgetcap(). 14533 * Note: this case exists to circumvent the Adaptec bug. (x86 only) 14534 */ 14535 14536 if (un->un_f_is_fibre == TRUE) { 14537 un->un_f_arq_enabled = TRUE; 14538 } else { 14539 #if defined(__i386) || defined(__amd64) 14540 /* 14541 * Circumvent the Adaptec bug; remove this code when 14542 * the bug is fixed 14543 */ 14544 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14545 #endif 14546 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14547 case 0: 14548 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14549 "sd_alloc_rqs: HBA supports ARQ\n"); 14550 /* 14551 * ARQ is supported by this HBA but currently is not 14552 * enabled. Attempt to enable it and if successful then 14553 * mark this instance as ARQ enabled. 14554 */ 14555 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14556 == 1) { 14557 /* Successfully enabled ARQ in the HBA */ 14558 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14559 "sd_alloc_rqs: ARQ enabled\n"); 14560 un->un_f_arq_enabled = TRUE; 14561 } else { 14562 /* Could not enable ARQ in the HBA */ 14563 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14564 "sd_alloc_rqs: failed ARQ enable\n"); 14565 un->un_f_arq_enabled = FALSE; 14566 } 14567 break; 14568 case 1: 14569 /* 14570 * ARQ is supported by this HBA and is already enabled. 14571 * Just mark ARQ as enabled for this instance. 14572 */ 14573 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14574 "sd_alloc_rqs: ARQ already enabled\n"); 14575 un->un_f_arq_enabled = TRUE; 14576 break; 14577 default: 14578 /* 14579 * ARQ is not supported by this HBA; disable it for this 14580 * instance. 14581 */ 14582 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14583 "sd_alloc_rqs: HBA does not support ARQ\n"); 14584 un->un_f_arq_enabled = FALSE; 14585 break; 14586 } 14587 } 14588 14589 return (DDI_SUCCESS); 14590 } 14591 14592 14593 /* 14594 * Function: sd_free_rqs 14595 * 14596 * Description: Cleanup for the per-instance RQS command. 14597 * 14598 * Context: Kernel thread context 14599 */ 14600 14601 static void 14602 sd_free_rqs(struct sd_lun *un) 14603 { 14604 ASSERT(un != NULL); 14605 14606 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14607 14608 /* 14609 * If consistent memory is bound to a scsi_pkt, the pkt 14610 * has to be destroyed *before* freeing the consistent memory. 14611 * Don't change the sequence of these operations. 14612 * scsi_destroy_pkt() might access memory, which isn't allowed, 14613 * after it was freed in scsi_free_consistent_buf(). 14614 */ 14615 if (un->un_rqs_pktp != NULL) { 14616 scsi_destroy_pkt(un->un_rqs_pktp); 14617 un->un_rqs_pktp = NULL; 14618 } 14619 14620 if (un->un_rqs_bp != NULL) { 14621 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 14622 if (xp != NULL) { 14623 kmem_free(xp, sizeof (struct sd_xbuf)); 14624 } 14625 scsi_free_consistent_buf(un->un_rqs_bp); 14626 un->un_rqs_bp = NULL; 14627 } 14628 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14629 } 14630 14631 14632 14633 /* 14634 * Function: sd_reduce_throttle 14635 * 14636 * Description: Reduces the maximum # of outstanding commands on a 14637 * target to the current number of outstanding commands. 14638 * Queues a timeout(9F) callback to restore the limit 14639 * after a specified interval has elapsed.
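 *
 * As a reminder of the timeout(9F) contract this depends on (a
 * generic sketch, not code from this driver): the callback fires
 * once, from callout context, no sooner than the requested number
 * of ticks, and may be cancelled while still pending:
 *
 *	timeout_id_t tid = timeout(callback_func, arg, ticks);
 *	...
 *	(void) untimeout(tid);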
14640 * Typically used when we get a TRAN_BUSY return code 14641 * back from scsi_transport(). 14642 * 14643 * Arguments: un - ptr to the sd_lun softstate struct 14644 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14645 * 14646 * Context: May be called from interrupt context 14647 */ 14648 14649 static void 14650 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14651 { 14652 ASSERT(un != NULL); 14653 ASSERT(mutex_owned(SD_MUTEX(un))); 14654 ASSERT(un->un_ncmds_in_transport >= 0); 14655 14656 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14657 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14658 un, un->un_throttle, un->un_ncmds_in_transport); 14659 14660 if (un->un_throttle > 1) { 14661 if (un->un_f_use_adaptive_throttle == TRUE) { 14662 switch (throttle_type) { 14663 case SD_THROTTLE_TRAN_BUSY: 14664 if (un->un_busy_throttle == 0) { 14665 un->un_busy_throttle = un->un_throttle; 14666 } 14667 break; 14668 case SD_THROTTLE_QFULL: 14669 un->un_busy_throttle = 0; 14670 break; 14671 default: 14672 ASSERT(FALSE); 14673 } 14674 14675 if (un->un_ncmds_in_transport > 0) { 14676 un->un_throttle = un->un_ncmds_in_transport; 14677 } 14678 14679 } else { 14680 if (un->un_ncmds_in_transport == 0) { 14681 un->un_throttle = 1; 14682 } else { 14683 un->un_throttle = un->un_ncmds_in_transport; 14684 } 14685 } 14686 } 14687 14688 /* Reschedule the timeout if none is currently active */ 14689 if (un->un_reset_throttle_timeid == NULL) { 14690 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14691 un, SD_THROTTLE_RESET_INTERVAL); 14692 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14693 "sd_reduce_throttle: timeout scheduled!\n"); 14694 } 14695 14696 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14697 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14698 } 14699 14700 14701 14702 /* 14703 * Function: sd_restore_throttle 14704 * 14705 * Description: Callback function for timeout(9F). Resets the current 14706 * value of un->un_throttle to its default. 14707 * 14708 * Arguments: arg - pointer to associated softstate for the device. 14709 * 14710 * Context: May be called from interrupt context 14711 */ 14712 14713 static void 14714 sd_restore_throttle(void *arg) 14715 { 14716 struct sd_lun *un = arg; 14717 14718 ASSERT(un != NULL); 14719 ASSERT(!mutex_owned(SD_MUTEX(un))); 14720 14721 mutex_enter(SD_MUTEX(un)); 14722 14723 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14724 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14725 14726 un->un_reset_throttle_timeid = NULL; 14727 14728 if (un->un_f_use_adaptive_throttle == TRUE) { 14729 /* 14730 * If un_busy_throttle is nonzero, then it contains the 14731 * value that un_throttle was when we got a TRAN_BUSY back 14732 * from scsi_transport(). We want to revert to this 14733 * value. 14734 * 14735 * In the QFULL case, the throttle limit will incrementally 14736 * increase until it reaches max throttle. 14737 */ 14738 if (un->un_busy_throttle > 0) { 14739 un->un_throttle = un->un_busy_throttle; 14740 un->un_busy_throttle = 0; 14741 } else { 14742 /* 14743 * Increase the throttle by 10% to open the gate 14744 * slowly; schedule another restore if the saved 14745 * throttle has not yet been reached. 14746 */ 14747 short throttle; 14748 if (sd_qfull_throttle_enable) { 14749 throttle = un->un_throttle + 14750 max((un->un_throttle / 10), 1); 14751 un->un_throttle = 14752 (throttle < un->un_saved_throttle) ?
throttle : un->un_saved_throttle; 14753 throttle : un->un_saved_throttle; 14754 if (un->un_throttle < un->un_saved_throttle) { 14755 un->un_reset_throttle_timeid = 14756 timeout(sd_restore_throttle, 14757 un, 14758 SD_QFULL_THROTTLE_RESET_INTERVAL); 14759 } 14760 } 14761 } 14762 14763 /* 14764 * If un_throttle has fallen below the low-water mark, we 14765 * restore it to the saved (full) value here (and allow it to 14766 * ratchet down again if necessary). 14767 */ 14768 if (un->un_throttle < un->un_min_throttle) { 14769 un->un_throttle = un->un_saved_throttle; 14770 } 14771 } else { 14772 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14773 "restoring limit from 0x%x to 0x%x\n", 14774 un->un_throttle, un->un_saved_throttle); 14775 un->un_throttle = un->un_saved_throttle; 14776 } 14777 14778 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14779 "sd_restore_throttle: calling sd_start_cmds!\n"); 14780 14781 sd_start_cmds(un, NULL); 14782 14783 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14784 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 14785 un, un->un_throttle); 14786 14787 mutex_exit(SD_MUTEX(un)); 14788 14789 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 14790 } 14791 14792 /* 14793 * Function: sdrunout 14794 * 14795 * Description: Callback routine for scsi_init_pkt when a resource allocation 14796 * fails. 14797 * 14798 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 14799 * soft state instance. 14800 * 14801 * Return Code: The scsi_init_pkt routine allows for the callback function to 14802 * return a 0 indicating the callback should be rescheduled or a 1 14803 * indicating not to reschedule. This routine always returns 1 14804 * because the driver always provides a callback function to 14805 * scsi_init_pkt. This results in a callback always being scheduled 14806 * (via the scsi_init_pkt callback implementation) if a resource 14807 * failure occurs. 14808 * 14809 * Context: This callback function may not block or call routines that block 14810 * 14811 * Note: Using the scsi_init_pkt callback facility can result in an I/O 14812 * request persisting at the head of the list which cannot be 14813 * satisfied even after multiple retries. In the future the driver 14814 * may implement some type of maximum runout count before failing 14815 * an I/O. 14816 */ 14817 14818 static int 14819 sdrunout(caddr_t arg) 14820 { 14821 struct sd_lun *un = (struct sd_lun *)arg; 14822 14823 ASSERT(un != NULL); 14824 ASSERT(!mutex_owned(SD_MUTEX(un))); 14825 14826 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 14827 14828 mutex_enter(SD_MUTEX(un)); 14829 sd_start_cmds(un, NULL); 14830 mutex_exit(SD_MUTEX(un)); 14831 /* 14832 * This callback routine always returns 1 (i.e. do not reschedule) 14833 * because we always specify sdrunout as the callback handler for 14834 * scsi_init_pkt inside the call to sd_start_cmds. 14835 */ 14836 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 14837 return (1); 14838 } 14839 14840 14841 /* 14842 * Function: sdintr 14843 * 14844 * Description: Completion callback routine for scsi_pkt(9S) structs 14845 * sent to the HBA driver via scsi_transport(9F).
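 *
 * The HBA finds its way here through the pkt_comp field of each
 * packet. A minimal sketch of how a packet is wired for completion
 * (sd_alloc_rqs() above does exactly this for the RQS packet):
 *
 *	pktp->pkt_comp = sdintr;
 *	...
 *	(void) scsi_transport(pktp);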
14846 * 14847 * Context: Interrupt context 14848 */ 14849 14850 static void 14851 sdintr(struct scsi_pkt *pktp) 14852 { 14853 struct buf *bp; 14854 struct sd_xbuf *xp; 14855 struct sd_lun *un; 14856 size_t actual_len; 14857 14858 ASSERT(pktp != NULL); 14859 bp = (struct buf *)pktp->pkt_private; 14860 ASSERT(bp != NULL); 14861 xp = SD_GET_XBUF(bp); 14862 ASSERT(xp != NULL); 14863 ASSERT(xp->xb_pktp != NULL); 14864 un = SD_GET_UN(bp); 14865 ASSERT(un != NULL); 14866 ASSERT(!mutex_owned(SD_MUTEX(un))); 14867 14868 #ifdef SD_FAULT_INJECTION 14869 14870 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14871 /* SD FaultInjection */ 14872 sd_faultinjection(pktp); 14873 14874 #endif /* SD_FAULT_INJECTION */ 14875 14876 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14877 " xp:0x%p, un:0x%p\n", bp, xp, un); 14878 14879 mutex_enter(SD_MUTEX(un)); 14880 14881 /* Reduce the count of the #commands currently in transport */ 14882 un->un_ncmds_in_transport--; 14883 ASSERT(un->un_ncmds_in_transport >= 0); 14884 14885 /* Increment counter to indicate that the callback routine is active */ 14886 un->un_in_callback++; 14887 14888 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14889 14890 #ifdef SDDEBUG 14891 if (bp == un->un_retry_bp) { 14892 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14893 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14894 un, un->un_retry_bp, un->un_ncmds_in_transport); 14895 } 14896 #endif 14897 14898 /* 14899 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14900 * state if needed. 14901 */ 14902 if (pktp->pkt_reason == CMD_DEV_GONE) { 14903 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14904 "Command failed to complete...Device is gone\n"); 14905 if (un->un_mediastate != DKIO_DEV_GONE) { 14906 un->un_mediastate = DKIO_DEV_GONE; 14907 cv_broadcast(&un->un_state_cv); 14908 } 14909 sd_return_failed_command(un, bp, EIO); 14910 goto exit; 14911 } 14912 14913 if (pktp->pkt_state & STATE_XARQ_DONE) { 14914 SD_TRACE(SD_LOG_COMMON, un, 14915 "sdintr: extra sense data received. pkt=%p\n", pktp); 14916 } 14917 14918 /* 14919 * First see if the pkt has auto-request sense data with it.... 14920 * Look at the packet state first so we don't take a performance 14921 * hit looking at the arq enabled flag unless absolutely necessary. 14922 */ 14923 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14924 (un->un_f_arq_enabled == TRUE)) { 14925 /* 14926 * The HBA did an auto request sense for this command so check 14927 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14928 * driver command that should not be retried. 14929 */ 14930 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14931 /* 14932 * Save the relevant sense info into the xp for the 14933 * original cmd. 
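 *
 * In outline, the copy-length logic below (a paraphrase of the code
 * that follows, not a separate algorithm) reduces to:
 *
 *	if XARQ completed:            actual_len = MAX_SENSE_LENGTH - resid
 *	else if resid > SENSE_LENGTH: actual_len = MAX_SENSE_LENGTH - resid
 *	else:                         actual_len = SENSE_LENGTH - resid
 *
 * with xb_sense_resid for a USCSI caller then clamped against the
 * caller's uscsi_rqlen.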
14934 */ 14935 struct scsi_arq_status *asp; 14936 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14937 xp->xb_sense_status = 14938 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14939 xp->xb_sense_state = asp->sts_rqpkt_state; 14940 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14941 if (pktp->pkt_state & STATE_XARQ_DONE) { 14942 actual_len = MAX_SENSE_LENGTH - 14943 xp->xb_sense_resid; 14944 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14945 MAX_SENSE_LENGTH); 14946 } else { 14947 if (xp->xb_sense_resid > SENSE_LENGTH) { 14948 actual_len = MAX_SENSE_LENGTH - 14949 xp->xb_sense_resid; 14950 } else { 14951 actual_len = SENSE_LENGTH - 14952 xp->xb_sense_resid; 14953 } 14954 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14955 if ((((struct uscsi_cmd *) 14956 (xp->xb_pktinfo))->uscsi_rqlen) > 14957 actual_len) { 14958 xp->xb_sense_resid = 14959 (((struct uscsi_cmd *) 14960 (xp->xb_pktinfo))-> 14961 uscsi_rqlen) - actual_len; 14962 } else { 14963 xp->xb_sense_resid = 0; 14964 } 14965 } 14966 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14967 SENSE_LENGTH); 14968 } 14969 14970 /* fail the command */ 14971 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14972 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14973 sd_return_failed_command(un, bp, EIO); 14974 goto exit; 14975 } 14976 14977 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14978 /* 14979 * We want to either retry or fail this command, so free 14980 * the DMA resources here. If we retry the command then 14981 * the DMA resources will be reallocated in sd_start_cmds(). 14982 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14983 * causes the *entire* transfer to start over again from the 14984 * beginning of the request, even for PARTIAL chunks that 14985 * have already transferred successfully. 14986 */ 14987 if ((un->un_f_is_fibre == TRUE) && 14988 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14989 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14990 scsi_dmafree(pktp); 14991 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14992 } 14993 #endif 14994 14995 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14996 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14997 14998 sd_handle_auto_request_sense(un, bp, xp, pktp); 14999 goto exit; 15000 } 15001 15002 /* Next see if this is the REQUEST SENSE pkt for the instance */ 15003 if (pktp->pkt_flags & FLAG_SENSING) { 15004 /* This pktp is from the unit's REQUEST_SENSE command */ 15005 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15006 "sdintr: sd_handle_request_sense\n"); 15007 sd_handle_request_sense(un, bp, xp, pktp); 15008 goto exit; 15009 } 15010 15011 /* 15012 * Check to see if the command successfully completed as requested; 15013 * this is the most common case (and also the hot performance path). 15014 * 15015 * Requirements for successful completion are: 15016 * pkt_reason is CMD_CMPLT and packet status is status good. 15017 * In addition: 15018 * - A residual of zero indicates successful completion no matter what 15019 * the command is. 15020 * - If the residual is not zero and the command is not a read or 15021 * write, then it's still defined as successful completion. In other 15022 * words, if the command is a read or write the residual must be 15023 * zero for successful completion. 15024 * - If the residual is not zero and the command is a read or 15025 * write, and it's a USCSICMD, then it's still defined as 15026 * successful completion. 
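 *
 * Expressed as a single predicate (a restatement of the rules above,
 * not additional policy):
 *
 *	success = (pkt_reason == CMD_CMPLT) &&
 *	    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
 *	    ((pkt_resid == 0) || (opcode is not READ/WRITE) ||
 *	    (the request is a USCSI command))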
15027 */ 15028 if ((pktp->pkt_reason == CMD_CMPLT) && 15029 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 15030 15031 /* 15032 * Since this command is returned with a good status, we 15033 * can reset the count for Sonoma failover. 15034 */ 15035 un->un_sonoma_failure_count = 0; 15036 15037 /* 15038 * Return all USCSI commands on good status 15039 */ 15040 if (pktp->pkt_resid == 0) { 15041 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15042 "sdintr: returning command for resid == 0\n"); 15043 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 15044 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 15045 SD_UPDATE_B_RESID(bp, pktp); 15046 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15047 "sdintr: returning command for resid != 0\n"); 15048 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15049 SD_UPDATE_B_RESID(bp, pktp); 15050 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15051 "sdintr: returning uscsi command\n"); 15052 } else { 15053 goto not_successful; 15054 } 15055 sd_return_command(un, bp); 15056 15057 /* 15058 * Decrement counter to indicate that the callback routine 15059 * is done. 15060 */ 15061 un->un_in_callback--; 15062 ASSERT(un->un_in_callback >= 0); 15063 mutex_exit(SD_MUTEX(un)); 15064 15065 return; 15066 } 15067 15068 not_successful: 15069 15070 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15071 /* 15072 * The following is based upon knowledge of the underlying transport 15073 * and its use of DMA resources. This code should be removed when 15074 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15075 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15076 * and sd_start_cmds(). 15077 * 15078 * Free any DMA resources associated with this command if there 15079 * is a chance it could be retried or enqueued for later retry. 15080 * If we keep the DMA binding then mpxio cannot reissue the 15081 * command on another path whenever a path failure occurs. 15082 * 15083 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15084 * causes the *entire* transfer to start over again from the 15085 * beginning of the request, even for PARTIAL chunks that 15086 * have already transferred successfully. 15087 * 15088 * This is only done for non-uscsi commands (and also skipped for the 15089 * driver's internal RQS command). Also just do this for Fibre Channel 15090 * devices as these are the only ones that support mpxio. 15091 */ 15092 if ((un->un_f_is_fibre == TRUE) && 15093 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15094 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15095 scsi_dmafree(pktp); 15096 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15097 } 15098 #endif 15099 15100 /* 15101 * The command did not successfully complete as requested so check 15102 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15103 * driver command that should not be retried so just return. If 15104 * FLAG_DIAGNOSE is not set the error will be processed below. 15105 */ 15106 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15107 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15108 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15109 /* 15110 * Issue a request sense if a check condition caused the error 15111 * (we handle the auto request sense case above), otherwise 15112 * just fail the command. 
15113 */ 15114 if ((pktp->pkt_reason == CMD_CMPLT) && 15115 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15116 sd_send_request_sense_command(un, bp, pktp); 15117 } else { 15118 sd_return_failed_command(un, bp, EIO); 15119 } 15120 goto exit; 15121 } 15122 15123 /* 15124 * The command did not successfully complete as requested so process 15125 * the error, retry, and/or attempt recovery. 15126 */ 15127 switch (pktp->pkt_reason) { 15128 case CMD_CMPLT: 15129 switch (SD_GET_PKT_STATUS(pktp)) { 15130 case STATUS_GOOD: 15131 /* 15132 * The command completed successfully with a non-zero 15133 * residual 15134 */ 15135 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15136 "sdintr: STATUS_GOOD \n"); 15137 sd_pkt_status_good(un, bp, xp, pktp); 15138 break; 15139 15140 case STATUS_CHECK: 15141 case STATUS_TERMINATED: 15142 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15143 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15144 sd_pkt_status_check_condition(un, bp, xp, pktp); 15145 break; 15146 15147 case STATUS_BUSY: 15148 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15149 "sdintr: STATUS_BUSY\n"); 15150 sd_pkt_status_busy(un, bp, xp, pktp); 15151 break; 15152 15153 case STATUS_RESERVATION_CONFLICT: 15154 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15155 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15156 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15157 break; 15158 15159 case STATUS_QFULL: 15160 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15161 "sdintr: STATUS_QFULL\n"); 15162 sd_pkt_status_qfull(un, bp, xp, pktp); 15163 break; 15164 15165 case STATUS_MET: 15166 case STATUS_INTERMEDIATE: 15167 case STATUS_SCSI2: 15168 case STATUS_INTERMEDIATE_MET: 15169 case STATUS_ACA_ACTIVE: 15170 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15171 "Unexpected SCSI status received: 0x%x\n", 15172 SD_GET_PKT_STATUS(pktp)); 15173 sd_return_failed_command(un, bp, EIO); 15174 break; 15175 15176 default: 15177 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15178 "Invalid SCSI status received: 0x%x\n", 15179 SD_GET_PKT_STATUS(pktp)); 15180 sd_return_failed_command(un, bp, EIO); 15181 break; 15182 15183 } 15184 break; 15185 15186 case CMD_INCOMPLETE: 15187 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15188 "sdintr: CMD_INCOMPLETE\n"); 15189 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15190 break; 15191 case CMD_TRAN_ERR: 15192 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15193 "sdintr: CMD_TRAN_ERR\n"); 15194 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15195 break; 15196 case CMD_RESET: 15197 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15198 "sdintr: CMD_RESET \n"); 15199 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15200 break; 15201 case CMD_ABORTED: 15202 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15203 "sdintr: CMD_ABORTED \n"); 15204 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15205 break; 15206 case CMD_TIMEOUT: 15207 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15208 "sdintr: CMD_TIMEOUT\n"); 15209 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15210 break; 15211 case CMD_UNX_BUS_FREE: 15212 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15213 "sdintr: CMD_UNX_BUS_FREE \n"); 15214 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15215 break; 15216 case CMD_TAG_REJECT: 15217 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15218 "sdintr: CMD_TAG_REJECT\n"); 15219 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15220 break; 15221 default: 15222 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15223 "sdintr: default\n"); 15224 sd_pkt_reason_default(un, bp, xp, pktp); 15225 break; 15226 } 15227 15228 exit: 15229 
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 15230 15231 /* Decrement counter to indicate that the callback routine is done. */ 15232 un->un_in_callback--; 15233 ASSERT(un->un_in_callback >= 0); 15234 15235 /* 15236 * At this point, the pkt has been dispatched, i.e., it is either 15237 * being re-tried or has been returned to its caller and should 15238 * not be referenced. 15239 */ 15240 15241 mutex_exit(SD_MUTEX(un)); 15242 } 15243 15244 15245 /* 15246 * Function: sd_print_incomplete_msg 15247 * 15248 * Description: Prints the error message for a CMD_INCOMPLETE error. 15249 * 15250 * Arguments: un - ptr to associated softstate for the device. 15251 * bp - ptr to the buf(9S) for the command. 15252 * arg - message string ptr 15253 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 15254 * or SD_NO_RETRY_ISSUED. 15255 * 15256 * Context: May be called under interrupt context 15257 */ 15258 15259 static void 15260 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15261 { 15262 struct scsi_pkt *pktp; 15263 char *msgp; 15264 char *cmdp = arg; 15265 15266 ASSERT(un != NULL); 15267 ASSERT(mutex_owned(SD_MUTEX(un))); 15268 ASSERT(bp != NULL); 15269 ASSERT(arg != NULL); 15270 pktp = SD_GET_PKTP(bp); 15271 ASSERT(pktp != NULL); 15272 15273 switch (code) { 15274 case SD_DELAYED_RETRY_ISSUED: 15275 case SD_IMMEDIATE_RETRY_ISSUED: 15276 msgp = "retrying"; 15277 break; 15278 case SD_NO_RETRY_ISSUED: 15279 default: 15280 msgp = "giving up"; 15281 break; 15282 } 15283 15284 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15285 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15286 "incomplete %s- %s\n", cmdp, msgp); 15287 } 15288 } 15289 15290 15291 15292 /* 15293 * Function: sd_pkt_status_good 15294 * 15295 * Description: Processing for a STATUS_GOOD code in pkt_status. 15296 * 15297 * Context: May be called under interrupt context 15298 */ 15299 15300 static void 15301 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15302 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15303 { 15304 char *cmdp; 15305 15306 ASSERT(un != NULL); 15307 ASSERT(mutex_owned(SD_MUTEX(un))); 15308 ASSERT(bp != NULL); 15309 ASSERT(xp != NULL); 15310 ASSERT(pktp != NULL); 15311 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15312 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15313 ASSERT(pktp->pkt_resid != 0); 15314 15315 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15316 15317 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15318 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15319 case SCMD_READ: 15320 cmdp = "read"; 15321 break; 15322 case SCMD_WRITE: 15323 cmdp = "write"; 15324 break; 15325 default: 15326 SD_UPDATE_B_RESID(bp, pktp); 15327 sd_return_command(un, bp); 15328 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15329 return; 15330 } 15331 15332 /* 15333 * See if we can retry the read/write, preferably immediately. 15334 * If retries are exhausted, then sd_retry_command() will update 15335 * the b_resid count. 15336 */ 15337 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15338 cmdp, EIO, (clock_t)0, NULL); 15339 15340 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15341 } 15342 15343 15344 15345 15346 15347 /* 15348 * Function: sd_handle_request_sense 15349 * 15350 * Description: Processing for non-auto Request Sense command.
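 *
 * The bookkeeping set up by sd_mark_rqs_busy() ties the RQS packet
 * to the original command roughly like this:
 *
 *	sense_xp->xb_sense_bp             --> bp of the original command
 *	SD_GET_XBUF(original bp)->xb_pktp --> pkt_flags has FLAG_SENSING
 *	                                      set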
15351 * 15352 * Arguments: un - ptr to associated softstate 15353 * sense_bp - ptr to buf(9S) for the RQS command 15354 * sense_xp - ptr to the sd_xbuf for the RQS command 15355 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15356 * 15357 * Context: May be called under interrupt context 15358 */ 15359 15360 static void 15361 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15362 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15363 { 15364 struct buf *cmd_bp; /* buf for the original command */ 15365 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15366 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15367 size_t actual_len; /* actual sense data length */ 15368 15369 ASSERT(un != NULL); 15370 ASSERT(mutex_owned(SD_MUTEX(un))); 15371 ASSERT(sense_bp != NULL); 15372 ASSERT(sense_xp != NULL); 15373 ASSERT(sense_pktp != NULL); 15374 15375 /* 15376 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15377 * RQS command and not the original command. 15378 */ 15379 ASSERT(sense_pktp == un->un_rqs_pktp); 15380 ASSERT(sense_bp == un->un_rqs_bp); 15381 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15382 (FLAG_SENSING | FLAG_HEAD)); 15383 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15384 FLAG_SENSING) == FLAG_SENSING); 15385 15386 /* These are the bp, xp, and pktp for the original command */ 15387 cmd_bp = sense_xp->xb_sense_bp; 15388 cmd_xp = SD_GET_XBUF(cmd_bp); 15389 cmd_pktp = SD_GET_PKTP(cmd_bp); 15390 15391 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15392 /* 15393 * The REQUEST SENSE command failed. Release the REQUEST 15394 * SENSE command for re-use, get back the bp for the original 15395 * command, and attempt to re-try the original command if 15396 * FLAG_DIAGNOSE is not set in the original packet. 15397 */ 15398 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15399 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15400 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15401 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15402 NULL, NULL, EIO, (clock_t)0, NULL); 15403 return; 15404 } 15405 } 15406 15407 /* 15408 * Save the relevant sense info into the xp for the original cmd. 15409 * 15410 * Note: if the request sense failed the state info will be zero 15411 * as set in sd_mark_rqs_busy() 15412 */ 15413 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15414 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15415 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 15416 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 15417 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 15418 SENSE_LENGTH)) { 15419 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15420 MAX_SENSE_LENGTH); 15421 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15422 } else { 15423 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15424 SENSE_LENGTH); 15425 if (actual_len < SENSE_LENGTH) { 15426 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 15427 } else { 15428 cmd_xp->xb_sense_resid = 0; 15429 } 15430 } 15431 15432 /* 15433 * Free up the RQS command.... 15434 * NOTE: 15435 * Must do this BEFORE calling sd_validate_sense_data! 15436 * sd_validate_sense_data may return the original command in 15437 * which case the pkt will be freed and the flags can no 15438 * longer be touched. 15439 * SD_MUTEX is held through this process until the command 15440 * is dispatched based upon the sense data, so there are 15441 * no race conditions. 
15442 */ 15443 (void) sd_mark_rqs_idle(un, sense_xp); 15444 15445 /* 15446 * For a retryable command see if we have valid sense data, if so then 15447 * turn it over to sd_decode_sense() to figure out the right course of 15448 * action. Just fail a non-retryable command. 15449 */ 15450 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15451 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 15452 SD_SENSE_DATA_IS_VALID) { 15453 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15454 } 15455 } else { 15456 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15457 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15458 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15459 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15460 sd_return_failed_command(un, cmd_bp, EIO); 15461 } 15462 } 15463 15464 15465 15466 15467 /* 15468 * Function: sd_handle_auto_request_sense 15469 * 15470 * Description: Processing for auto-request sense information. 15471 * 15472 * Arguments: un - ptr to associated softstate 15473 * bp - ptr to buf(9S) for the command 15474 * xp - ptr to the sd_xbuf for the command 15475 * pktp - ptr to the scsi_pkt(9S) for the command 15476 * 15477 * Context: May be called under interrupt context 15478 */ 15479 15480 static void 15481 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15482 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15483 { 15484 struct scsi_arq_status *asp; 15485 size_t actual_len; 15486 15487 ASSERT(un != NULL); 15488 ASSERT(mutex_owned(SD_MUTEX(un))); 15489 ASSERT(bp != NULL); 15490 ASSERT(xp != NULL); 15491 ASSERT(pktp != NULL); 15492 ASSERT(pktp != un->un_rqs_pktp); 15493 ASSERT(bp != un->un_rqs_bp); 15494 15495 /* 15496 * For auto-request sense, we get a scsi_arq_status back from 15497 * the HBA, with the sense data in the sts_sensedata member. 15498 * The pkt_scbp of the packet points to this scsi_arq_status. 15499 */ 15500 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15501 15502 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15503 /* 15504 * The auto REQUEST SENSE failed; see if we can re-try 15505 * the original command. 15506 */ 15507 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15508 "auto request sense failed (reason=%s)\n", 15509 scsi_rname(asp->sts_rqpkt_reason)); 15510 15511 sd_reset_target(un, pktp); 15512 15513 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15514 NULL, NULL, EIO, (clock_t)0, NULL); 15515 return; 15516 } 15517 15518 /* Save the relevant sense info into the xp for the original cmd. 
*/ 15519 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15520 xp->xb_sense_state = asp->sts_rqpkt_state; 15521 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15522 if (xp->xb_sense_state & STATE_XARQ_DONE) { 15523 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15524 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15525 MAX_SENSE_LENGTH); 15526 } else { 15527 if (xp->xb_sense_resid > SENSE_LENGTH) { 15528 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15529 } else { 15530 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 15531 } 15532 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15533 if ((((struct uscsi_cmd *) 15534 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 15535 xp->xb_sense_resid = (((struct uscsi_cmd *) 15536 (xp->xb_pktinfo))->uscsi_rqlen) - 15537 actual_len; 15538 } else { 15539 xp->xb_sense_resid = 0; 15540 } 15541 } 15542 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 15543 } 15544 15545 /* 15546 * See if we have valid sense data, if so then turn it over to 15547 * sd_decode_sense() to figure out the right course of action. 15548 */ 15549 if (sd_validate_sense_data(un, bp, xp, actual_len) == 15550 SD_SENSE_DATA_IS_VALID) { 15551 sd_decode_sense(un, bp, xp, pktp); 15552 } 15553 } 15554 15555 15556 /* 15557 * Function: sd_print_sense_failed_msg 15558 * 15559 * Description: Print log message when RQS has failed. 15560 * 15561 * Arguments: un - ptr to associated softstate 15562 * bp - ptr to buf(9S) for the command 15563 * arg - generic message string ptr 15564 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15565 * or SD_NO_RETRY_ISSUED 15566 * 15567 * Context: May be called from interrupt context 15568 */ 15569 15570 static void 15571 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15572 int code) 15573 { 15574 char *msgp = arg; 15575 15576 ASSERT(un != NULL); 15577 ASSERT(mutex_owned(SD_MUTEX(un))); 15578 ASSERT(bp != NULL); 15579 15580 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15581 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15582 } 15583 } 15584 15585 15586 /* 15587 * Function: sd_validate_sense_data 15588 * 15589 * Description: Check the given sense data for validity. 15590 * If the sense data is not valid, the command will 15591 * be either failed or retried! 15592 * 15593 * Return Code: SD_SENSE_DATA_IS_INVALID 15594 * SD_SENSE_DATA_IS_VALID 15595 * 15596 * Context: May be called from interrupt context 15597 */ 15598 15599 static int 15600 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15601 size_t actual_len) 15602 { 15603 struct scsi_extended_sense *esp; 15604 struct scsi_pkt *pktp; 15605 char *msgp = NULL; 15606 15607 ASSERT(un != NULL); 15608 ASSERT(mutex_owned(SD_MUTEX(un))); 15609 ASSERT(bp != NULL); 15610 ASSERT(bp != un->un_rqs_bp); 15611 ASSERT(xp != NULL); 15612 15613 pktp = SD_GET_PKTP(bp); 15614 ASSERT(pktp != NULL); 15615 15616 /* 15617 * Check the status of the RQS command (auto or manual). 
15618 */ 15619 switch (xp->xb_sense_status & STATUS_MASK) { 15620 case STATUS_GOOD: 15621 break; 15622 15623 case STATUS_RESERVATION_CONFLICT: 15624 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15625 return (SD_SENSE_DATA_IS_INVALID); 15626 15627 case STATUS_BUSY: 15628 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15629 "Busy Status on REQUEST SENSE\n"); 15630 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15631 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15632 return (SD_SENSE_DATA_IS_INVALID); 15633 15634 case STATUS_QFULL: 15635 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15636 "QFULL Status on REQUEST SENSE\n"); 15637 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15638 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15639 return (SD_SENSE_DATA_IS_INVALID); 15640 15641 case STATUS_CHECK: 15642 case STATUS_TERMINATED: 15643 msgp = "Check Condition on REQUEST SENSE\n"; 15644 goto sense_failed; 15645 15646 default: 15647 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15648 goto sense_failed; 15649 } 15650 15651 /* 15652 * See if we got the minimum required amount of sense data. 15653 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15654 * or less. 15655 */ 15656 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15657 (actual_len == 0)) { 15658 msgp = "Request Sense couldn't get sense data\n"; 15659 goto sense_failed; 15660 } 15661 15662 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15663 msgp = "Not enough sense information\n"; 15664 goto sense_failed; 15665 } 15666 15667 /* 15668 * We require the extended sense data 15669 */ 15670 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15671 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15672 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15673 static char tmp[8]; 15674 static char buf[148]; 15675 char *p = (char *)(xp->xb_sense_data); 15676 int i; 15677 15678 mutex_enter(&sd_sense_mutex); 15679 (void) strcpy(buf, "undecodable sense information:"); 15680 for (i = 0; i < actual_len; i++) { 15681 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15682 (void) strcpy(&buf[strlen(buf)], tmp); 15683 } 15684 i = strlen(buf); 15685 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15686 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15687 mutex_exit(&sd_sense_mutex); 15688 } 15689 /* Note: Legacy behavior, fail the command with no retry */ 15690 sd_return_failed_command(un, bp, EIO); 15691 return (SD_SENSE_DATA_IS_INVALID); 15692 } 15693 15694 /* 15695 * Check that es_code is valid (es_class concatenated with es_code 15696 * makes up the "response code" field). es_class will always be 7, so 15697 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 15698 * format. 15699 */ 15700 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15701 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15702 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15703 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15704 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15705 goto sense_failed; 15706 } 15707 15708 return (SD_SENSE_DATA_IS_VALID); 15709 15710 sense_failed: 15711 /* 15712 * If the request sense failed (for whatever reason), attempt 15713 * to retry the original command. 15714 */ 15715 #if defined(__i386) || defined(__amd64) 15716 /* 15717 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15718 * sddef.h for the SPARC platform, while x86 uses one binary 15719 * for both SCSI/FC.
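 *
 * For reference, drv_usectohz(9F) converts microseconds to clock
 * ticks, so the drv_usectohz(100000) passed below amounts to a retry
 * delay of roughly 100 milliseconds for fibre targets:
 *
 *	clock_t delay = drv_usectohz(100000);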
The SD_RETRY_DELAY value needs to be adjusted here 15720 * whenever SD_RETRY_DELAY changes in sddef.h 15721 */ 15722 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15723 sd_print_sense_failed_msg, msgp, EIO, 15724 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15725 #else 15726 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15727 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15728 #endif 15729 15730 return (SD_SENSE_DATA_IS_INVALID); 15731 } 15732 15733 15734 15735 /* 15736 * Function: sd_decode_sense 15737 * 15738 * Description: Take recovery action(s) when SCSI Sense Data is received. 15739 * 15740 * Context: Interrupt context. 15741 */ 15742 15743 static void 15744 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15745 struct scsi_pkt *pktp) 15746 { 15747 uint8_t sense_key; 15748 15749 ASSERT(un != NULL); 15750 ASSERT(mutex_owned(SD_MUTEX(un))); 15751 ASSERT(bp != NULL); 15752 ASSERT(bp != un->un_rqs_bp); 15753 ASSERT(xp != NULL); 15754 ASSERT(pktp != NULL); 15755 15756 sense_key = scsi_sense_key(xp->xb_sense_data); 15757 15758 switch (sense_key) { 15759 case KEY_NO_SENSE: 15760 sd_sense_key_no_sense(un, bp, xp, pktp); 15761 break; 15762 case KEY_RECOVERABLE_ERROR: 15763 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15764 bp, xp, pktp); 15765 break; 15766 case KEY_NOT_READY: 15767 sd_sense_key_not_ready(un, xp->xb_sense_data, 15768 bp, xp, pktp); 15769 break; 15770 case KEY_MEDIUM_ERROR: 15771 case KEY_HARDWARE_ERROR: 15772 sd_sense_key_medium_or_hardware_error(un, 15773 xp->xb_sense_data, bp, xp, pktp); 15774 break; 15775 case KEY_ILLEGAL_REQUEST: 15776 sd_sense_key_illegal_request(un, bp, xp, pktp); 15777 break; 15778 case KEY_UNIT_ATTENTION: 15779 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15780 bp, xp, pktp); 15781 break; 15782 case KEY_WRITE_PROTECT: 15783 case KEY_VOLUME_OVERFLOW: 15784 case KEY_MISCOMPARE: 15785 sd_sense_key_fail_command(un, bp, xp, pktp); 15786 break; 15787 case KEY_BLANK_CHECK: 15788 sd_sense_key_blank_check(un, bp, xp, pktp); 15789 break; 15790 case KEY_ABORTED_COMMAND: 15791 sd_sense_key_aborted_command(un, bp, xp, pktp); 15792 break; 15793 case KEY_VENDOR_UNIQUE: 15794 case KEY_COPY_ABORTED: 15795 case KEY_EQUAL: 15796 case KEY_RESERVED: 15797 default: 15798 sd_sense_key_default(un, xp->xb_sense_data, 15799 bp, xp, pktp); 15800 break; 15801 } 15802 } 15803 15804 15805 15806 /* 15807 * Function: sd_dump_memory 15808 * 15809 * Description: Debug logging routine to print the contents of a user provided 15810 * buffer. The output of the buffer is broken up into 256 byte 15811 * segments due to a size constraint of the scsi_log 15812 * implementation.
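 *
 * A worked example of the line-budget arithmetic implemented below
 * (the title string is illustrative only): with fmt == SD_LOG_HEX
 * each entry prints as " 0xNN", so entry_len == 5, and for a
 * 10-character title such as "Sense Data":
 *
 *	avail_count = (256 - 10 - 3) / 5 = 48 entries per log line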
15813 * 15814 * Arguments: un - ptr to softstate 15815 * comp - component mask 15816 * title - "title" string to precede data when printed 15817 * data - ptr to data block to be printed 15818 * len - size of data block to be printed 15819 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 15820 * 15821 * Context: May be called from interrupt context 15822 */ 15823 15824 #define SD_DUMP_MEMORY_BUF_SIZE 256 15825 15826 static char *sd_dump_format_string[] = { 15827 " 0x%02x", 15828 " %c" 15829 }; 15830 15831 static void 15832 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 15833 int len, int fmt) 15834 { 15835 int i, j; 15836 int avail_count; 15837 int start_offset; 15838 int end_offset; 15839 size_t entry_len; 15840 char *bufp; 15841 char *local_buf; 15842 char *format_string; 15843 15844 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 15845 15846 /* 15847 * In the debug version of the driver, this function is called from a 15848 * number of places which are NOPs in the release driver. 15849 * The debug driver therefore has additional methods of filtering 15850 * debug output. 15851 */ 15852 #ifdef SDDEBUG 15853 /* 15854 * In the debug version of the driver we can reduce the amount of debug 15855 * messages by setting sd_error_level to something other than 15856 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 15857 * sd_component_mask. 15858 */ 15859 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 15860 (sd_error_level != SCSI_ERR_ALL)) { 15861 return; 15862 } 15863 if (((sd_component_mask & comp) == 0) || 15864 (sd_error_level != SCSI_ERR_ALL)) { 15865 return; 15866 } 15867 #else 15868 if (sd_error_level != SCSI_ERR_ALL) { 15869 return; 15870 } 15871 #endif 15872 15873 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 15874 bufp = local_buf; 15875 /* 15876 * Available length is the length of local_buf[], minus the 15877 * length of the title string, minus one for the ":", minus 15878 * one for the newline, minus one for the NULL terminator. 15879 * This gives the #bytes available for holding the printed 15880 * values from the given data buffer. 15881 */ 15882 if (fmt == SD_LOG_HEX) { 15883 format_string = sd_dump_format_string[0]; 15884 } else /* SD_LOG_CHAR */ { 15885 format_string = sd_dump_format_string[1]; 15886 } 15887 /* 15888 * Available count is the number of elements from the given 15889 * data buffer that we can fit into the available length. 15890 * This is based upon the size of the format string used. 15891 * Make one entry and find its size. 15892 */ 15893 (void) sprintf(bufp, format_string, data[0]); 15894 entry_len = strlen(bufp); 15895 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 15896 15897 j = 0; 15898 while (j < len) { 15899 bufp = local_buf; 15900 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 15901 start_offset = j; 15902 15903 end_offset = start_offset + avail_count; 15904 15905 (void) sprintf(bufp, "%s:", title); 15906 bufp += strlen(bufp); 15907 for (i = start_offset; ((i < end_offset) && (j < len)); 15908 i++, j++) { 15909 (void) sprintf(bufp, format_string, data[i]); 15910 bufp += entry_len; 15911 } 15912 (void) sprintf(bufp, "\n"); 15913 15914 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 15915 } 15916 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 15917 } 15918 15919 /* 15920 * Function: sd_print_sense_msg 15921 * 15922 * Description: Log a message based upon the given sense data.
15923 * 15924 * Arguments: un - ptr to associated softstate 15925 * bp - ptr to buf(9S) for the command 15926 * arg - ptr to the associated sd_sense_info struct 15927 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15928 * or SD_NO_RETRY_ISSUED 15929 * 15930 * Context: May be called from interrupt context 15931 */ 15932 15933 static void 15934 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15935 { 15936 struct sd_xbuf *xp; 15937 struct scsi_pkt *pktp; 15938 uint8_t *sensep; 15939 daddr_t request_blkno; 15940 diskaddr_t err_blkno; 15941 int severity; 15942 int pfa_flag; 15943 extern struct scsi_key_strings scsi_cmds[]; 15944 15945 ASSERT(un != NULL); 15946 ASSERT(mutex_owned(SD_MUTEX(un))); 15947 ASSERT(bp != NULL); 15948 xp = SD_GET_XBUF(bp); 15949 ASSERT(xp != NULL); 15950 pktp = SD_GET_PKTP(bp); 15951 ASSERT(pktp != NULL); 15952 ASSERT(arg != NULL); 15953 15954 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 15955 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 15956 15957 if ((code == SD_DELAYED_RETRY_ISSUED) || 15958 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 15959 severity = SCSI_ERR_RETRYABLE; 15960 } 15961 15962 /* Use absolute block number for the request block number */ 15963 request_blkno = xp->xb_blkno; 15964 15965 /* 15966 * Now try to get the error block number from the sense data 15967 */ 15968 sensep = xp->xb_sense_data; 15969 15970 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 15971 (uint64_t *)&err_blkno)) { 15972 /* 15973 * We retrieved the error block number from the information 15974 * portion of the sense data. 15975 * 15976 * For USCSI commands we are better off using the error 15977 * block no. as the requested block no. (This is the best 15978 * we can estimate.) 15979 */ 15980 if ((SD_IS_BUFIO(xp) == FALSE) && 15981 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 15982 request_blkno = err_blkno; 15983 } 15984 } else { 15985 /* 15986 * Without the es_valid bit set (for fixed format) or an 15987 * information descriptor (for descriptor format) we cannot 15988 * be certain of the error blkno, so just use the 15989 * request_blkno. 15990 */ 15991 err_blkno = (diskaddr_t)request_blkno; 15992 } 15993 15994 /* 15995 * The following will log the buffer contents for the release driver 15996 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 15997 * level is set to verbose.
15998 */ 15999 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 16000 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16001 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 16002 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 16003 16004 if (pfa_flag == FALSE) { 16005 /* This is normally only set for USCSI */ 16006 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 16007 return; 16008 } 16009 16010 if ((SD_IS_BUFIO(xp) == TRUE) && 16011 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 16012 (severity < sd_error_level))) { 16013 return; 16014 } 16015 } 16016 16017 /* 16018 * Check for Sonoma Failover and keep a count of how many failed I/O's 16019 */ 16020 if ((SD_IS_LSI(un)) && 16021 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 16022 (scsi_sense_asc(sensep) == 0x94) && 16023 (scsi_sense_ascq(sensep) == 0x01)) { 16024 un->un_sonoma_failure_count++; 16025 if (un->un_sonoma_failure_count > 1) { 16026 return; 16027 } 16028 } 16029 16030 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 16031 request_blkno, err_blkno, scsi_cmds, 16032 (struct scsi_extended_sense *)sensep, 16033 un->un_additional_codes, NULL); 16034 } 16035 16036 /* 16037 * Function: sd_sense_key_no_sense 16038 * 16039 * Description: Recovery action when sense data was not received. 16040 * 16041 * Context: May be called from interrupt context 16042 */ 16043 16044 static void 16045 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 16046 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16047 { 16048 struct sd_sense_info si; 16049 16050 ASSERT(un != NULL); 16051 ASSERT(mutex_owned(SD_MUTEX(un))); 16052 ASSERT(bp != NULL); 16053 ASSERT(xp != NULL); 16054 ASSERT(pktp != NULL); 16055 16056 si.ssi_severity = SCSI_ERR_FATAL; 16057 si.ssi_pfa_flag = FALSE; 16058 16059 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16060 16061 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16062 &si, EIO, (clock_t)0, NULL); 16063 } 16064 16065 16066 /* 16067 * Function: sd_sense_key_recoverable_error 16068 * 16069 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 16070 * 16071 * Context: May be called from interrupt context 16072 */ 16073 16074 static void 16075 sd_sense_key_recoverable_error(struct sd_lun *un, 16076 uint8_t *sense_datap, 16077 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16078 { 16079 struct sd_sense_info si; 16080 uint8_t asc = scsi_sense_asc(sense_datap); 16081 16082 ASSERT(un != NULL); 16083 ASSERT(mutex_owned(SD_MUTEX(un))); 16084 ASSERT(bp != NULL); 16085 ASSERT(xp != NULL); 16086 ASSERT(pktp != NULL); 16087 16088 /* 16089 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 16090 */ 16091 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 16092 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16093 si.ssi_severity = SCSI_ERR_INFO; 16094 si.ssi_pfa_flag = TRUE; 16095 } else { 16096 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16097 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 16098 si.ssi_severity = SCSI_ERR_RECOVERED; 16099 si.ssi_pfa_flag = FALSE; 16100 } 16101 16102 if (pktp->pkt_resid == 0) { 16103 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16104 sd_return_command(un, bp); 16105 return; 16106 } 16107 16108 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16109 &si, EIO, (clock_t)0, NULL); 16110 } 16111 16112 16113 16114 16115 /* 16116 * Function: sd_sense_key_not_ready 16117 * 16118 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
16119 * 16120 * Context: May be called from interrupt context 16121 */ 16122 16123 static void 16124 sd_sense_key_not_ready(struct sd_lun *un, 16125 uint8_t *sense_datap, 16126 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16127 { 16128 struct sd_sense_info si; 16129 uint8_t asc = scsi_sense_asc(sense_datap); 16130 uint8_t ascq = scsi_sense_ascq(sense_datap); 16131 16132 ASSERT(un != NULL); 16133 ASSERT(mutex_owned(SD_MUTEX(un))); 16134 ASSERT(bp != NULL); 16135 ASSERT(xp != NULL); 16136 ASSERT(pktp != NULL); 16137 16138 si.ssi_severity = SCSI_ERR_FATAL; 16139 si.ssi_pfa_flag = FALSE; 16140 16141 /* 16142 * Update error stats after first NOT READY error. Disks may have 16143 * been powered down and may need to be restarted. For CDROMs, 16144 * report NOT READY errors only if media is present. 16145 */ 16146 if ((ISCD(un) && (asc == 0x3A)) || 16147 (xp->xb_nr_retry_count > 0)) { 16148 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16149 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 16150 } 16151 16152 /* 16153 * Just fail if the "not ready" retry limit has been reached. 16154 */ 16155 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 16156 /* Special check for error message printing for removables. */ 16157 if (un->un_f_has_removable_media && (asc == 0x04) && 16158 (ascq >= 0x04)) { 16159 si.ssi_severity = SCSI_ERR_ALL; 16160 } 16161 goto fail_command; 16162 } 16163 16164 /* 16165 * Check the ASC and ASCQ in the sense data as needed, to determine 16166 * what to do. 16167 */ 16168 switch (asc) { 16169 case 0x04: /* LOGICAL UNIT NOT READY */ 16170 /* 16171 * Disk drives that don't spin up result in a very long delay 16172 * in format without warning messages. We will log a message 16173 * if the error level is set to verbose. 16174 */ 16175 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16176 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16177 "logical unit not ready, resetting disk\n"); 16178 } 16179 16180 /* 16181 * There are different requirements for CDROMs and disks for 16182 * the number of retries. If a CD-ROM reports this, it is 16183 * probably reading TOC and is in the process of getting 16184 * ready, so we should keep on trying for a long time to make 16185 * sure that all types of media are taken into account (for 16186 * some media the drive takes a long time to read TOC). For 16187 * disks we do not want to retry this too many times as this 16188 * can cause a long hang in format when the drive refuses to 16189 * spin up (a very common failure). 16190 */ 16191 switch (ascq) { 16192 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 16193 /* 16194 * Disk drives frequently refuse to spin up which 16195 * results in a very long hang in format without 16196 * warning messages. 16197 * 16198 * Note: This code preserves the legacy behavior of 16199 * comparing xb_nr_retry_count against zero for fibre 16200 * channel targets instead of comparing against the 16201 * un_reset_retry_count value. The reason for this 16202 * discrepancy has been so utterly lost beneath the 16203 * Sands of Time that even Indiana Jones could not 16204 * find it.
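 *
 * In short, the reset trigger used below is (preserving the legacy
 * discrepancy described above):
 *
 *	fibre:      xb_nr_retry_count > 0
 *	non-fibre:  xb_nr_retry_count > un_reset_retry_count
 *
 * in each case only when SD_LOGMASK_DIAG is set or the threshold is
 * exceeded, and only if no START STOP UNIT timeout is already
 * pending (un_startstop_timeid == NULL).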
*/ 16206 if (un->un_f_is_fibre == TRUE) { 16207 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16208 (xp->xb_nr_retry_count > 0)) && 16209 (un->un_startstop_timeid == NULL)) { 16210 scsi_log(SD_DEVINFO(un), sd_label, 16211 CE_WARN, "logical unit not ready, " 16212 "resetting disk\n"); 16213 sd_reset_target(un, pktp); 16214 } 16215 } else { 16216 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16217 (xp->xb_nr_retry_count > 16218 un->un_reset_retry_count)) && 16219 (un->un_startstop_timeid == NULL)) { 16220 scsi_log(SD_DEVINFO(un), sd_label, 16221 CE_WARN, "logical unit not ready, " 16222 "resetting disk\n"); 16223 sd_reset_target(un, pktp); 16224 } 16225 } 16226 break; 16227 16228 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 16229 /* 16230 * If the target is in the process of becoming 16231 * ready, just proceed with the retry. This can 16232 * happen with CD-ROMs that take a long time to 16233 * read TOC after a power cycle or reset. 16234 */ 16235 goto do_retry; 16236 16237 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 16238 break; 16239 16240 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 16241 /* 16242 * Retries cannot help here so just fail right away. 16243 */ 16244 goto fail_command; 16245 16246 case 0x88: 16247 /* 16248 * Vendor-unique code for T3/T4: it indicates a 16249 * path problem in a multipathed config, but as far as 16250 * the target driver is concerned it equates to a fatal 16251 * error, so we should just fail the command right away 16252 * (without printing anything to the console). If this 16253 * is not a T3/T4, fall thru to the default recovery 16254 * action. 16255 * T3/T4 is FC only, don't need to check is_fibre 16256 */ 16257 if (SD_IS_T3(un) || SD_IS_T4(un)) { 16258 sd_return_failed_command(un, bp, EIO); 16259 return; 16260 } 16261 /* FALLTHRU */ 16262 16263 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 16264 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 16265 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 16266 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 16267 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 16268 default: /* Possible future codes in SCSI spec? */ 16269 /* 16270 * For removable-media devices, do not retry if 16271 * ASCQ > 2 as these result mostly from USCSI commands 16272 * on MMC devices issued to check status of an 16273 * operation initiated in immediate mode. Also for 16274 * ASCQ >= 4 do not print console messages as these 16275 * mainly represent a user-initiated operation 16276 * instead of a system failure. 16277 */ 16278 if (un->un_f_has_removable_media) { 16279 si.ssi_severity = SCSI_ERR_ALL; 16280 goto fail_command; 16281 } 16282 break; 16283 } 16284 16285 /* 16286 * As part of our recovery attempt for the NOT READY 16287 * condition, we issue a START STOP UNIT command. However, 16288 * we want to wait for a short delay before attempting this 16289 * as there may still be more commands coming back from the 16290 * target with the check condition. To do this we use 16291 * timeout(9F) to call sd_start_stop_unit_callback() after 16292 * the delay interval expires. (sd_start_stop_unit_callback() 16293 * dispatches sd_start_stop_unit_task(), which will issue 16294 * the actual START STOP UNIT command.) The delay interval 16295 * is one-half of the delay that we will use to retry the 16296 * command that generated the NOT READY condition.
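*
* [Editorial aside, illustrative only] The shape of that deferral is a
* standard timeout(9F)-into-taskq handoff: the timeout callback runs in
* a context where it cannot sleep, so it only dispatches the task that
* does the real (blocking) work. A minimal sketch, with hypothetical
* ex_ names standing in for the sd_start_stop_unit pair:
*
*	static void
*	ex_callback(void *arg)
*	{
*		(void) taskq_dispatch(ex_tq, ex_task_func, arg, KM_NOSLEEP);
*	}
*
*	ex_timeid = timeout(ex_callback, un, SD_BSY_TIMEOUT / 2);
*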
16297 * 16298 * Note that we could just dispatch sd_start_stop_unit_task() 16299 * from here and allow it to sleep for the delay interval, 16300 * but then we would be tying up the taskq thread 16301 * uncesessarily for the duration of the delay. 16302 * 16303 * Do not issue the START STOP UNIT if the current command 16304 * is already a START STOP UNIT. 16305 */ 16306 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 16307 break; 16308 } 16309 16310 /* 16311 * Do not schedule the timeout if one is already pending. 16312 */ 16313 if (un->un_startstop_timeid != NULL) { 16314 SD_INFO(SD_LOG_ERROR, un, 16315 "sd_sense_key_not_ready: restart already issued to" 16316 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 16317 ddi_get_instance(SD_DEVINFO(un))); 16318 break; 16319 } 16320 16321 /* 16322 * Schedule the START STOP UNIT command, then queue the command 16323 * for a retry. 16324 * 16325 * Note: A timeout is not scheduled for this retry because we 16326 * want the retry to be serial with the START_STOP_UNIT. The 16327 * retry will be started when the START_STOP_UNIT is completed 16328 * in sd_start_stop_unit_task. 16329 */ 16330 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 16331 un, SD_BSY_TIMEOUT / 2); 16332 xp->xb_nr_retry_count++; 16333 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 16334 return; 16335 16336 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 16337 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16338 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16339 "unit does not respond to selection\n"); 16340 } 16341 break; 16342 16343 case 0x3A: /* MEDIUM NOT PRESENT */ 16344 if (sd_error_level >= SCSI_ERR_FATAL) { 16345 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16346 "Caddy not inserted in drive\n"); 16347 } 16348 16349 sr_ejected(un); 16350 un->un_mediastate = DKIO_EJECTED; 16351 /* The state has changed, inform the media watch routines */ 16352 cv_broadcast(&un->un_state_cv); 16353 /* Just fail if no media is present in the drive. */ 16354 goto fail_command; 16355 16356 default: 16357 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16358 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 16359 "Unit not Ready. Additional sense code 0x%x\n", 16360 asc); 16361 } 16362 break; 16363 } 16364 16365 do_retry: 16366 16367 /* 16368 * Retry the command, as some targets may report NOT READY for 16369 * several seconds after being reset. 16370 */ 16371 xp->xb_nr_retry_count++; 16372 si.ssi_severity = SCSI_ERR_RETRYABLE; 16373 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 16374 &si, EIO, SD_BSY_TIMEOUT, NULL); 16375 16376 return; 16377 16378 fail_command: 16379 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16380 sd_return_failed_command(un, bp, EIO); 16381 } 16382 16383 16384 16385 /* 16386 * Function: sd_sense_key_medium_or_hardware_error 16387 * 16388 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 16389 * sense key. 
16390 * 16391 * Context: May be called from interrupt context 16392 */ 16393 16394 static void 16395 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16396 uint8_t *sense_datap, 16397 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16398 { 16399 struct sd_sense_info si; 16400 uint8_t sense_key = scsi_sense_key(sense_datap); 16401 uint8_t asc = scsi_sense_asc(sense_datap); 16402 16403 ASSERT(un != NULL); 16404 ASSERT(mutex_owned(SD_MUTEX(un))); 16405 ASSERT(bp != NULL); 16406 ASSERT(xp != NULL); 16407 ASSERT(pktp != NULL); 16408 16409 si.ssi_severity = SCSI_ERR_FATAL; 16410 si.ssi_pfa_flag = FALSE; 16411 16412 if (sense_key == KEY_MEDIUM_ERROR) { 16413 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16414 } 16415 16416 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16417 16418 if ((un->un_reset_retry_count != 0) && 16419 (xp->xb_retry_count == un->un_reset_retry_count)) { 16420 mutex_exit(SD_MUTEX(un)); 16421 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16422 if (un->un_f_allow_bus_device_reset == TRUE) { 16423 16424 boolean_t try_resetting_target = B_TRUE; 16425 16426 /* 16427 * We need to be able to handle specific ASC when we are 16428 * handling a KEY_HARDWARE_ERROR. In particular 16429 * taking the default action of resetting the target may 16430 * not be the appropriate way to attempt recovery. 16431 * Resetting a target because of a single LUN failure 16432 * victimizes all LUNs on that target. 16433 * 16434 * This is true for the LSI arrays, if an LSI 16435 * array controller returns an ASC of 0x84 (LUN Dead) we 16436 * should trust it. 16437 */ 16438 16439 if (sense_key == KEY_HARDWARE_ERROR) { 16440 switch (asc) { 16441 case 0x84: 16442 if (SD_IS_LSI(un)) { 16443 try_resetting_target = B_FALSE; 16444 } 16445 break; 16446 default: 16447 break; 16448 } 16449 } 16450 16451 if (try_resetting_target == B_TRUE) { 16452 int reset_retval = 0; 16453 if (un->un_f_lun_reset_enabled == TRUE) { 16454 SD_TRACE(SD_LOG_IO_CORE, un, 16455 "sd_sense_key_medium_or_hardware_" 16456 "error: issuing RESET_LUN\n"); 16457 reset_retval = 16458 scsi_reset(SD_ADDRESS(un), 16459 RESET_LUN); 16460 } 16461 if (reset_retval == 0) { 16462 SD_TRACE(SD_LOG_IO_CORE, un, 16463 "sd_sense_key_medium_or_hardware_" 16464 "error: issuing RESET_TARGET\n"); 16465 (void) scsi_reset(SD_ADDRESS(un), 16466 RESET_TARGET); 16467 } 16468 } 16469 } 16470 mutex_enter(SD_MUTEX(un)); 16471 } 16472 16473 /* 16474 * This really ought to be a fatal error, but we will retry anyway 16475 * as some drives report this as a spurious error. 16476 */ 16477 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16478 &si, EIO, (clock_t)0, NULL); 16479 } 16480 16481 16482 16483 /* 16484 * Function: sd_sense_key_illegal_request 16485 * 16486 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
16487 * 16488 * Context: May be called from interrupt context 16489 */ 16490 16491 static void 16492 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16493 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16494 { 16495 struct sd_sense_info si; 16496 16497 ASSERT(un != NULL); 16498 ASSERT(mutex_owned(SD_MUTEX(un))); 16499 ASSERT(bp != NULL); 16500 ASSERT(xp != NULL); 16501 ASSERT(pktp != NULL); 16502 16503 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16504 16505 si.ssi_severity = SCSI_ERR_INFO; 16506 si.ssi_pfa_flag = FALSE; 16507 16508 /* Pointless to retry if the target thinks it's an illegal request */ 16509 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16510 sd_return_failed_command(un, bp, EIO); 16511 } 16512 16513 16514 16515 16516 /* 16517 * Function: sd_sense_key_unit_attention 16518 * 16519 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16520 * 16521 * Context: May be called from interrupt context 16522 */ 16523 16524 static void 16525 sd_sense_key_unit_attention(struct sd_lun *un, 16526 uint8_t *sense_datap, 16527 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16528 { 16529 /* 16530 * For UNIT ATTENTION we allow retries for one minute. Devices 16531 * like Sonoma can return UNIT ATTENTION close to a minute 16532 * under certain conditions. 16533 */ 16534 int retry_check_flag = SD_RETRIES_UA; 16535 boolean_t kstat_updated = B_FALSE; 16536 struct sd_sense_info si; 16537 uint8_t asc = scsi_sense_asc(sense_datap); 16538 uint8_t ascq = scsi_sense_ascq(sense_datap); 16539 16540 ASSERT(un != NULL); 16541 ASSERT(mutex_owned(SD_MUTEX(un))); 16542 ASSERT(bp != NULL); 16543 ASSERT(xp != NULL); 16544 ASSERT(pktp != NULL); 16545 16546 si.ssi_severity = SCSI_ERR_INFO; 16547 si.ssi_pfa_flag = FALSE; 16548 16549 16550 switch (asc) { 16551 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16552 if (sd_report_pfa != 0) { 16553 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16554 si.ssi_pfa_flag = TRUE; 16555 retry_check_flag = SD_RETRIES_STANDARD; 16556 goto do_retry; 16557 } 16558 16559 break; 16560 16561 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16562 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16563 un->un_resvd_status |= 16564 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16565 } 16566 #ifdef _LP64 16567 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16568 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16569 un, KM_NOSLEEP) == 0) { 16570 /* 16571 * If we can't dispatch the task we'll just 16572 * live without descriptor sense. We can 16573 * try again on the next "unit attention" 16574 */ 16575 SD_ERROR(SD_LOG_ERROR, un, 16576 "sd_sense_key_unit_attention: " 16577 "Could not dispatch " 16578 "sd_reenable_dsense_task\n"); 16579 } 16580 } 16581 #endif /* _LP64 */ 16582 /* FALLTHRU */ 16583 16584 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16585 if (!un->un_f_has_removable_media) { 16586 break; 16587 } 16588 16589 /* 16590 * When we get a unit attention from a removable-media device, 16591 * it may be in a state that will take a long time to recover 16592 * (e.g., from a reset). Since we are executing in interrupt 16593 * context here, we cannot wait around for the device to come 16594 * back. So hand this command off to sd_media_change_task() 16595 * for deferred processing under taskq thread context. (Note 16596 * that the command still may be failed if a problem is 16597 * encountered at a later time.) 
*/ 16599 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16600 KM_NOSLEEP) == 0) { 16601 /* 16602 * Cannot dispatch the request so fail the command. 16603 */ 16604 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16605 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16606 si.ssi_severity = SCSI_ERR_FATAL; 16607 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16608 sd_return_failed_command(un, bp, EIO); 16609 } 16610 16611 /* 16612 * If the dispatch of sd_media_change_task() failed, we have 16613 * already updated the kstats above. If the dispatch succeeded, 16614 * the kstats will be updated later if an error is encountered. 16615 * Either way, set the kstat_updated flag here. 16616 */ 16617 kstat_updated = B_TRUE; 16618 16619 /* 16620 * Either the command has been successfully dispatched to a 16621 * task Q for retrying, or the dispatch failed. In either case 16622 * do NOT retry again by calling sd_retry_command. This sets up 16623 * two retries of the same command and when one completes and 16624 * frees the resources the other will access freed memory, 16625 * a bad thing. 16626 */ 16627 return; 16628 16629 default: 16630 break; 16631 } 16632 16633 /* 16634 * ASC ASCQ 16635 * 2A 09 Capacity data has changed 16636 * 2A 01 Mode parameters changed 16637 * 3F 0E Reported luns data has changed 16638 * Arrays that support logical unit expansion should report 16639 * capacity changes (2Ah/09). Mode parameters changed and 16640 * reported luns data has changed serve as approximations. 16641 */ 16642 if (((asc == 0x2a) && (ascq == 0x09)) || 16643 ((asc == 0x2a) && (ascq == 0x01)) || 16644 ((asc == 0x3f) && (ascq == 0x0e))) { 16645 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 16646 KM_NOSLEEP) == 0) { 16647 SD_ERROR(SD_LOG_ERROR, un, 16648 "sd_sense_key_unit_attention: " 16649 "Could not dispatch sd_target_change_task\n"); 16650 } 16651 } 16652 16653 /* 16654 * Update kstat if we haven't done that. 16655 */ 16656 if (!kstat_updated) { 16657 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16658 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16659 } 16660 16661 do_retry: 16662 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16663 EIO, SD_UA_RETRY_DELAY, NULL); 16664 } 16665 16666 16667 16668 /* 16669 * Function: sd_sense_key_fail_command 16670 * 16671 * Description: Used to fail a command when we don't like the sense key that 16672 * was returned. 16673 * 16674 * Context: May be called from interrupt context 16675 */ 16676 16677 static void 16678 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16679 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16680 { 16681 struct sd_sense_info si; 16682 16683 ASSERT(un != NULL); 16684 ASSERT(mutex_owned(SD_MUTEX(un))); 16685 ASSERT(bp != NULL); 16686 ASSERT(xp != NULL); 16687 ASSERT(pktp != NULL); 16688 16689 si.ssi_severity = SCSI_ERR_FATAL; 16690 si.ssi_pfa_flag = FALSE; 16691 16692 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16693 sd_return_failed_command(un, bp, EIO); 16694 } 16695 16696 16697 16698 /* 16699 * Function: sd_sense_key_blank_check 16700 * 16701 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16702 * Has no monetary connotation.
16703 * 16704 * Context: May be called from interrupt context 16705 */ 16706 16707 static void 16708 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16709 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16710 { 16711 struct sd_sense_info si; 16712 16713 ASSERT(un != NULL); 16714 ASSERT(mutex_owned(SD_MUTEX(un))); 16715 ASSERT(bp != NULL); 16716 ASSERT(xp != NULL); 16717 ASSERT(pktp != NULL); 16718 16719 /* 16720 * Blank check is not fatal for removable devices, therefore 16721 * it does not require a console message. 16722 */ 16723 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16724 SCSI_ERR_FATAL; 16725 si.ssi_pfa_flag = FALSE; 16726 16727 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16728 sd_return_failed_command(un, bp, EIO); 16729 } 16730 16731 16732 16733 16734 /* 16735 * Function: sd_sense_key_aborted_command 16736 * 16737 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 16738 * 16739 * Context: May be called from interrupt context 16740 */ 16741 16742 static void 16743 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16744 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16745 { 16746 struct sd_sense_info si; 16747 16748 ASSERT(un != NULL); 16749 ASSERT(mutex_owned(SD_MUTEX(un))); 16750 ASSERT(bp != NULL); 16751 ASSERT(xp != NULL); 16752 ASSERT(pktp != NULL); 16753 16754 si.ssi_severity = SCSI_ERR_FATAL; 16755 si.ssi_pfa_flag = FALSE; 16756 16757 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16758 16759 /* 16760 * This really ought to be a fatal error, but we will retry anyway 16761 * as some drives report this as a spurious error. 16762 */ 16763 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16764 &si, EIO, drv_usectohz(100000), NULL); 16765 } 16766 16767 16768 16769 /* 16770 * Function: sd_sense_key_default 16771 * 16772 * Description: Default recovery action for several SCSI sense keys (basically 16773 * attempts a retry). 16774 * 16775 * Context: May be called from interrupt context 16776 */ 16777 16778 static void 16779 sd_sense_key_default(struct sd_lun *un, 16780 uint8_t *sense_datap, 16781 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16782 { 16783 struct sd_sense_info si; 16784 uint8_t sense_key = scsi_sense_key(sense_datap); 16785 16786 ASSERT(un != NULL); 16787 ASSERT(mutex_owned(SD_MUTEX(un))); 16788 ASSERT(bp != NULL); 16789 ASSERT(xp != NULL); 16790 ASSERT(pktp != NULL); 16791 16792 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16793 16794 /* 16795 * Undecoded sense key. Attempt retries and hope that will fix 16796 * the problem. Otherwise, we're dead. 16797 */ 16798 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16799 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16800 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16801 } 16802 16803 si.ssi_severity = SCSI_ERR_FATAL; 16804 si.ssi_pfa_flag = FALSE; 16805 16806 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16807 &si, EIO, (clock_t)0, NULL); 16808 } 16809 16810 16811 16812 /* 16813 * Function: sd_print_retry_msg 16814 * 16815 * Description: Print a message indicating the retry action being taken. 16816 * 16817 * Arguments: un - ptr to associated softstate 16818 * bp - ptr to buf(9S) for the command 16819 * arg - not used. 
16820 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16821 * or SD_NO_RETRY_ISSUED 16822 * 16823 * Context: May be called from interrupt context 16824 */ 16825 /* ARGSUSED */ 16826 static void 16827 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16828 { 16829 struct sd_xbuf *xp; 16830 struct scsi_pkt *pktp; 16831 char *reasonp; 16832 char *msgp; 16833 16834 ASSERT(un != NULL); 16835 ASSERT(mutex_owned(SD_MUTEX(un))); 16836 ASSERT(bp != NULL); 16837 pktp = SD_GET_PKTP(bp); 16838 ASSERT(pktp != NULL); 16839 xp = SD_GET_XBUF(bp); 16840 ASSERT(xp != NULL); 16841 16842 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16843 mutex_enter(&un->un_pm_mutex); 16844 if ((un->un_state == SD_STATE_SUSPENDED) || 16845 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16846 (pktp->pkt_flags & FLAG_SILENT)) { 16847 mutex_exit(&un->un_pm_mutex); 16848 goto update_pkt_reason; 16849 } 16850 mutex_exit(&un->un_pm_mutex); 16851 16852 /* 16853 * Suppress messages if they are all the same pkt_reason; with 16854 * TQ, many (up to 256) are returned with the same pkt_reason. 16855 * If we are in panic, then suppress the retry messages. 16856 */ 16857 switch (flag) { 16858 case SD_NO_RETRY_ISSUED: 16859 msgp = "giving up"; 16860 break; 16861 case SD_IMMEDIATE_RETRY_ISSUED: 16862 case SD_DELAYED_RETRY_ISSUED: 16863 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16864 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16865 (sd_error_level != SCSI_ERR_ALL))) { 16866 return; 16867 } 16868 msgp = "retrying command"; 16869 break; 16870 default: 16871 goto update_pkt_reason; 16872 } 16873 16874 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16875 scsi_rname(pktp->pkt_reason)); 16876 16877 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16878 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16879 16880 update_pkt_reason: 16881 /* 16882 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16883 * This is to prevent multiple console messages for the same failure 16884 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16885 * when the command is retried successfully because there still may be 16886 * more commands coming back with the same value of pktp->pkt_reason. 16887 */ 16888 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16889 un->un_last_pkt_reason = pktp->pkt_reason; 16890 } 16891 } 16892 16893 16894 /* 16895 * Function: sd_print_cmd_incomplete_msg 16896 * 16897 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16898 * 16899 * Arguments: un - ptr to associated softstate 16900 * bp - ptr to buf(9S) for the command 16901 * arg - passed to sd_print_retry_msg() 16902 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16903 * or SD_NO_RETRY_ISSUED 16904 * 16905 * Context: May be called from interrupt context 16906 */ 16907 16908 static void 16909 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16910 int code) 16911 { 16912 dev_info_t *dip; 16913 16914 ASSERT(un != NULL); 16915 ASSERT(mutex_owned(SD_MUTEX(un))); 16916 ASSERT(bp != NULL); 16917 16918 switch (code) { 16919 case SD_NO_RETRY_ISSUED: 16920 /* Command was failed. Someone turned off this target? 
*/ 16921 if (un->un_state != SD_STATE_OFFLINE) { 16922 /* 16923 * Suppress message if we are detaching and 16924 * device has been disconnected 16925 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16926 * private interface and not part of the DDI 16927 */ 16928 dip = un->un_sd->sd_dev; 16929 if (!(DEVI_IS_DETACHING(dip) && 16930 DEVI_IS_DEVICE_REMOVED(dip))) { 16931 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16932 "disk not responding to selection\n"); 16933 } 16934 New_state(un, SD_STATE_OFFLINE); 16935 } 16936 break; 16937 16938 case SD_DELAYED_RETRY_ISSUED: 16939 case SD_IMMEDIATE_RETRY_ISSUED: 16940 default: 16941 /* Command was successfully queued for retry */ 16942 sd_print_retry_msg(un, bp, arg, code); 16943 break; 16944 } 16945 } 16946 16947 16948 /* 16949 * Function: sd_pkt_reason_cmd_incomplete 16950 * 16951 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 16952 * 16953 * Context: May be called from interrupt context 16954 */ 16955 16956 static void 16957 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16958 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16959 { 16960 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16961 16962 ASSERT(un != NULL); 16963 ASSERT(mutex_owned(SD_MUTEX(un))); 16964 ASSERT(bp != NULL); 16965 ASSERT(xp != NULL); 16966 ASSERT(pktp != NULL); 16967 16968 /* Do not do a reset if selection did not complete */ 16969 /* Note: Should this not just check the bit? */ 16970 if (pktp->pkt_state != STATE_GOT_BUS) { 16971 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16972 sd_reset_target(un, pktp); 16973 } 16974 16975 /* 16976 * If the target was not successfully selected, then set 16977 * SD_RETRIES_FAILFAST to indicate that we lost communication 16978 * with the target, and further retries and/or commands are 16979 * likely to take a long time. 16980 */ 16981 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16982 flag |= SD_RETRIES_FAILFAST; 16983 } 16984 16985 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16986 16987 sd_retry_command(un, bp, flag, 16988 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16989 } 16990 16991 16992 16993 /* 16994 * Function: sd_pkt_reason_cmd_tran_err 16995 * 16996 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16997 * 16998 * Context: May be called from interrupt context 16999 */ 17000 17001 static void 17002 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 17003 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17004 { 17005 ASSERT(un != NULL); 17006 ASSERT(mutex_owned(SD_MUTEX(un))); 17007 ASSERT(bp != NULL); 17008 ASSERT(xp != NULL); 17009 ASSERT(pktp != NULL); 17010 17011 /* 17012 * Do not reset if we got a parity error, or if 17013 * selection did not complete. 17014 */ 17015 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17016 /* Note: Should this not just check the bit for pkt_state? */ 17017 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 17018 (pktp->pkt_state != STATE_GOT_BUS)) { 17019 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17020 sd_reset_target(un, pktp); 17021 } 17022 17023 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17024 17025 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17026 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17027 } 17028 17029 17030 17031 /* 17032 * Function: sd_pkt_reason_cmd_reset 17033 * 17034 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
17035 * 17036 * Context: May be called from interrupt context 17037 */ 17038 17039 static void 17040 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 17041 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17042 { 17043 ASSERT(un != NULL); 17044 ASSERT(mutex_owned(SD_MUTEX(un))); 17045 ASSERT(bp != NULL); 17046 ASSERT(xp != NULL); 17047 ASSERT(pktp != NULL); 17048 17049 /* The target may still be running the command, so try to reset. */ 17050 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17051 sd_reset_target(un, pktp); 17052 17053 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17054 17055 /* 17056 * If pkt_reason is CMD_RESET chances are that this pkt got 17057 * reset because another target on this bus caused it. The target 17058 * that caused it should get CMD_TIMEOUT with pkt_statistics 17059 * of STAT_TIMEOUT/STAT_DEV_RESET. 17060 */ 17061 17062 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17063 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17064 } 17065 17066 17067 17068 17069 /* 17070 * Function: sd_pkt_reason_cmd_aborted 17071 * 17072 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 17073 * 17074 * Context: May be called from interrupt context 17075 */ 17076 17077 static void 17078 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 17079 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17080 { 17081 ASSERT(un != NULL); 17082 ASSERT(mutex_owned(SD_MUTEX(un))); 17083 ASSERT(bp != NULL); 17084 ASSERT(xp != NULL); 17085 ASSERT(pktp != NULL); 17086 17087 /* The target may still be running the command, so try to reset. */ 17088 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17089 sd_reset_target(un, pktp); 17090 17091 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17092 17093 /* 17094 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17095 * aborted because another target on this bus caused it. The target 17096 * that caused it should get CMD_TIMEOUT with pkt_statistics 17097 * of STAT_TIMEOUT/STAT_DEV_RESET. 17098 */ 17099 17100 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17101 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17102 } 17103 17104 17105 17106 /* 17107 * Function: sd_pkt_reason_cmd_timeout 17108 * 17109 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17110 * 17111 * Context: May be called from interrupt context 17112 */ 17113 17114 static void 17115 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17116 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17117 { 17118 ASSERT(un != NULL); 17119 ASSERT(mutex_owned(SD_MUTEX(un))); 17120 ASSERT(bp != NULL); 17121 ASSERT(xp != NULL); 17122 ASSERT(pktp != NULL); 17123 17124 17125 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17126 sd_reset_target(un, pktp); 17127 17128 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17129 17130 /* 17131 * A command timeout indicates that we could not establish 17132 * communication with the target, so set SD_RETRIES_FAILFAST 17133 * as further retries/commands are likely to take a long time. 17134 */ 17135 sd_retry_command(un, bp, 17136 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17137 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17138 } 17139 17140 17141 17142 /* 17143 * Function: sd_pkt_reason_cmd_unx_bus_free 17144 * 17145 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
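*
* [Editorial aside, illustrative only] In the handler below, the retry
* message callback is dropped entirely when the transport recorded a
* parity error, i.e. the callback pointer is what gets selected:
*
*	funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
*	    sd_print_retry_msg : NULL;
*
* That assignment is quoted from the function below; passing a NULL
* callback to sd_retry_command() simply suppresses the console message.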
17146 * 17147 * Context: May be called from interrupt context 17148 */ 17149 17150 static void 17151 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17152 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17153 { 17154 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17155 17156 ASSERT(un != NULL); 17157 ASSERT(mutex_owned(SD_MUTEX(un))); 17158 ASSERT(bp != NULL); 17159 ASSERT(xp != NULL); 17160 ASSERT(pktp != NULL); 17161 17162 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17163 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17164 17165 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17166 sd_print_retry_msg : NULL; 17167 17168 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17169 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17170 } 17171 17172 17173 /* 17174 * Function: sd_pkt_reason_cmd_tag_reject 17175 * 17176 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 17177 * 17178 * Context: May be called from interrupt context 17179 */ 17180 17181 static void 17182 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17183 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17184 { 17185 ASSERT(un != NULL); 17186 ASSERT(mutex_owned(SD_MUTEX(un))); 17187 ASSERT(bp != NULL); 17188 ASSERT(xp != NULL); 17189 ASSERT(pktp != NULL); 17190 17191 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17192 pktp->pkt_flags = 0; 17193 un->un_tagflags = 0; 17194 if (un->un_f_opt_queueing == TRUE) { 17195 un->un_throttle = min(un->un_throttle, 3); 17196 } else { 17197 un->un_throttle = 1; 17198 } 17199 mutex_exit(SD_MUTEX(un)); 17200 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17201 mutex_enter(SD_MUTEX(un)); 17202 17203 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17204 17205 /* Legacy behavior not to check retry counts here. */ 17206 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17207 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17208 } 17209 17210 17211 /* 17212 * Function: sd_pkt_reason_default 17213 * 17214 * Description: Default recovery actions for SCSA pkt_reason values that 17215 * do not have more explicit recovery actions. 17216 * 17217 * Context: May be called from interrupt context 17218 */ 17219 17220 static void 17221 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17222 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17223 { 17224 ASSERT(un != NULL); 17225 ASSERT(mutex_owned(SD_MUTEX(un))); 17226 ASSERT(bp != NULL); 17227 ASSERT(xp != NULL); 17228 ASSERT(pktp != NULL); 17229 17230 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17231 sd_reset_target(un, pktp); 17232 17233 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17234 17235 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17236 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17237 } 17238 17239 17240 17241 /* 17242 * Function: sd_pkt_status_check_condition 17243 * 17244 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
17245 * 17246 * Context: May be called from interrupt context 17247 */ 17248 17249 static void 17250 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17251 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17252 { 17253 ASSERT(un != NULL); 17254 ASSERT(mutex_owned(SD_MUTEX(un))); 17255 ASSERT(bp != NULL); 17256 ASSERT(xp != NULL); 17257 ASSERT(pktp != NULL); 17258 17259 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17260 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17261 17262 /* 17263 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17264 * command will be retried after the request sense). Otherwise, retry 17265 * the command. Note: we are issuing the request sense even though the 17266 * retry limit may have been reached for the failed command. 17267 */ 17268 if (un->un_f_arq_enabled == FALSE) { 17269 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17270 "no ARQ, sending request sense command\n"); 17271 sd_send_request_sense_command(un, bp, pktp); 17272 } else { 17273 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17274 "ARQ, retrying request sense command\n"); 17275 #if defined(__i386) || defined(__amd64) 17276 /* 17277 * The SD_RETRY_DELAY value needs to be adjusted here 17278 * whenever SD_RETRY_DELAY changes in sddef.h. 17279 */ 17280 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17281 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 17282 NULL); 17283 #else 17284 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 17285 EIO, SD_RETRY_DELAY, NULL); 17286 #endif 17287 } 17288 17289 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 17290 } 17291 17292 17293 /* 17294 * Function: sd_pkt_status_busy 17295 * 17296 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 17297 * 17298 * Context: May be called from interrupt context 17299 */ 17300 17301 static void 17302 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17303 struct scsi_pkt *pktp) 17304 { 17305 ASSERT(un != NULL); 17306 ASSERT(mutex_owned(SD_MUTEX(un))); 17307 ASSERT(bp != NULL); 17308 ASSERT(xp != NULL); 17309 ASSERT(pktp != NULL); 17310 17311 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17312 "sd_pkt_status_busy: entry\n"); 17313 17314 /* If retries are exhausted, just fail the command. */ 17315 if (xp->xb_retry_count >= un->un_busy_retry_count) { 17316 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17317 "device busy too long\n"); 17318 sd_return_failed_command(un, bp, EIO); 17319 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17320 "sd_pkt_status_busy: exit\n"); 17321 return; 17322 } 17323 xp->xb_retry_count++; 17324 17325 /* 17326 * Try to reset the target. However, we do not want to perform 17327 * more than one reset if the device continues to fail. The reset 17328 * will be performed when the retry count reaches the reset 17329 * threshold. This threshold should be set such that at least 17330 * one retry is issued before the reset is performed. 17331 */ 17332 if (xp->xb_retry_count == 17333 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 17334 int rval = 0; 17335 mutex_exit(SD_MUTEX(un)); 17336 if (un->un_f_allow_bus_device_reset == TRUE) { 17337 /* 17338 * First try to reset the LUN; if we cannot then 17339 * try to reset the target.
17340 */ 17341 if (un->un_f_lun_reset_enabled == TRUE) { 17342 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17343 "sd_pkt_status_busy: RESET_LUN\n"); 17344 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17345 } 17346 if (rval == 0) { 17347 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17348 "sd_pkt_status_busy: RESET_TARGET\n"); 17349 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17350 } 17351 } 17352 if (rval == 0) { 17353 /* 17354 * If the RESET_LUN and/or RESET_TARGET failed, 17355 * try RESET_ALL 17356 */ 17357 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17358 "sd_pkt_status_busy: RESET_ALL\n"); 17359 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 17360 } 17361 mutex_enter(SD_MUTEX(un)); 17362 if (rval == 0) { 17363 /* 17364 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 17365 * At this point we give up & fail the command. 17366 */ 17367 sd_return_failed_command(un, bp, EIO); 17368 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17369 "sd_pkt_status_busy: exit (failed cmd)\n"); 17370 return; 17371 } 17372 } 17373 17374 /* 17375 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17376 * we have already checked the retry counts above. 17377 */ 17378 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17379 EIO, SD_BSY_TIMEOUT, NULL); 17380 17381 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17382 "sd_pkt_status_busy: exit\n"); 17383 } 17384 17385 17386 /* 17387 * Function: sd_pkt_status_reservation_conflict 17388 * 17389 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17390 * command status. 17391 * 17392 * Context: May be called from interrupt context 17393 */ 17394 17395 static void 17396 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17397 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17398 { 17399 ASSERT(un != NULL); 17400 ASSERT(mutex_owned(SD_MUTEX(un))); 17401 ASSERT(bp != NULL); 17402 ASSERT(xp != NULL); 17403 ASSERT(pktp != NULL); 17404 17405 /* 17406 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17407 * conflict could be due to various reasons like incorrect keys, not 17408 * registered or not reserved etc. So, we return EACCES to the caller. 17409 */ 17410 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17411 int cmd = SD_GET_PKT_OPCODE(pktp); 17412 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17413 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17414 sd_return_failed_command(un, bp, EACCES); 17415 return; 17416 } 17417 } 17418 17419 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17420 17421 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17422 if (sd_failfast_enable != 0) { 17423 /* By definition, we must panic here.... */ 17424 sd_panic_for_res_conflict(un); 17425 /*NOTREACHED*/ 17426 } 17427 SD_ERROR(SD_LOG_IO, un, 17428 "sd_handle_resv_conflict: Disk Reserved\n"); 17429 sd_return_failed_command(un, bp, EACCES); 17430 return; 17431 } 17432 17433 /* 17434 * 1147670: retry only if sd_retry_on_reservation_conflict 17435 * property is set (default is 1). Retries will not succeed 17436 * on a disk reserved by another initiator. HA systems 17437 * may reset this via sd.conf to avoid these retries. 17438 * 17439 * Note: The legacy return code for this failure is EIO, however EACCES 17440 * seems more appropriate for a reservation conflict. 17441 */ 17442 if (sd_retry_on_reservation_conflict == 0) { 17443 SD_ERROR(SD_LOG_IO, un, 17444 "sd_handle_resv_conflict: Device Reserved\n"); 17445 sd_return_failed_command(un, bp, EIO); 17446 return; 17447 } 17448 17449 /* 17450 * Retry the command if we can. 
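*
* [Editorial aside, illustrative only] The errno policy of this handler,
* reduced to a hypothetical helper (ignoring the failfast/panic path):
* persistent-reservation commands against a SCSI-3 reservation fail with
* EACCES, while the legacy paths keep returning EIO:
*
*	static int
*	ex_resv_conflict_errno(int scsi3_resv, uchar_t opcode)
*	{
*		if (scsi3_resv && ((opcode == SCMD_PERSISTENT_RESERVE_IN) ||
*		    (opcode == SCMD_PERSISTENT_RESERVE_OUT)))
*			return (EACCES);
*		return (EIO);
*	}
*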
17451 * 17452 * Note: The legacy return code for this failure is EIO, however EACCES 17453 * seems more appropriate for a reservation conflict. 17454 */ 17455 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17456 (clock_t)2, NULL); 17457 } 17458 17459 17460 17461 /* 17462 * Function: sd_pkt_status_qfull 17463 * 17464 * Description: Handle a QUEUE FULL condition from the target. This can 17465 * occur if the HBA does not handle the queue full condition. 17466 * (Basically this means third-party HBAs as Sun HBAs will 17467 * handle the queue full condition.) Note that if there are 17468 * some commands already in the transport, then the queue full 17469 * has occurred because the queue for this nexus is actually 17470 * full. If there are no commands in the transport, then the 17471 * queue full is resulting from some other initiator or lun 17472 * consuming all the resources at the target. 17473 * 17474 * Context: May be called from interrupt context 17475 */ 17476 17477 static void 17478 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17479 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17480 { 17481 ASSERT(un != NULL); 17482 ASSERT(mutex_owned(SD_MUTEX(un))); 17483 ASSERT(bp != NULL); 17484 ASSERT(xp != NULL); 17485 ASSERT(pktp != NULL); 17486 17487 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17488 "sd_pkt_status_qfull: entry\n"); 17489 17490 /* 17491 * Just lower the QFULL throttle and retry the command. Note that 17492 * we do not limit the number of retries here. 17493 */ 17494 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17495 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17496 SD_RESTART_TIMEOUT, NULL); 17497 17498 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17499 "sd_pkt_status_qfull: exit\n"); 17500 } 17501 17502 17503 /* 17504 * Function: sd_reset_target 17505 * 17506 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17507 * RESET_TARGET, or RESET_ALL. 17508 * 17509 * Context: May be called under interrupt context. 17510 */ 17511 17512 static void 17513 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17514 { 17515 int rval = 0; 17516 17517 ASSERT(un != NULL); 17518 ASSERT(mutex_owned(SD_MUTEX(un))); 17519 ASSERT(pktp != NULL); 17520 17521 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17522 17523 /* 17524 * No need to reset if the transport layer has already done so. 
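*
* [Editorial aside, illustrative only] The escalation implemented below
* tries the narrowest reset first and widens only on failure, since each
* scsi_reset(9F) level disturbs more of the bus. Distilled, with the two
* flags standing in for un_f_allow_bus_device_reset and
* un_f_lun_reset_enabled:
*
*	static void
*	ex_reset_ladder(struct scsi_address *ap, int bdr_ok, int lun_ok)
*	{
*		int r = 0;
*		if (bdr_ok && lun_ok)
*			r = scsi_reset(ap, RESET_LUN);
*		if (bdr_ok && (r == 0))
*			r = scsi_reset(ap, RESET_TARGET);
*		if (r == 0)
*			(void) scsi_reset(ap, RESET_ALL);
*	}
*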
17525 */ 17526 if ((pktp->pkt_statistics & 17527 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17528 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17529 "sd_reset_target: no reset\n"); 17530 return; 17531 } 17532 17533 mutex_exit(SD_MUTEX(un)); 17534 17535 if (un->un_f_allow_bus_device_reset == TRUE) { 17536 if (un->un_f_lun_reset_enabled == TRUE) { 17537 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17538 "sd_reset_target: RESET_LUN\n"); 17539 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17540 } 17541 if (rval == 0) { 17542 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17543 "sd_reset_target: RESET_TARGET\n"); 17544 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17545 } 17546 } 17547 17548 if (rval == 0) { 17549 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17550 "sd_reset_target: RESET_ALL\n"); 17551 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17552 } 17553 17554 mutex_enter(SD_MUTEX(un)); 17555 17556 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17557 } 17558 17559 /* 17560 * Function: sd_target_change_task 17561 * 17562 * Description: Handle dynamic target change 17563 * 17564 * Context: Executes in a taskq() thread context 17565 */ 17566 static void 17567 sd_target_change_task(void *arg) 17568 { 17569 struct sd_lun *un = arg; 17570 uint64_t capacity; 17571 diskaddr_t label_cap; 17572 uint_t lbasize; 17573 17574 ASSERT(un != NULL); 17575 ASSERT(!mutex_owned(SD_MUTEX(un))); 17576 17577 if ((un->un_f_blockcount_is_valid == FALSE) || 17578 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 17579 return; 17580 } 17581 17582 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 17583 &lbasize, SD_PATH_DIRECT) != 0) { 17584 SD_ERROR(SD_LOG_ERROR, un, 17585 "sd_target_change_task: fail to read capacity\n"); 17586 return; 17587 } 17588 17589 mutex_enter(SD_MUTEX(un)); 17590 if (capacity <= un->un_blockcount) { 17591 mutex_exit(SD_MUTEX(un)); 17592 return; 17593 } 17594 17595 sd_update_block_info(un, lbasize, capacity); 17596 mutex_exit(SD_MUTEX(un)); 17597 17598 /* 17599 * If lun is EFI labeled and lun capacity is greater than the 17600 * capacity contained in the label, log a sys event. 17601 */ 17602 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 17603 (void*)SD_PATH_DIRECT) == 0) { 17604 mutex_enter(SD_MUTEX(un)); 17605 if (un->un_f_blockcount_is_valid && 17606 un->un_blockcount > label_cap) { 17607 mutex_exit(SD_MUTEX(un)); 17608 sd_log_lun_expansion_event(un, KM_SLEEP); 17609 } else { 17610 mutex_exit(SD_MUTEX(un)); 17611 } 17612 } 17613 } 17614 17615 /* 17616 * Function: sd_log_lun_expansion_event 17617 * 17618 * Description: Log lun expansion sys event 17619 * 17620 * Context: Never called from interrupt context 17621 */ 17622 static void 17623 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 17624 { 17625 int err; 17626 char *path; 17627 nvlist_t *dle_attr_list; 17628 17629 /* Allocate and build sysevent attribute list */ 17630 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 17631 if (err != 0) { 17632 SD_ERROR(SD_LOG_ERROR, un, 17633 "sd_log_lun_expansion_event: fail to allocate space\n"); 17634 return; 17635 } 17636 17637 path = kmem_alloc(MAXPATHLEN, km_flag); 17638 if (path == NULL) { 17639 nvlist_free(dle_attr_list); 17640 SD_ERROR(SD_LOG_ERROR, un, 17641 "sd_log_lun_expansion_event: fail to allocate space\n"); 17642 return; 17643 } 17644 /* 17645 * Add path attribute to identify the lun. 17646 * We are using minor node 'a' as the sysevent attribute. 
17647 */ 17648 (void) snprintf(path, MAXPATHLEN, "/devices"); 17649 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 17650 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 17651 ":a"); 17652 17653 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 17654 if (err != 0) { 17655 nvlist_free(dle_attr_list); 17656 kmem_free(path, MAXPATHLEN); 17657 SD_ERROR(SD_LOG_ERROR, un, 17658 "sd_log_lun_expansion_event: fail to add attribute\n"); 17659 return; 17660 } 17661 17662 /* Log dynamic lun expansion sysevent */ 17663 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 17664 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 17665 if (err != DDI_SUCCESS) { 17666 SD_ERROR(SD_LOG_ERROR, un, 17667 "sd_log_lun_expansion_event: fail to log sysevent\n"); 17668 } 17669 17670 nvlist_free(dle_attr_list); 17671 kmem_free(path, MAXPATHLEN); 17672 } 17673 17674 /* 17675 * Function: sd_media_change_task 17676 * 17677 * Description: Recovery action for CDROM to become available. 17678 * 17679 * Context: Executes in a taskq() thread context 17680 */ 17681 17682 static void 17683 sd_media_change_task(void *arg) 17684 { 17685 struct scsi_pkt *pktp = arg; 17686 struct sd_lun *un; 17687 struct buf *bp; 17688 struct sd_xbuf *xp; 17689 int err = 0; 17690 int retry_count = 0; 17691 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17692 struct sd_sense_info si; 17693 17694 ASSERT(pktp != NULL); 17695 bp = (struct buf *)pktp->pkt_private; 17696 ASSERT(bp != NULL); 17697 xp = SD_GET_XBUF(bp); 17698 ASSERT(xp != NULL); 17699 un = SD_GET_UN(bp); 17700 ASSERT(un != NULL); 17701 ASSERT(!mutex_owned(SD_MUTEX(un))); 17702 ASSERT(un->un_f_monitor_media_state); 17703 17704 si.ssi_severity = SCSI_ERR_INFO; 17705 si.ssi_pfa_flag = FALSE; 17706 17707 /* 17708 * When a reset is issued on a CDROM, it takes a long time to 17709 * recover. First few attempts to read capacity and other things 17710 * related to handling unit attention fail (with a ASC 0x4 and 17711 * ASCQ 0x1). In that case we want to do enough retries and we want 17712 * to limit the retries in other cases of genuine failures like 17713 * no media in drive. 17714 */ 17715 while (retry_count++ < retry_limit) { 17716 if ((err = sd_handle_mchange(un)) == 0) { 17717 break; 17718 } 17719 if (err == EAGAIN) { 17720 retry_limit = SD_UNIT_ATTENTION_RETRY; 17721 } 17722 /* Sleep for 0.5 sec. & try again */ 17723 delay(drv_usectohz(500000)); 17724 } 17725 17726 /* 17727 * Dispatch (retry or fail) the original command here, 17728 * along with appropriate console messages.... 17729 * 17730 * Must grab the mutex before calling sd_retry_command, 17731 * sd_print_sense_msg and sd_return_failed_command. 17732 */ 17733 mutex_enter(SD_MUTEX(un)); 17734 if (err != SD_CMD_SUCCESS) { 17735 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17736 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17737 si.ssi_severity = SCSI_ERR_FATAL; 17738 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17739 sd_return_failed_command(un, bp, EIO); 17740 } else { 17741 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17742 &si, EIO, (clock_t)0, NULL); 17743 } 17744 mutex_exit(SD_MUTEX(un)); 17745 } 17746 17747 17748 17749 /* 17750 * Function: sd_handle_mchange 17751 * 17752 * Description: Perform geometry validation & other recovery when CDROM 17753 * has been removed from drive. 
17754 * 17755 * Return Code: 0 for success 17756 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17757 * sd_send_scsi_READ_CAPACITY() 17758 * 17759 * Context: Executes in a taskq() thread context 17760 */ 17761 17762 static int 17763 sd_handle_mchange(struct sd_lun *un) 17764 { 17765 uint64_t capacity; 17766 uint32_t lbasize; 17767 int rval; 17768 17769 ASSERT(!mutex_owned(SD_MUTEX(un))); 17770 ASSERT(un->un_f_monitor_media_state); 17771 17772 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17773 SD_PATH_DIRECT_PRIORITY)) != 0) { 17774 return (rval); 17775 } 17776 17777 mutex_enter(SD_MUTEX(un)); 17778 sd_update_block_info(un, lbasize, capacity); 17779 17780 if (un->un_errstats != NULL) { 17781 struct sd_errstats *stp = 17782 (struct sd_errstats *)un->un_errstats->ks_data; 17783 stp->sd_capacity.value.ui64 = (uint64_t) 17784 ((uint64_t)un->un_blockcount * 17785 (uint64_t)un->un_tgt_blocksize); 17786 } 17787 17788 17789 /* 17790 * Check if the media in the device is writable or not 17791 */ 17792 if (ISCD(un)) 17793 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17794 17795 /* 17796 * Note: Maybe let the strategy/partitioning chain worry about getting 17797 * valid geometry. 17798 */ 17799 mutex_exit(SD_MUTEX(un)); 17800 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17801 17802 17803 if (cmlb_validate(un->un_cmlbhandle, 0, 17804 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17805 return (EIO); 17806 } else { 17807 if (un->un_f_pkstats_enabled) { 17808 sd_set_pstats(un); 17809 SD_TRACE(SD_LOG_IO_PARTITION, un, 17810 "sd_handle_mchange: un:0x%p pstats created and " 17811 "set\n", un); 17812 } 17813 } 17814 17815 17816 /* 17817 * Try to lock the door 17818 */ 17819 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17820 SD_PATH_DIRECT_PRIORITY)); 17821 } 17822 17823 17824 /* 17825 * Function: sd_send_scsi_DOORLOCK 17826 * 17827 * Description: Issue the scsi DOOR LOCK command 17828 * 17829 * Arguments: un - pointer to driver soft state (unit) structure for 17830 * this target. 17831 * flag - SD_REMOVAL_ALLOW 17832 * SD_REMOVAL_PREVENT 17833 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17834 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17835 * to use the USCSI "direct" chain and bypass the normal 17836 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17837 * command is issued as part of an error recovery action. 17838 * 17839 * Return Code: 0 - Success 17840 * errno return code from sd_send_scsi_cmd() 17841 * 17842 * Context: Can sleep. 17843 */ 17844 17845 static int 17846 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17847 { 17848 union scsi_cdb cdb; 17849 struct uscsi_cmd ucmd_buf; 17850 struct scsi_extended_sense sense_buf; 17851 int status; 17852 17853 ASSERT(un != NULL); 17854 ASSERT(!mutex_owned(SD_MUTEX(un))); 17855 17856 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17857 17858 /* already determined doorlock is not supported, fake success */ 17859 if (un->un_f_doorlock_supported == FALSE) { 17860 return (0); 17861 } 17862 17863 /* 17864 * If we are ejecting and see an SD_REMOVAL_PREVENT 17865 * ignore the command so we can complete the eject 17866 * operation. 
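*
* [Editorial aside, illustrative only] The same PREVENT ALLOW MEDIUM
* REMOVAL command (opcode 0x1E) can be issued from user space through
* the USCSICMD ioctl. This hypothetical sketch omits error handling and
* assumes fd is a raw device node opened with O_RDONLY|O_NDELAY:
*
*	#include <sys/scsi/impl/uscsi.h>
*	#include <string.h>
*	#include <unistd.h>
*
*	static int
*	ex_doorlock(int fd, int prevent)
*	{
*		struct uscsi_cmd uc;
*		char cdb[6] = { 0x1E, 0, 0, 0, 0, 0 };
*
*		cdb[4] = prevent ? 1 : 0;
*		(void) memset(&uc, 0, sizeof (uc));
*		uc.uscsi_cdb = cdb;
*		uc.uscsi_cdblen = 6;
*		uc.uscsi_timeout = 15;
*		uc.uscsi_flags = USCSI_SILENT;
*		return (ioctl(fd, USCSICMD, &uc));
*	}
*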
*/ 17868 if (flag == SD_REMOVAL_PREVENT) { 17869 mutex_enter(SD_MUTEX(un)); 17870 if (un->un_f_ejecting == TRUE) { 17871 mutex_exit(SD_MUTEX(un)); 17872 return (EAGAIN); 17873 } 17874 mutex_exit(SD_MUTEX(un)); 17875 } 17876 17877 bzero(&cdb, sizeof (cdb)); 17878 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17879 17880 cdb.scc_cmd = SCMD_DOORLOCK; 17881 cdb.cdb_opaque[4] = (uchar_t)flag; 17882 17883 ucmd_buf.uscsi_cdb = (char *)&cdb; 17884 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17885 ucmd_buf.uscsi_bufaddr = NULL; 17886 ucmd_buf.uscsi_buflen = 0; 17887 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17888 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17889 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17890 ucmd_buf.uscsi_timeout = 15; 17891 17892 SD_TRACE(SD_LOG_IO, un, 17893 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 17894 17895 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17896 UIO_SYSSPACE, path_flag); 17897 17898 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 17899 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17900 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 17901 /* fake success and skip subsequent doorlock commands */ 17902 un->un_f_doorlock_supported = FALSE; 17903 return (0); 17904 } 17905 17906 return (status); 17907 } 17908 17909 /* 17910 * Function: sd_send_scsi_READ_CAPACITY 17911 * 17912 * Description: This routine uses the scsi READ CAPACITY command to determine 17913 * the device capacity in number of blocks and the device native 17914 * block size. If this function returns a failure, then the 17915 * values in *capp and *lbap are undefined. If the capacity 17916 * returned is 0xffffffff then the lun is too large for a 17917 * normal READ CAPACITY command and the results of a 17918 * READ CAPACITY 16 will be used instead. 17919 * 17920 * Arguments: un - ptr to soft state struct for the target 17921 * capp - ptr to unsigned 64-bit variable to receive the 17922 * capacity value from the command. 17923 * lbap - ptr to unsigned 32-bit variable to receive the 17924 * block size value from the command 17925 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17926 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17927 * to use the USCSI "direct" chain and bypass the normal 17928 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17929 * command is issued as part of an error recovery action. 17930 * 17931 * Return Code: 0 - Success 17932 * EIO - IO error 17933 * EACCES - Reservation conflict detected 17934 * EAGAIN - Device is becoming ready 17935 * errno return code from sd_send_scsi_cmd() 17936 * 17937 * Context: Can sleep. Blocks until command completes. 17938 */ 17939 17940 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 17941 17942 static int 17943 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 17944 int path_flag) 17945 { 17946 struct scsi_extended_sense sense_buf; 17947 struct uscsi_cmd ucmd_buf; 17948 union scsi_cdb cdb; 17949 uint32_t *capacity_buf; 17950 uint64_t capacity; 17951 uint32_t lbasize; 17952 int status; 17953 17954 ASSERT(un != NULL); 17955 ASSERT(!mutex_owned(SD_MUTEX(un))); 17956 ASSERT(capp != NULL); 17957 ASSERT(lbap != NULL); 17958 17959 SD_TRACE(SD_LOG_IO, un, 17960 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17961 17962 /* 17963 * First send a READ_CAPACITY command to the target. 17964 * (This command is mandatory under SCSI-2.) 17965 * 17966 * Set up the CDB for the READ_CAPACITY command.
The Partial 17967 * Medium Indicator bit is cleared. The address field must be 17968 * zero if the PMI bit is zero. 17969 */ 17970 bzero(&cdb, sizeof (cdb)); 17971 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17972 17973 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17974 17975 cdb.scc_cmd = SCMD_READ_CAPACITY; 17976 17977 ucmd_buf.uscsi_cdb = (char *)&cdb; 17978 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17979 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17980 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17981 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17982 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17983 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17984 ucmd_buf.uscsi_timeout = 60; 17985 17986 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17987 UIO_SYSSPACE, path_flag); 17988 17989 switch (status) { 17990 case 0: 17991 /* Return failure if we did not get valid capacity data. */ 17992 if (ucmd_buf.uscsi_resid != 0) { 17993 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17994 return (EIO); 17995 } 17996 17997 /* 17998 * Read capacity and block size from the READ CAPACITY 10 data. 17999 * This data may be adjusted later due to device specific 18000 * issues. 18001 * 18002 * According to the SCSI spec, the READ CAPACITY 10 18003 * command returns the following: 18004 * 18005 * bytes 0-3: Maximum logical block address available. 18006 * (MSB in byte:0 & LSB in byte:3) 18007 * 18008 * bytes 4-7: Block length in bytes 18009 * (MSB in byte:4 & LSB in byte:7) 18010 * 18011 */ 18012 capacity = BE_32(capacity_buf[0]); 18013 lbasize = BE_32(capacity_buf[1]); 18014 18015 /* 18016 * Done with capacity_buf 18017 */ 18018 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18019 18020 /* 18021 * if the reported capacity is set to all 0xf's, then 18022 * this disk is too large and requires SBC-2 commands. 18023 * Reissue the request using READ CAPACITY 16. 18024 */ 18025 if (capacity == 0xffffffff) { 18026 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 18027 &lbasize, path_flag); 18028 if (status != 0) { 18029 return (status); 18030 } 18031 } 18032 break; /* Success! */ 18033 case EIO: 18034 switch (ucmd_buf.uscsi_status) { 18035 case STATUS_RESERVATION_CONFLICT: 18036 status = EACCES; 18037 break; 18038 case STATUS_CHECK: 18039 /* 18040 * Check condition; look for ASC/ASCQ of 0x04/0x01 18041 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18042 */ 18043 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18044 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 18045 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 18046 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18047 return (EAGAIN); 18048 } 18049 break; 18050 default: 18051 break; 18052 } 18053 /* FALLTHRU */ 18054 default: 18055 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18056 return (status); 18057 } 18058 18059 /* 18060 * Some ATAPI CD-ROM drives report inaccurate LBA size values 18061 * (2352 and 0 are common) so for these devices always force the value 18062 * to 2048 as required by the ATAPI specs. 18063 */ 18064 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 18065 lbasize = 2048; 18066 } 18067 18068 /* 18069 * Get the maximum LBA value from the READ CAPACITY data. 18070 * Here we assume that the Partial Medium Indicator (PMI) bit 18071 * was cleared when issuing the command. This means that the LBA 18072 * returned from the device is the LBA of the last logical block 18073 * on the logical unit. The actual logical block count will be 18074 * this value plus one. 
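*
* [Editorial aside, illustrative only] The same decode in stand-alone
* form: READ CAPACITY(10) data is big-endian, the first word is the
* last LBA (so the block count is that value plus one), and an
* all-ones word means the lun is too big and READ CAPACITY(16) is
* needed. A hypothetical ex_ helper:
*
*	#include <stdint.h>
*
*	static int
*	ex_parse_rc10(const uint8_t d[8], uint64_t *nblks, uint32_t *bsz)
*	{
*		uint32_t last_lba = ((uint32_t)d[0] << 24) |
*		    ((uint32_t)d[1] << 16) | ((uint32_t)d[2] << 8) |
*		    (uint32_t)d[3];
*
*		*bsz = ((uint32_t)d[4] << 24) | ((uint32_t)d[5] << 16) |
*		    ((uint32_t)d[6] << 8) | (uint32_t)d[7];
*		if (last_lba == 0xFFFFFFFFU)
*			return (-1);
*		*nblks = (uint64_t)last_lba + 1;
*		return (0);
*	}
*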
18075 *
18076 * Currently the capacity is saved in terms of un->un_sys_blocksize,
18077 * so scale the capacity value to reflect this.
18078 */
18079 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
18080
18081 /*
18082 * Copy the values from the READ CAPACITY command into the space
18083 * provided by the caller.
18084 */
18085 *capp = capacity;
18086 *lbap = lbasize;
18087
18088 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
18089 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18090
18091 /*
18092 * Both the lbasize and capacity from the device must be nonzero,
18093 * otherwise we assume that the values are not valid and return
18094 * failure to the caller. (4203735)
18095 */
18096 if ((capacity == 0) || (lbasize == 0)) {
18097 return (EIO);
18098 }
18099
18100 return (0);
18101 }
18102
18103 /*
18104 * Function: sd_send_scsi_READ_CAPACITY_16
18105 *
18106 * Description: This routine uses the scsi READ CAPACITY 16 command to
18107 * determine the device capacity in number of blocks and the
18108 * device native block size. If this function returns a failure,
18109 * then the values in *capp and *lbap are undefined.
18110 * This routine should always be called by
18111 * sd_send_scsi_READ_CAPACITY which will apply any device
18112 * specific adjustments to capacity and lbasize.
18113 *
18114 * Arguments: un - ptr to soft state struct for the target
18115 * capp - ptr to unsigned 64-bit variable to receive the
18116 * capacity value from the command.
18117 * lbap - ptr to unsigned 32-bit variable to receive the
18118 * block size value from the command
18119 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18120 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18121 * to use the USCSI "direct" chain and bypass the normal
18122 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
18123 * this command is issued as part of an error recovery
18124 * action.
18125 *
18126 * Return Code: 0 - Success
18127 * EIO - IO error
18128 * EACCES - Reservation conflict detected
18129 * EAGAIN - Device is becoming ready
18130 * errno return code from sd_send_scsi_cmd()
18131 *
18132 * Context: Can sleep. Blocks until command completes.
18133 */
18134
18135 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
18136
18137 static int
18138 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
18139 uint32_t *lbap, int path_flag)
18140 {
18141 struct scsi_extended_sense sense_buf;
18142 struct uscsi_cmd ucmd_buf;
18143 union scsi_cdb cdb;
18144 uint64_t *capacity16_buf;
18145 uint64_t capacity;
18146 uint32_t lbasize;
18147 int status;
18148
18149 ASSERT(un != NULL);
18150 ASSERT(!mutex_owned(SD_MUTEX(un)));
18151 ASSERT(capp != NULL);
18152 ASSERT(lbap != NULL);
18153
18154 SD_TRACE(SD_LOG_IO, un,
18155 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
18156
18157 /*
18158 * First send a READ_CAPACITY_16 command to the target.
18159 *
18160 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
18161 * Medium Indicator bit is cleared. The address field must be
18162 * zero if the PMI bit is zero.
18163 */
18164 bzero(&cdb, sizeof (cdb));
18165 bzero(&ucmd_buf, sizeof (ucmd_buf));
18166
18167 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
18168
18169 ucmd_buf.uscsi_cdb = (char *)&cdb;
18170 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
18171 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
18172 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
18173 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18174 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
18175 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18176 ucmd_buf.uscsi_timeout = 60;
18177
18178 /*
18179 * Read Capacity (16) is a Service Action In command. One
18180 * command byte (0x9E) is overloaded for multiple operations,
18181 * with the second CDB byte specifying the desired operation.
18182 */
18183 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
18184 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
18185
18186 /*
18187 * Fill in allocation length field
18188 */
18189 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
18190
18191 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18192 UIO_SYSSPACE, path_flag);
18193
18194 switch (status) {
18195 case 0:
18196 /* Return failure if we did not get valid capacity data. */
18197 if (ucmd_buf.uscsi_resid > 20) {
18198 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18199 return (EIO);
18200 }
18201
18202 /*
18203 * Read capacity and block size from the READ CAPACITY 16 data.
18204 * This data may be adjusted later due to device specific
18205 * issues.
18206 *
18207 * According to the SCSI spec, the READ CAPACITY 16
18208 * command returns the following:
18209 *
18210 * bytes 0-7: Maximum logical block address available.
18211 * (MSB in byte:0 & LSB in byte:7)
18212 *
18213 * bytes 8-11: Block length in bytes
18214 * (MSB in byte:8 & LSB in byte:11)
18215 *
18216 */
18217 capacity = BE_64(capacity16_buf[0]);
18218 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
18219
18220 /*
18221 * Done with capacity16_buf
18222 */
18223 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18224
18225 /*
18226 * if the reported capacity is set to all 0xf's, then
18227 * this disk is too large. This could only happen with
18228 * a device that supports LBAs larger than 64 bits which
18229 * are not defined by any current T10 standards.
18230 */
18231 if (capacity == 0xffffffffffffffff) {
18232 return (EIO);
18233 }
18234 break; /* Success! */
18235 case EIO:
18236 switch (ucmd_buf.uscsi_status) {
18237 case STATUS_RESERVATION_CONFLICT:
18238 status = EACCES;
18239 break;
18240 case STATUS_CHECK:
18241 /*
18242 * Check condition; look for ASC/ASCQ of 0x04/0x01
18243 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
18244 */
18245 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18246 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
18247 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
18248 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18249 return (EAGAIN);
18250 }
18251 break;
18252 default:
18253 break;
18254 }
18255 /* FALLTHRU */
18256 default:
18257 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18258 return (status);
18259 }
18260
18261 *capp = capacity;
18262 *lbap = lbasize;
18263
18264 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
18265 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18266
18267 return (0);
18268 }
18269
18270
18271 /*
18272 * Function: sd_send_scsi_START_STOP_UNIT
18273 *
18274 * Description: Issue a scsi START STOP UNIT command to the target.
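 * A note on the flag encoding (restating the SBC definition of
 * START STOP UNIT rather than anything new in this file): the flag
 * value is copied straight into CDB byte 4 below, where bit 0 is
 * the START bit and bit 1 is LOEJ. SD_TARGET_STOP, SD_TARGET_START
 * and SD_TARGET_EJECT are therefore assumed to encode 0x00, 0x01
 * and 0x02 respectively (EJECT = LOEJ set, START clear).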
18275 * 18276 * Arguments: un - pointer to driver soft state (unit) structure for 18277 * this target. 18278 * flag - SD_TARGET_START 18279 * SD_TARGET_STOP 18280 * SD_TARGET_EJECT 18281 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18282 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18283 * to use the USCSI "direct" chain and bypass the normal 18284 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18285 * command is issued as part of an error recovery action. 18286 * 18287 * Return Code: 0 - Success 18288 * EIO - IO error 18289 * EACCES - Reservation conflict detected 18290 * ENXIO - Not Ready, medium not present 18291 * errno return code from sd_send_scsi_cmd() 18292 * 18293 * Context: Can sleep. 18294 */ 18295 18296 static int 18297 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18298 { 18299 struct scsi_extended_sense sense_buf; 18300 union scsi_cdb cdb; 18301 struct uscsi_cmd ucmd_buf; 18302 int status; 18303 18304 ASSERT(un != NULL); 18305 ASSERT(!mutex_owned(SD_MUTEX(un))); 18306 18307 SD_TRACE(SD_LOG_IO, un, 18308 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18309 18310 if (un->un_f_check_start_stop && 18311 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18312 (un->un_f_start_stop_supported != TRUE)) { 18313 return (0); 18314 } 18315 18316 /* 18317 * If we are performing an eject operation and 18318 * we receive any command other than SD_TARGET_EJECT 18319 * we should immediately return. 18320 */ 18321 if (flag != SD_TARGET_EJECT) { 18322 mutex_enter(SD_MUTEX(un)); 18323 if (un->un_f_ejecting == TRUE) { 18324 mutex_exit(SD_MUTEX(un)); 18325 return (EAGAIN); 18326 } 18327 mutex_exit(SD_MUTEX(un)); 18328 } 18329 18330 bzero(&cdb, sizeof (cdb)); 18331 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18332 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18333 18334 cdb.scc_cmd = SCMD_START_STOP; 18335 cdb.cdb_opaque[4] = (uchar_t)flag; 18336 18337 ucmd_buf.uscsi_cdb = (char *)&cdb; 18338 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18339 ucmd_buf.uscsi_bufaddr = NULL; 18340 ucmd_buf.uscsi_buflen = 0; 18341 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18342 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18343 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18344 ucmd_buf.uscsi_timeout = 200; 18345 18346 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18347 UIO_SYSSPACE, path_flag); 18348 18349 switch (status) { 18350 case 0: 18351 break; /* Success! */ 18352 case EIO: 18353 switch (ucmd_buf.uscsi_status) { 18354 case STATUS_RESERVATION_CONFLICT: 18355 status = EACCES; 18356 break; 18357 case STATUS_CHECK: 18358 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18359 switch (scsi_sense_key( 18360 (uint8_t *)&sense_buf)) { 18361 case KEY_ILLEGAL_REQUEST: 18362 status = ENOTSUP; 18363 break; 18364 case KEY_NOT_READY: 18365 if (scsi_sense_asc( 18366 (uint8_t *)&sense_buf) 18367 == 0x3A) { 18368 status = ENXIO; 18369 } 18370 break; 18371 default: 18372 break; 18373 } 18374 } 18375 break; 18376 default: 18377 break; 18378 } 18379 break; 18380 default: 18381 break; 18382 } 18383 18384 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18385 18386 return (status); 18387 } 18388 18389 18390 /* 18391 * Function: sd_start_stop_unit_callback 18392 * 18393 * Description: timeout(9F) callback to begin recovery process for a 18394 * device that has spun down. 18395 * 18396 * Arguments: arg - pointer to associated softstate struct. 
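 *
 * (Usage note, restating what the code below does: timeout(9F)
 * callbacks must not block, so this routine only dispatches the
 * blocking recovery work, sd_start_stop_unit_task, onto the sd_tq
 * taskq with KM_NOSLEEP; the return value of taskq_dispatch() is
 * deliberately discarded, so a failed dispatch simply means the
 * recovery is not started.)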
18397 * 18398 * Context: Executes in a timeout(9F) thread context 18399 */ 18400 18401 static void 18402 sd_start_stop_unit_callback(void *arg) 18403 { 18404 struct sd_lun *un = arg; 18405 ASSERT(un != NULL); 18406 ASSERT(!mutex_owned(SD_MUTEX(un))); 18407 18408 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 18409 18410 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 18411 } 18412 18413 18414 /* 18415 * Function: sd_start_stop_unit_task 18416 * 18417 * Description: Recovery procedure when a drive is spun down. 18418 * 18419 * Arguments: arg - pointer to associated softstate struct. 18420 * 18421 * Context: Executes in a taskq() thread context 18422 */ 18423 18424 static void 18425 sd_start_stop_unit_task(void *arg) 18426 { 18427 struct sd_lun *un = arg; 18428 18429 ASSERT(un != NULL); 18430 ASSERT(!mutex_owned(SD_MUTEX(un))); 18431 18432 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 18433 18434 /* 18435 * Some unformatted drives report not ready error, no need to 18436 * restart if format has been initiated. 18437 */ 18438 mutex_enter(SD_MUTEX(un)); 18439 if (un->un_f_format_in_progress == TRUE) { 18440 mutex_exit(SD_MUTEX(un)); 18441 return; 18442 } 18443 mutex_exit(SD_MUTEX(un)); 18444 18445 /* 18446 * When a START STOP command is issued from here, it is part of a 18447 * failure recovery operation and must be issued before any other 18448 * commands, including any pending retries. Thus it must be sent 18449 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 18450 * succeeds or not, we will start I/O after the attempt. 18451 */ 18452 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 18453 SD_PATH_DIRECT_PRIORITY); 18454 18455 /* 18456 * The above call blocks until the START_STOP_UNIT command completes. 18457 * Now that it has completed, we must re-try the original IO that 18458 * received the NOT READY condition in the first place. There are 18459 * three possible conditions here: 18460 * 18461 * (1) The original IO is on un_retry_bp. 18462 * (2) The original IO is on the regular wait queue, and un_retry_bp 18463 * is NULL. 18464 * (3) The original IO is on the regular wait queue, and un_retry_bp 18465 * points to some other, unrelated bp. 18466 * 18467 * For each case, we must call sd_start_cmds() with un_retry_bp 18468 * as the argument. If un_retry_bp is NULL, this will initiate 18469 * processing of the regular wait queue. If un_retry_bp is not NULL, 18470 * then this will process the bp on un_retry_bp. That may or may not 18471 * be the original IO, but that does not matter: the important thing 18472 * is to keep the IO processing going at this point. 18473 * 18474 * Note: This is a very specific error recovery sequence associated 18475 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18476 * serialize the I/O with completion of the spin-up. 18477 */ 18478 mutex_enter(SD_MUTEX(un)); 18479 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18480 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18481 un, un->un_retry_bp); 18482 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18483 sd_start_cmds(un, un->un_retry_bp); 18484 mutex_exit(SD_MUTEX(un)); 18485 18486 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18487 } 18488 18489 18490 /* 18491 * Function: sd_send_scsi_INQUIRY 18492 * 18493 * Description: Issue the scsi INQUIRY command. 
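 *
 * As a usage note (standard SPC INQUIRY behavior, stated as an aid
 * to the reader): when evpd is 0 the device returns standard
 * INQUIRY data and page_code must also be 0; when evpd is 1,
 * page_code selects a Vital Product Data page (for example 0x80,
 * Unit Serial Number, or 0x83, Device Identification). The caller
 * is responsible for passing a consistent evpd/page_code pair.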
18494 *
18495 * Arguments: un
18496 * bufaddr
18497 * buflen
18498 * evpd
18499 * page_code
18500 * residp
18501 *
18502 * Return Code: 0 - Success
18503 * errno return code from sd_send_scsi_cmd()
18504 *
18505 * Context: Can sleep. Does not return until command is completed.
18506 */
18507
18508 static int
18509 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
18510 uchar_t evpd, uchar_t page_code, size_t *residp)
18511 {
18512 union scsi_cdb cdb;
18513 struct uscsi_cmd ucmd_buf;
18514 int status;
18515
18516 ASSERT(un != NULL);
18517 ASSERT(!mutex_owned(SD_MUTEX(un)));
18518 ASSERT(bufaddr != NULL);
18519
18520 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
18521
18522 bzero(&cdb, sizeof (cdb));
18523 bzero(&ucmd_buf, sizeof (ucmd_buf));
18524 bzero(bufaddr, buflen);
18525
18526 cdb.scc_cmd = SCMD_INQUIRY;
18527 cdb.cdb_opaque[1] = evpd;
18528 cdb.cdb_opaque[2] = page_code;
18529 FORMG0COUNT(&cdb, buflen);
18530
18531 ucmd_buf.uscsi_cdb = (char *)&cdb;
18532 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
18533 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
18534 ucmd_buf.uscsi_buflen = buflen;
18535 ucmd_buf.uscsi_rqbuf = NULL;
18536 ucmd_buf.uscsi_rqlen = 0;
18537 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
18538 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
18539
18540 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18541 UIO_SYSSPACE, SD_PATH_DIRECT);
18542
18543 if ((status == 0) && (residp != NULL)) {
18544 *residp = ucmd_buf.uscsi_resid;
18545 }
18546
18547 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
18548
18549 return (status);
18550 }
18551
18552
18553 /*
18554 * Function: sd_send_scsi_TEST_UNIT_READY
18555 *
18556 * Description: Issue the scsi TEST UNIT READY command.
18557 * This routine can be told to set the flag USCSI_DIAGNOSE to
18558 * prevent retrying failed commands. Use this when the intent
18559 * is either to check for device readiness, to clear a Unit
18560 * Attention, or to clear any outstanding sense data.
18561 * However under specific conditions the expected behavior
18562 * is for retries to bring a device ready, so use the flag
18563 * with caution.
18564 *
18565 * Arguments: un
18566 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
18567 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
18568 * 0: don't check for media present, do retries on cmd.
18569 *
18570 * Return Code: 0 - Success
18571 * EIO - IO error
18572 * EACCES - Reservation conflict detected
18573 * ENXIO - Not Ready, medium not present
18574 * errno return code from sd_send_scsi_cmd()
18575 *
18576 * Context: Can sleep. Does not return until command is completed.
18577 */
18578
18579 static int
18580 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
18581 {
18582 struct scsi_extended_sense sense_buf;
18583 union scsi_cdb cdb;
18584 struct uscsi_cmd ucmd_buf;
18585 int status;
18586
18587 ASSERT(un != NULL);
18588 ASSERT(!mutex_owned(SD_MUTEX(un)));
18589
18590 SD_TRACE(SD_LOG_IO, un,
18591 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
18592
18593 /*
18594 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
18595 * timeouts when they receive a TUR and the queue is not empty. Check
18596 * the configuration flag set during attach (indicating the drive has
18597 * this firmware bug) and un_ncmds_in_transport before issuing the
18598 * TUR. If there are
18599 * pending commands, return success; this is a bit arbitrary but is ok
18600 * for non-removables (i.e.
the eliteI disks) and non-clustering 18601 * configurations. 18602 */ 18603 if (un->un_f_cfg_tur_check == TRUE) { 18604 mutex_enter(SD_MUTEX(un)); 18605 if (un->un_ncmds_in_transport != 0) { 18606 mutex_exit(SD_MUTEX(un)); 18607 return (0); 18608 } 18609 mutex_exit(SD_MUTEX(un)); 18610 } 18611 18612 bzero(&cdb, sizeof (cdb)); 18613 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18614 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18615 18616 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18617 18618 ucmd_buf.uscsi_cdb = (char *)&cdb; 18619 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18620 ucmd_buf.uscsi_bufaddr = NULL; 18621 ucmd_buf.uscsi_buflen = 0; 18622 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18623 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18624 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18625 18626 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18627 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18628 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18629 } 18630 ucmd_buf.uscsi_timeout = 60; 18631 18632 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18633 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18634 SD_PATH_STANDARD)); 18635 18636 switch (status) { 18637 case 0: 18638 break; /* Success! */ 18639 case EIO: 18640 switch (ucmd_buf.uscsi_status) { 18641 case STATUS_RESERVATION_CONFLICT: 18642 status = EACCES; 18643 break; 18644 case STATUS_CHECK: 18645 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18646 break; 18647 } 18648 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18649 (scsi_sense_key((uint8_t *)&sense_buf) == 18650 KEY_NOT_READY) && 18651 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18652 status = ENXIO; 18653 } 18654 break; 18655 default: 18656 break; 18657 } 18658 break; 18659 default: 18660 break; 18661 } 18662 18663 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18664 18665 return (status); 18666 } 18667 18668 18669 /* 18670 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18671 * 18672 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18673 * 18674 * Arguments: un 18675 * 18676 * Return Code: 0 - Success 18677 * EACCES 18678 * ENOTSUP 18679 * errno return code from sd_send_scsi_cmd() 18680 * 18681 * Context: Can sleep. Does not return until command is completed. 
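 *
 * As a sketch of the returned data (restating the SPC definition
 * of PERSISTENT RESERVE IN, not anything unique to this driver):
 * the first 8 bytes are a 4-byte PRgeneration counter followed by
 * a 4-byte additional length; for SD_READ_KEYS the remainder is a
 * list of 8-byte registered reservation keys, and for SD_READ_RESV
 * a list of reservation descriptors.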
18682 */
18683
18684 static int
18685 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd,
18686 uint16_t data_len, uchar_t *data_bufp)
18687 {
18688 struct scsi_extended_sense sense_buf;
18689 union scsi_cdb cdb;
18690 struct uscsi_cmd ucmd_buf;
18691 int status;
18692 int no_caller_buf = FALSE;
18693
18694 ASSERT(un != NULL);
18695 ASSERT(!mutex_owned(SD_MUTEX(un)));
18696 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
18697
18698 SD_TRACE(SD_LOG_IO, un,
18699 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
18700
18701 bzero(&cdb, sizeof (cdb));
18702 bzero(&ucmd_buf, sizeof (ucmd_buf));
18703 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
18704 if (data_bufp == NULL) {
18705 /* Allocate a default buf if the caller did not give one */
18706 ASSERT(data_len == 0);
18707 data_len = MHIOC_RESV_KEY_SIZE;
18708 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
18709 no_caller_buf = TRUE;
18710 }
18711
18712 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
18713 cdb.cdb_opaque[1] = usr_cmd;
18714 FORMG1COUNT(&cdb, data_len);
18715
18716 ucmd_buf.uscsi_cdb = (char *)&cdb;
18717 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
18718 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
18719 ucmd_buf.uscsi_buflen = data_len;
18720 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18721 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
18722 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18723 ucmd_buf.uscsi_timeout = 60;
18724
18725 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18726 UIO_SYSSPACE, SD_PATH_STANDARD);
18727
18728 switch (status) {
18729 case 0:
18730 break; /* Success! */
18731 case EIO:
18732 switch (ucmd_buf.uscsi_status) {
18733 case STATUS_RESERVATION_CONFLICT:
18734 status = EACCES;
18735 break;
18736 case STATUS_CHECK:
18737 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18738 (scsi_sense_key((uint8_t *)&sense_buf) ==
18739 KEY_ILLEGAL_REQUEST)) {
18740 status = ENOTSUP;
18741 }
18742 break;
18743 default:
18744 break;
18745 }
18746 break;
18747 default:
18748 break;
18749 }
18750
18751 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
18752
18753 if (no_caller_buf == TRUE) {
18754 kmem_free(data_bufp, data_len);
18755 }
18756
18757 return (status);
18758 }
18759
18760
18761 /*
18762 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
18763 *
18764 * Description: This routine is the driver entry point for handling
18765 * multi-host persistent reservation requests (MHIOCGRP_REGISTER,
18766 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT and
18767 * MHIOCGRP_REGISTERANDIGNOREKEY) by sending SCSI-3 PROUT commands
18768 * to the device.
18769 * Arguments: un - Pointer to soft state struct for the target.
18770 * usr_cmd SCSI-3 reservation facility command (one of
18771 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
18772 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_REGISTERANDIGNOREKEY)
18773 * usr_bufp - user provided pointer to a register, reserve
18774 * descriptor, or preempt and abort structure (mhioc_register_t,
18775 * mhioc_resv_desc_t, mhioc_preemptandabort_t)
18776 *
18777 * Return Code: 0 - Success
18778 * EACCES
18779 * ENOTSUP
18780 * errno return code from sd_send_scsi_cmd()
18781 *
18782 * Context: Can sleep. Does not return until command is completed.
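 *
 * The 24-byte parameter list built below follows the SPC-2 PROUT
 * layout (restated from the standard as an aid to the reader):
 * bytes 0-7 hold the reservation key, bytes 8-15 the service
 * action reservation key, bytes 16-19 the (obsolete) scope-
 * specific address, and bit 0 of byte 20 the APTPL flag. The
 * sd_prout_t fields res_key, service_key, scope_address and aptpl
 * are assumed to map onto those offsets in that order.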
18783 */ 18784 18785 static int 18786 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18787 uchar_t *usr_bufp) 18788 { 18789 struct scsi_extended_sense sense_buf; 18790 union scsi_cdb cdb; 18791 struct uscsi_cmd ucmd_buf; 18792 int status; 18793 uchar_t data_len = sizeof (sd_prout_t); 18794 sd_prout_t *prp; 18795 18796 ASSERT(un != NULL); 18797 ASSERT(!mutex_owned(SD_MUTEX(un))); 18798 ASSERT(data_len == 24); /* required by scsi spec */ 18799 18800 SD_TRACE(SD_LOG_IO, un, 18801 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18802 18803 if (usr_bufp == NULL) { 18804 return (EINVAL); 18805 } 18806 18807 bzero(&cdb, sizeof (cdb)); 18808 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18809 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18810 prp = kmem_zalloc(data_len, KM_SLEEP); 18811 18812 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18813 cdb.cdb_opaque[1] = usr_cmd; 18814 FORMG1COUNT(&cdb, data_len); 18815 18816 ucmd_buf.uscsi_cdb = (char *)&cdb; 18817 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18818 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18819 ucmd_buf.uscsi_buflen = data_len; 18820 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18821 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18822 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18823 ucmd_buf.uscsi_timeout = 60; 18824 18825 switch (usr_cmd) { 18826 case SD_SCSI3_REGISTER: { 18827 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18828 18829 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18830 bcopy(ptr->newkey.key, prp->service_key, 18831 MHIOC_RESV_KEY_SIZE); 18832 prp->aptpl = ptr->aptpl; 18833 break; 18834 } 18835 case SD_SCSI3_RESERVE: 18836 case SD_SCSI3_RELEASE: { 18837 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18838 18839 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18840 prp->scope_address = BE_32(ptr->scope_specific_addr); 18841 cdb.cdb_opaque[2] = ptr->type; 18842 break; 18843 } 18844 case SD_SCSI3_PREEMPTANDABORT: { 18845 mhioc_preemptandabort_t *ptr = 18846 (mhioc_preemptandabort_t *)usr_bufp; 18847 18848 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18849 bcopy(ptr->victim_key.key, prp->service_key, 18850 MHIOC_RESV_KEY_SIZE); 18851 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18852 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18853 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18854 break; 18855 } 18856 case SD_SCSI3_REGISTERANDIGNOREKEY: 18857 { 18858 mhioc_registerandignorekey_t *ptr; 18859 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18860 bcopy(ptr->newkey.key, 18861 prp->service_key, MHIOC_RESV_KEY_SIZE); 18862 prp->aptpl = ptr->aptpl; 18863 break; 18864 } 18865 default: 18866 ASSERT(FALSE); 18867 break; 18868 } 18869 18870 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18871 UIO_SYSSPACE, SD_PATH_STANDARD); 18872 18873 switch (status) { 18874 case 0: 18875 break; /* Success! 
*/ 18876 case EIO: 18877 switch (ucmd_buf.uscsi_status) { 18878 case STATUS_RESERVATION_CONFLICT: 18879 status = EACCES; 18880 break; 18881 case STATUS_CHECK: 18882 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18883 (scsi_sense_key((uint8_t *)&sense_buf) == 18884 KEY_ILLEGAL_REQUEST)) { 18885 status = ENOTSUP; 18886 } 18887 break; 18888 default: 18889 break; 18890 } 18891 break; 18892 default: 18893 break; 18894 } 18895 18896 kmem_free(prp, data_len); 18897 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18898 return (status); 18899 } 18900 18901 18902 /* 18903 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18904 * 18905 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18906 * 18907 * Arguments: un - pointer to the target's soft state struct 18908 * dkc - pointer to the callback structure 18909 * 18910 * Return Code: 0 - success 18911 * errno-type error code 18912 * 18913 * Context: kernel thread context only. 18914 * 18915 * _______________________________________________________________ 18916 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 18917 * |FLUSH_VOLATILE| | operation | 18918 * |______________|______________|_________________________________| 18919 * | 0 | NULL | Synchronous flush on both | 18920 * | | | volatile and non-volatile cache | 18921 * |______________|______________|_________________________________| 18922 * | 1 | NULL | Synchronous flush on volatile | 18923 * | | | cache; disk drivers may suppress| 18924 * | | | flush if disk table indicates | 18925 * | | | non-volatile cache | 18926 * |______________|______________|_________________________________| 18927 * | 0 | !NULL | Asynchronous flush on both | 18928 * | | | volatile and non-volatile cache;| 18929 * |______________|______________|_________________________________| 18930 * | 1 | !NULL | Asynchronous flush on volatile | 18931 * | | | cache; disk drivers may suppress| 18932 * | | | flush if disk table indicates | 18933 * | | | non-volatile cache | 18934 * |______________|______________|_________________________________| 18935 * 18936 */ 18937 18938 static int 18939 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18940 { 18941 struct sd_uscsi_info *uip; 18942 struct uscsi_cmd *uscmd; 18943 union scsi_cdb *cdb; 18944 struct buf *bp; 18945 int rval = 0; 18946 int is_async; 18947 18948 SD_TRACE(SD_LOG_IO, un, 18949 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18950 18951 ASSERT(un != NULL); 18952 ASSERT(!mutex_owned(SD_MUTEX(un))); 18953 18954 if (dkc == NULL || dkc->dkc_callback == NULL) { 18955 is_async = FALSE; 18956 } else { 18957 is_async = TRUE; 18958 } 18959 18960 mutex_enter(SD_MUTEX(un)); 18961 /* check whether cache flush should be suppressed */ 18962 if (un->un_f_suppress_cache_flush == TRUE) { 18963 mutex_exit(SD_MUTEX(un)); 18964 /* 18965 * suppress the cache flush if the device is told to do 18966 * so by sd.conf or disk table 18967 */ 18968 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 18969 skip the cache flush since suppress_cache_flush is %d!\n", 18970 un->un_f_suppress_cache_flush); 18971 18972 if (is_async == TRUE) { 18973 /* invoke callback for asynchronous flush */ 18974 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 18975 } 18976 return (rval); 18977 } 18978 mutex_exit(SD_MUTEX(un)); 18979 18980 /* 18981 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 18982 * set properly 18983 */ 18984 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18985 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18986 18987 
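/*
 * A note on the SYNC_NV encoding used just below (restating the
 * SBC-2 definition of SYNCHRONIZE CACHE (10), not new behavior):
 * bit 2 of CDB byte 1 is the SYNC_NV bit, which limits the flush
 * to the volatile cache. SD_SYNC_NV_BIT is assumed to be that bit
 * value, OR-ed into the low bits of byte 1 that cdb_un.tag
 * overlays.
 */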
mutex_enter(SD_MUTEX(un));
18988 if (dkc != NULL && un->un_f_sync_nv_supported &&
18989 (dkc->dkc_flag & FLUSH_VOLATILE)) {
18990 /*
18991 * if the device supports SYNC_NV bit, turn on
18992 * the SYNC_NV bit to only flush volatile cache
18993 */
18994 cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
18995 }
18996 mutex_exit(SD_MUTEX(un));
18997
18998 /*
18999 * Get some memory for the uscsi_cmd struct and initialize it
19000 * for the SYNCHRONIZE_CACHE cmd; the cdb was allocated above.
19001 */
19002 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
19003 uscmd->uscsi_cdblen = CDB_GROUP1;
19004 uscmd->uscsi_cdb = (caddr_t)cdb;
19005 uscmd->uscsi_bufaddr = NULL;
19006 uscmd->uscsi_buflen = 0;
19007 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
19008 uscmd->uscsi_rqlen = SENSE_LENGTH;
19009 uscmd->uscsi_rqresid = SENSE_LENGTH;
19010 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19011 uscmd->uscsi_timeout = sd_io_time;
19012
19013 /*
19014 * Allocate an sd_uscsi_info struct and fill it with the info
19015 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
19016 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
19017 * since we allocate the buf here in this function, we do not
19018 * need to preserve the prior contents of b_private.
19019 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
19020 */
19021 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
19022 uip->ui_flags = SD_PATH_DIRECT;
19023 uip->ui_cmdp = uscmd;
19024
19025 bp = getrbuf(KM_SLEEP);
19026 bp->b_private = uip;
19027
19028 /*
19029 * Set up buffer to carry uscsi request.
19030 */
19031 bp->b_flags = B_BUSY;
19032 bp->b_bcount = 0;
19033 bp->b_blkno = 0;
19034
19035 if (is_async == TRUE) {
19036 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
19037 uip->ui_dkc = *dkc;
19038 }
19039
19040 bp->b_edev = SD_GET_DEV(un);
19041 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */
19042
19043 (void) sd_uscsi_strategy(bp);
19044
19045 /*
19046 * If synchronous request, wait for completion
19047 * If async just return and let b_iodone callback
19048 * cleanup.
19049 * NOTE: On return, un_ncmds_in_driver will be decremented,
19050 * but it was also incremented in sd_uscsi_strategy(), so
19051 * we should be ok.
19052 */
19053 if (is_async == FALSE) {
19054 (void) biowait(bp);
19055 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
19056 }
19057
19058 return (rval);
19059 }
19060
19061
19062 static int
19063 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
19064 {
19065 struct sd_uscsi_info *uip;
19066 struct uscsi_cmd *uscmd;
19067 uint8_t *sense_buf;
19068 struct sd_lun *un;
19069 int status;
19070 union scsi_cdb *cdb;
19071
19072 uip = (struct sd_uscsi_info *)(bp->b_private);
19073 ASSERT(uip != NULL);
19074
19075 uscmd = uip->ui_cmdp;
19076 ASSERT(uscmd != NULL);
19077
19078 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
19079 ASSERT(sense_buf != NULL);
19080
19081 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
19082 ASSERT(un != NULL);
19083
19084 cdb = (union scsi_cdb *)uscmd->uscsi_cdb;
19085
19086 status = geterror(bp);
19087 switch (status) {
19088 case 0:
19089 break; /* Success!
*/
19090 case EIO:
19091 switch (uscmd->uscsi_status) {
19092 case STATUS_RESERVATION_CONFLICT:
19093 /* Ignore reservation conflict */
19094 status = 0;
19095 goto done;
19096
19097 case STATUS_CHECK:
19098 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
19099 (scsi_sense_key(sense_buf) ==
19100 KEY_ILLEGAL_REQUEST)) {
19101 /* Ignore Illegal Request error */
19102 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) {
19103 mutex_enter(SD_MUTEX(un));
19104 un->un_f_sync_nv_supported = FALSE;
19105 mutex_exit(SD_MUTEX(un));
19106 status = 0;
19107 SD_TRACE(SD_LOG_IO, un,
19108 "un_f_sync_nv_supported \
19109 is set to false.\n");
19110 goto done;
19111 }
19112
19113 mutex_enter(SD_MUTEX(un));
19114 un->un_f_sync_cache_supported = FALSE;
19115 mutex_exit(SD_MUTEX(un));
19116 SD_TRACE(SD_LOG_IO, un,
19117 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
19118 un_f_sync_cache_supported set to false \
19119 with asc = %x, ascq = %x\n",
19120 scsi_sense_asc(sense_buf),
19121 scsi_sense_ascq(sense_buf));
19122 status = ENOTSUP;
19123 goto done;
19124 }
19125 break;
19126 default:
19127 break;
19128 }
19129 /* FALLTHRU */
19130 default:
19131 /*
19132 * Don't log an error message if this device
19133 * has removable media.
19134 */
19135 if (!un->un_f_has_removable_media) {
19136 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19137 "SYNCHRONIZE CACHE command failed (%d)\n", status);
19138 }
19139 break;
19140 }
19141
19142 done:
19143 if (uip->ui_dkc.dkc_callback != NULL) {
19144 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
19145 }
19146
19147 ASSERT((bp->b_flags & B_REMAPPED) == 0);
19148 freerbuf(bp);
19149 kmem_free(uip, sizeof (struct sd_uscsi_info));
19150 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
19151 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
19152 kmem_free(uscmd, sizeof (struct uscsi_cmd));
19153
19154 return (status);
19155 }
19156
19157
19158 /*
19159 * Function: sd_send_scsi_GET_CONFIGURATION
19160 *
19161 * Description: Issues the get configuration command to the device.
19162 * Called from sd_check_for_writable_cd & sd_get_media_info;
19163 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
19164 * Arguments: un
19165 * ucmdbuf
19166 * rqbuf
19167 * rqbuflen
19168 * bufaddr
19169 * buflen
19170 * path_flag
19171 *
19172 * Return Code: 0 - Success
19173 * errno return code from sd_send_scsi_cmd()
19174 *
19175 * Context: Can sleep. Does not return until command is completed.
19176 *
19177 */
19178
19179 static int
19180 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
19181 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
19182 int path_flag)
19183 {
19184 char cdb[CDB_GROUP1];
19185 int status;
19186
19187 ASSERT(un != NULL);
19188 ASSERT(!mutex_owned(SD_MUTEX(un)));
19189 ASSERT(bufaddr != NULL);
19190 ASSERT(ucmdbuf != NULL);
19191 ASSERT(rqbuf != NULL);
19192
19193 SD_TRACE(SD_LOG_IO, un,
19194 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
19195
19196 bzero(cdb, sizeof (cdb));
19197 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
19198 bzero(rqbuf, rqbuflen);
19199 bzero(bufaddr, buflen);
19200
19201 /*
19202 * Set up cdb field for the get configuration command.
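 * A note on the fields set below (restating the MMC definition of
 * GET CONFIGURATION, offered as an aid rather than taken from this
 * file): the Requested Type (RT) field occupies bits 0-1 of CDB
 * byte 1, and the value 0x02 is assumed to request the Feature
 * Header plus only the descriptor named by the Starting Feature
 * Number in bytes 2-3; bytes 7-8 carry the allocation length, of
 * which only the low byte is set here.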
19203 */ 19204 cdb[0] = SCMD_GET_CONFIGURATION; 19205 cdb[1] = 0x02; /* Requested Type */ 19206 cdb[8] = SD_PROFILE_HEADER_LEN; 19207 ucmdbuf->uscsi_cdb = cdb; 19208 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19209 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19210 ucmdbuf->uscsi_buflen = buflen; 19211 ucmdbuf->uscsi_timeout = sd_io_time; 19212 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19213 ucmdbuf->uscsi_rqlen = rqbuflen; 19214 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19215 19216 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19217 UIO_SYSSPACE, path_flag); 19218 19219 switch (status) { 19220 case 0: 19221 break; /* Success! */ 19222 case EIO: 19223 switch (ucmdbuf->uscsi_status) { 19224 case STATUS_RESERVATION_CONFLICT: 19225 status = EACCES; 19226 break; 19227 default: 19228 break; 19229 } 19230 break; 19231 default: 19232 break; 19233 } 19234 19235 if (status == 0) { 19236 SD_DUMP_MEMORY(un, SD_LOG_IO, 19237 "sd_send_scsi_GET_CONFIGURATION: data", 19238 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19239 } 19240 19241 SD_TRACE(SD_LOG_IO, un, 19242 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19243 19244 return (status); 19245 } 19246 19247 /* 19248 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19249 * 19250 * Description: Issues the get configuration command to the device to 19251 * retrieve a specific feature. Called from 19252 * sd_check_for_writable_cd & sd_set_mmc_caps. 19253 * Arguments: un 19254 * ucmdbuf 19255 * rqbuf 19256 * rqbuflen 19257 * bufaddr 19258 * buflen 19259 * feature 19260 * 19261 * Return Code: 0 - Success 19262 * errno return code from sd_send_scsi_cmd() 19263 * 19264 * Context: Can sleep. Does not return until command is completed. 19265 * 19266 */ 19267 static int 19268 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19269 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19270 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19271 { 19272 char cdb[CDB_GROUP1]; 19273 int status; 19274 19275 ASSERT(un != NULL); 19276 ASSERT(!mutex_owned(SD_MUTEX(un))); 19277 ASSERT(bufaddr != NULL); 19278 ASSERT(ucmdbuf != NULL); 19279 ASSERT(rqbuf != NULL); 19280 19281 SD_TRACE(SD_LOG_IO, un, 19282 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19283 19284 bzero(cdb, sizeof (cdb)); 19285 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19286 bzero(rqbuf, rqbuflen); 19287 bzero(bufaddr, buflen); 19288 19289 /* 19290 * Set up cdb field for the get configuration command. 19291 */ 19292 cdb[0] = SCMD_GET_CONFIGURATION; 19293 cdb[1] = 0x02; /* Requested Type */ 19294 cdb[3] = feature; 19295 cdb[8] = buflen; 19296 ucmdbuf->uscsi_cdb = cdb; 19297 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19298 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19299 ucmdbuf->uscsi_buflen = buflen; 19300 ucmdbuf->uscsi_timeout = sd_io_time; 19301 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19302 ucmdbuf->uscsi_rqlen = rqbuflen; 19303 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19304 19305 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19306 UIO_SYSSPACE, path_flag); 19307 19308 switch (status) { 19309 case 0: 19310 break; /* Success! 
*/
19311 case EIO:
19312 switch (ucmdbuf->uscsi_status) {
19313 case STATUS_RESERVATION_CONFLICT:
19314 status = EACCES;
19315 break;
19316 default:
19317 break;
19318 }
19319 break;
19320 default:
19321 break;
19322 }
19323
19324 if (status == 0) {
19325 SD_DUMP_MEMORY(un, SD_LOG_IO,
19326 "sd_send_scsi_feature_GET_CONFIGURATION: data",
19327 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
19328 }
19329
19330 SD_TRACE(SD_LOG_IO, un,
19331 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
19332
19333 return (status);
19334 }
19335
19336
19337 /*
19338 * Function: sd_send_scsi_MODE_SENSE
19339 *
19340 * Description: Utility function for issuing a scsi MODE SENSE command.
19341 * Note: This routine uses a consistent implementation for Group0,
19342 * Group1, and Group2 commands across all platforms. ATAPI devices
19343 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19344 *
19345 * Arguments: un - pointer to the softstate struct for the target.
19346 * cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte), or
19347 * CDB_GROUP[1|2] (10 byte)).
19348 * bufaddr - buffer for page data retrieved from the target.
19349 * buflen - size of page to be retrieved.
19350 * page_code - page code of data to be retrieved from the target.
19351 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19352 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19353 * to use the USCSI "direct" chain and bypass the normal
19354 * command waitq.
19355 *
19356 * Return Code: 0 - Success
19357 * errno return code from sd_send_scsi_cmd()
19358 *
19359 * Context: Can sleep. Does not return until command is completed.
19360 */
19361
19362 static int
19363 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19364 size_t buflen, uchar_t page_code, int path_flag)
19365 {
19366 struct scsi_extended_sense sense_buf;
19367 union scsi_cdb cdb;
19368 struct uscsi_cmd ucmd_buf;
19369 int status;
19370 int headlen;
19371
19372 ASSERT(un != NULL);
19373 ASSERT(!mutex_owned(SD_MUTEX(un)));
19374 ASSERT(bufaddr != NULL);
19375 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19376 (cdbsize == CDB_GROUP2));
19377
19378 SD_TRACE(SD_LOG_IO, un,
19379 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
19380
19381 bzero(&cdb, sizeof (cdb));
19382 bzero(&ucmd_buf, sizeof (ucmd_buf));
19383 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19384 bzero(bufaddr, buflen);
19385
19386 if (cdbsize == CDB_GROUP0) {
19387 cdb.scc_cmd = SCMD_MODE_SENSE;
19388 cdb.cdb_opaque[2] = page_code;
19389 FORMG0COUNT(&cdb, buflen);
19390 headlen = MODE_HEADER_LENGTH;
19391 } else {
19392 cdb.scc_cmd = SCMD_MODE_SENSE_G1;
19393 cdb.cdb_opaque[2] = page_code;
19394 FORMG1COUNT(&cdb, buflen);
19395 headlen = MODE_HEADER_LENGTH_GRP2;
19396 }
19397
19398 ASSERT(headlen <= buflen);
19399 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19400
19401 ucmd_buf.uscsi_cdb = (char *)&cdb;
19402 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19403 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19404 ucmd_buf.uscsi_buflen = buflen;
19405 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19406 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19407 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19408 ucmd_buf.uscsi_timeout = 60;
19409
19410 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19411 UIO_SYSSPACE, path_flag);
19412
19413 switch (status) {
19414 case 0:
19415 /*
19416 * sr_check_wp() uses 0x3f page code and checks the header of
19417 * mode page to determine if target device is
write-protected.
19418 * But some USB devices return 0 bytes for 0x3f page code. For
19419 * this case, make sure that at least the mode page header is
19420 * returned.
19421 */
19422 if (buflen - ucmd_buf.uscsi_resid < headlen)
19423 status = EIO;
19424 break; /* Success! */
19425 case EIO:
19426 switch (ucmd_buf.uscsi_status) {
19427 case STATUS_RESERVATION_CONFLICT:
19428 status = EACCES;
19429 break;
19430 default:
19431 break;
19432 }
19433 break;
19434 default:
19435 break;
19436 }
19437
19438 if (status == 0) {
19439 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
19440 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19441 }
19442 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
19443
19444 return (status);
19445 }
19446
19447
19448 /*
19449 * Function: sd_send_scsi_MODE_SELECT
19450 *
19451 * Description: Utility function for issuing a scsi MODE SELECT command.
19452 * Note: This routine uses a consistent implementation for Group0,
19453 * Group1, and Group2 commands across all platforms. ATAPI devices
19454 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19455 *
19456 * Arguments: un - pointer to the softstate struct for the target.
19457 * cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte), or
19458 * CDB_GROUP[1|2] (10 byte)).
19459 * bufaddr - buffer holding the page data to be sent to the target.
19460 * buflen - size of page to be transferred.
19461 * save_page - boolean to determine if SP bit should be set.
19462 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19463 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19464 * to use the USCSI "direct" chain and bypass the normal
19465 * command waitq.
19466 *
19467 * Return Code: 0 - Success
19468 * errno return code from sd_send_scsi_cmd()
19469 *
19470 * Context: Can sleep. Does not return until command is completed.
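 *
 * Usage note (a sketch of the conventional pattern, not mandated
 * by this routine): callers generally retrieve the current page
 * with sd_send_scsi_MODE_SENSE using the same cdbsize, modify the
 * page bytes in place, adjust the mode header as required, and
 * pass the same buffer back here. The PF bit set below advertises
 * SPC-style page data, and passing SD_SAVE_PAGE sets the SP bit so
 * the device saves the page across power cycles.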
19471 */
19472
19473 static int
19474 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19475 size_t buflen, uchar_t save_page, int path_flag)
19476 {
19477 struct scsi_extended_sense sense_buf;
19478 union scsi_cdb cdb;
19479 struct uscsi_cmd ucmd_buf;
19480 int status;
19481
19482 ASSERT(un != NULL);
19483 ASSERT(!mutex_owned(SD_MUTEX(un)));
19484 ASSERT(bufaddr != NULL);
19485 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19486 (cdbsize == CDB_GROUP2));
19487
19488 SD_TRACE(SD_LOG_IO, un,
19489 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
19490
19491 bzero(&cdb, sizeof (cdb));
19492 bzero(&ucmd_buf, sizeof (ucmd_buf));
19493 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19494
19495 /* Set the PF bit for many third party drives */
19496 cdb.cdb_opaque[1] = 0x10;
19497
19498 /* Set the savepage(SP) bit if given */
19499 if (save_page == SD_SAVE_PAGE) {
19500 cdb.cdb_opaque[1] |= 0x01;
19501 }
19502
19503 if (cdbsize == CDB_GROUP0) {
19504 cdb.scc_cmd = SCMD_MODE_SELECT;
19505 FORMG0COUNT(&cdb, buflen);
19506 } else {
19507 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
19508 FORMG1COUNT(&cdb, buflen);
19509 }
19510
19511 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19512
19513 ucmd_buf.uscsi_cdb = (char *)&cdb;
19514 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19515 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19516 ucmd_buf.uscsi_buflen = buflen;
19517 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19518 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19519 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
19520 ucmd_buf.uscsi_timeout = 60;
19521
19522 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19523 UIO_SYSSPACE, path_flag);
19524
19525 switch (status) {
19526 case 0:
19527 break; /* Success! */
19528 case EIO:
19529 switch (ucmd_buf.uscsi_status) {
19530 case STATUS_RESERVATION_CONFLICT:
19531 status = EACCES;
19532 break;
19533 default:
19534 break;
19535 }
19536 break;
19537 default:
19538 break;
19539 }
19540
19541 if (status == 0) {
19542 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
19543 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19544 }
19545 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
19546
19547 return (status);
19548 }
19549
19550
19551 /*
19552 * Function: sd_send_scsi_RDWR
19553 *
19554 * Description: Issue a scsi READ or WRITE command with the given parameters.
19555 *
19556 * Arguments: un: Pointer to the sd_lun struct for the target.
19557 * cmd: SCMD_READ or SCMD_WRITE
19558 * bufaddr: Address of caller's buffer for the RDWR data
19559 * buflen: Length of caller's buffer for the RDWR data.
19560 * start_block: Block number for the start of the RDWR operation.
19561 * (Assumes target-native block size.)
19564 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19565 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19566 * to use the USCSI "direct" chain and bypass the normal
19567 * command waitq.
19568 *
19569 * Return Code: 0 - Success
19570 * errno return code from sd_send_scsi_cmd()
19571 *
19572 * Context: Can sleep. Does not return until command is completed.
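 *
 * Note on CDB selection (restating standard SCSI limits): a 6-byte
 * Group 0 CDB carries only a 21-bit LBA and an 8-bit transfer
 * length, which is why any start_block with bits above bit 20 set
 * (the 0xFFE00000 test below) forces a 10-byte Group 1 CDB (32-bit
 * LBA, 16-bit count); blocks beyond 32 bits force a 16-byte Group 4
 * CDB (64-bit LBA, 32-bit count); and ATAPI devices always get
 * Group 1, since they do not implement the 6-byte READ/WRITE
 * opcodes.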
19573 */ 19574 19575 static int 19576 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19577 size_t buflen, daddr_t start_block, int path_flag) 19578 { 19579 struct scsi_extended_sense sense_buf; 19580 union scsi_cdb cdb; 19581 struct uscsi_cmd ucmd_buf; 19582 uint32_t block_count; 19583 int status; 19584 int cdbsize; 19585 uchar_t flag; 19586 19587 ASSERT(un != NULL); 19588 ASSERT(!mutex_owned(SD_MUTEX(un))); 19589 ASSERT(bufaddr != NULL); 19590 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19591 19592 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19593 19594 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19595 return (EINVAL); 19596 } 19597 19598 mutex_enter(SD_MUTEX(un)); 19599 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19600 mutex_exit(SD_MUTEX(un)); 19601 19602 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19603 19604 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19605 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19606 bufaddr, buflen, start_block, block_count); 19607 19608 bzero(&cdb, sizeof (cdb)); 19609 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19610 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19611 19612 /* Compute CDB size to use */ 19613 if (start_block > 0xffffffff) 19614 cdbsize = CDB_GROUP4; 19615 else if ((start_block & 0xFFE00000) || 19616 (un->un_f_cfg_is_atapi == TRUE)) 19617 cdbsize = CDB_GROUP1; 19618 else 19619 cdbsize = CDB_GROUP0; 19620 19621 switch (cdbsize) { 19622 case CDB_GROUP0: /* 6-byte CDBs */ 19623 cdb.scc_cmd = cmd; 19624 FORMG0ADDR(&cdb, start_block); 19625 FORMG0COUNT(&cdb, block_count); 19626 break; 19627 case CDB_GROUP1: /* 10-byte CDBs */ 19628 cdb.scc_cmd = cmd | SCMD_GROUP1; 19629 FORMG1ADDR(&cdb, start_block); 19630 FORMG1COUNT(&cdb, block_count); 19631 break; 19632 case CDB_GROUP4: /* 16-byte CDBs */ 19633 cdb.scc_cmd = cmd | SCMD_GROUP4; 19634 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19635 FORMG4COUNT(&cdb, block_count); 19636 break; 19637 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19638 default: 19639 /* All others reserved */ 19640 return (EINVAL); 19641 } 19642 19643 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19644 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19645 19646 ucmd_buf.uscsi_cdb = (char *)&cdb; 19647 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19648 ucmd_buf.uscsi_bufaddr = bufaddr; 19649 ucmd_buf.uscsi_buflen = buflen; 19650 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19651 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19652 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19653 ucmd_buf.uscsi_timeout = 60; 19654 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19655 UIO_SYSSPACE, path_flag); 19656 switch (status) { 19657 case 0: 19658 break; /* Success! */ 19659 case EIO: 19660 switch (ucmd_buf.uscsi_status) { 19661 case STATUS_RESERVATION_CONFLICT: 19662 status = EACCES; 19663 break; 19664 default: 19665 break; 19666 } 19667 break; 19668 default: 19669 break; 19670 } 19671 19672 if (status == 0) { 19673 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19674 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19675 } 19676 19677 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19678 19679 return (status); 19680 } 19681 19682 19683 /* 19684 * Function: sd_send_scsi_LOG_SENSE 19685 * 19686 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19687 * 19688 * Arguments: un: Pointer to the sd_lun struct for the target. 
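 * The remaining arguments, as used in the CDB setup below:
 * bufaddr: buffer to receive the log page data.
 * buflen: length of that buffer (used as the allocation
 * length in the CDB).
 * page_code: log page to fetch (CDB byte 2, bits 0-5).
 * page_control: page control field (CDB byte 2, bits 6-7).
 * param_ptr: parameter pointer, placed in CDB bytes 5-6.
 * path_flag: SD_PATH_DIRECT / SD_PATH_DIRECT_PRIORITY, as for
 * the other sd_send_scsi_* routines.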
19689 *
19690 * Return Code: 0 - Success
19691 * errno return code from sd_send_scsi_cmd()
19692 *
19693 * Context: Can sleep. Does not return until command is completed.
19694 */
19695
19696 static int
19697 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen,
19698 uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
19699 int path_flag)
19700
19701 {
19702 struct scsi_extended_sense sense_buf;
19703 union scsi_cdb cdb;
19704 struct uscsi_cmd ucmd_buf;
19705 int status;
19706
19707 ASSERT(un != NULL);
19708 ASSERT(!mutex_owned(SD_MUTEX(un)));
19709
19710 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);
19711
19712 bzero(&cdb, sizeof (cdb));
19713 bzero(&ucmd_buf, sizeof (ucmd_buf));
19714 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19715
19716 cdb.scc_cmd = SCMD_LOG_SENSE_G1;
19717 cdb.cdb_opaque[2] = (page_control << 6) | page_code;
19718 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
19719 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
19720 FORMG1COUNT(&cdb, buflen);
19721
19722 ucmd_buf.uscsi_cdb = (char *)&cdb;
19723 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
19724 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19725 ucmd_buf.uscsi_buflen = buflen;
19726 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19727 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19728 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19729 ucmd_buf.uscsi_timeout = 60;
19730
19731 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19732 UIO_SYSSPACE, path_flag);
19733
19734 switch (status) {
19735 case 0:
19736 break;
19737 case EIO:
19738 switch (ucmd_buf.uscsi_status) {
19739 case STATUS_RESERVATION_CONFLICT:
19740 status = EACCES;
19741 break;
19742 case STATUS_CHECK:
19743 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19744 (scsi_sense_key((uint8_t *)&sense_buf) ==
19745 KEY_ILLEGAL_REQUEST) &&
19746 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
19747 /*
19748 * ASC 0x24: INVALID FIELD IN CDB
19749 */
19750 switch (page_code) {
19751 case START_STOP_CYCLE_PAGE:
19752 /*
19753 * The start stop cycle counter is
19754 * implemented as page 0x31 in earlier
19755 * generation disks. In new generation
19756 * disks the start stop cycle counter is
19757 * implemented as page 0xE. To properly
19758 * handle this case, if an attempt for
19759 * log page 0xE is made and fails, we
19760 * will try again using page 0x31.
19761 *
19762 * Network storage BU committed to
19763 * maintain the page 0x31 for this
19764 * purpose and will not have any other
19765 * page implemented with page code 0x31
19766 * until all disks transition to the
19767 * standard page.
19768 */
19769 mutex_enter(SD_MUTEX(un));
19770 un->un_start_stop_cycle_page =
19771 START_STOP_CYCLE_VU_PAGE;
19772 cdb.cdb_opaque[2] =
19773 (char)(page_control << 6) |
19774 un->un_start_stop_cycle_page;
19775 mutex_exit(SD_MUTEX(un));
19776 status = sd_send_scsi_cmd(
19777 SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19778 UIO_SYSSPACE, path_flag);
19779
19780 break;
19781 case TEMPERATURE_PAGE:
19782 status = ENOTTY;
19783 break;
19784 default:
19785 break;
19786 }
19787 }
19788 break;
19789 default:
19790 break;
19791 }
19792 break;
19793 default:
19794 break;
19795 }
19796
19797 if (status == 0) {
19798 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
19799 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19800 }
19801
19802 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");
19803
19804 return (status);
19805 }
19806
19807
19808 /*
19809 * Function: sdioctl
19810 *
19811 * Description: Driver's ioctl(9e) entry point function.
19812 *
19813 * Arguments: dev - device number
19814 * cmd - ioctl operation to be performed
19815 * arg - user argument, contains data to be set or reference
19816 * parameter for get
19817 * flag - bit flag, indicating open settings, 32/64 bit type
19818 * cred_p - user credential pointer
19819 * rval_p - calling process return value (OPT)
19820 *
19821 * Return Code: EINVAL
19822 * ENOTTY
19823 * ENXIO
19824 * EIO
19825 * EFAULT
19826 * ENOTSUP
19827 * EPERM
19828 *
19829 * Context: Called from the device switch at normal priority.
19830 */
19831
19832 static int
19833 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
19834 {
19835 struct sd_lun *un = NULL;
19836 int err = 0;
19837 int i = 0;
19838 cred_t *cr;
19839 int tmprval = EINVAL;
19840 int is_valid;
19841
19842 /*
19843 * All device accesses go through sdstrategy, where we check on
19844 * suspend status.
19845 */
19846 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
19847 return (ENXIO);
19848 }
19849
19850 ASSERT(!mutex_owned(SD_MUTEX(un)));
19851
19852
19853 is_valid = SD_IS_VALID_LABEL(un);
19854
19855 /*
19856 * Moved this wait from sd_uscsi_strategy to here for
19857 * reasons of deadlock prevention. Internal driver commands,
19858 * specifically those to change a device's power level, result
19859 * in a call to sd_uscsi_strategy.
19860 */
19861 mutex_enter(SD_MUTEX(un));
19862 while ((un->un_state == SD_STATE_SUSPENDED) ||
19863 (un->un_state == SD_STATE_PM_CHANGING)) {
19864 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
19865 }
19866 /*
19867 * Twiddling the counter here protects commands from now
19868 * through to the top of sd_uscsi_strategy. Without the
19869 * counter increment, a power down, for example, could get in
19870 * after the above check for state is made and before
19871 * execution gets to the top of sd_uscsi_strategy.
19872 * That would cause problems.
19873 */ 19874 un->un_ncmds_in_driver++; 19875 19876 if (!is_valid && 19877 (flag & (FNDELAY | FNONBLOCK))) { 19878 switch (cmd) { 19879 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19880 case DKIOCGVTOC: 19881 case DKIOCGAPART: 19882 case DKIOCPARTINFO: 19883 case DKIOCSGEOM: 19884 case DKIOCSAPART: 19885 case DKIOCGETEFI: 19886 case DKIOCPARTITION: 19887 case DKIOCSVTOC: 19888 case DKIOCSETEFI: 19889 case DKIOCGMBOOT: 19890 case DKIOCSMBOOT: 19891 case DKIOCG_PHYGEOM: 19892 case DKIOCG_VIRTGEOM: 19893 /* let cmlb handle it */ 19894 goto skip_ready_valid; 19895 19896 case CDROMPAUSE: 19897 case CDROMRESUME: 19898 case CDROMPLAYMSF: 19899 case CDROMPLAYTRKIND: 19900 case CDROMREADTOCHDR: 19901 case CDROMREADTOCENTRY: 19902 case CDROMSTOP: 19903 case CDROMSTART: 19904 case CDROMVOLCTRL: 19905 case CDROMSUBCHNL: 19906 case CDROMREADMODE2: 19907 case CDROMREADMODE1: 19908 case CDROMREADOFFSET: 19909 case CDROMSBLKMODE: 19910 case CDROMGBLKMODE: 19911 case CDROMGDRVSPEED: 19912 case CDROMSDRVSPEED: 19913 case CDROMCDDA: 19914 case CDROMCDXA: 19915 case CDROMSUBCODE: 19916 if (!ISCD(un)) { 19917 un->un_ncmds_in_driver--; 19918 ASSERT(un->un_ncmds_in_driver >= 0); 19919 mutex_exit(SD_MUTEX(un)); 19920 return (ENOTTY); 19921 } 19922 break; 19923 case FDEJECT: 19924 case DKIOCEJECT: 19925 case CDROMEJECT: 19926 if (!un->un_f_eject_media_supported) { 19927 un->un_ncmds_in_driver--; 19928 ASSERT(un->un_ncmds_in_driver >= 0); 19929 mutex_exit(SD_MUTEX(un)); 19930 return (ENOTTY); 19931 } 19932 break; 19933 case DKIOCFLUSHWRITECACHE: 19934 mutex_exit(SD_MUTEX(un)); 19935 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19936 if (err != 0) { 19937 mutex_enter(SD_MUTEX(un)); 19938 un->un_ncmds_in_driver--; 19939 ASSERT(un->un_ncmds_in_driver >= 0); 19940 mutex_exit(SD_MUTEX(un)); 19941 return (EIO); 19942 } 19943 mutex_enter(SD_MUTEX(un)); 19944 /* FALLTHROUGH */ 19945 case DKIOCREMOVABLE: 19946 case DKIOCHOTPLUGGABLE: 19947 case DKIOCINFO: 19948 case DKIOCGMEDIAINFO: 19949 case MHIOCENFAILFAST: 19950 case MHIOCSTATUS: 19951 case MHIOCTKOWN: 19952 case MHIOCRELEASE: 19953 case MHIOCGRP_INKEYS: 19954 case MHIOCGRP_INRESV: 19955 case MHIOCGRP_REGISTER: 19956 case MHIOCGRP_RESERVE: 19957 case MHIOCGRP_PREEMPTANDABORT: 19958 case MHIOCGRP_REGISTERANDIGNOREKEY: 19959 case CDROMCLOSETRAY: 19960 case USCSICMD: 19961 goto skip_ready_valid; 19962 default: 19963 break; 19964 } 19965 19966 mutex_exit(SD_MUTEX(un)); 19967 err = sd_ready_and_valid(un); 19968 mutex_enter(SD_MUTEX(un)); 19969 19970 if (err != SD_READY_VALID) { 19971 switch (cmd) { 19972 case DKIOCSTATE: 19973 case CDROMGDRVSPEED: 19974 case CDROMSDRVSPEED: 19975 case FDEJECT: /* for eject command */ 19976 case DKIOCEJECT: 19977 case CDROMEJECT: 19978 case DKIOCREMOVABLE: 19979 case DKIOCHOTPLUGGABLE: 19980 break; 19981 default: 19982 if (un->un_f_has_removable_media) { 19983 err = ENXIO; 19984 } else { 19985 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19986 if (err == SD_RESERVED_BY_OTHERS) { 19987 err = EACCES; 19988 } else { 19989 err = EIO; 19990 } 19991 } 19992 un->un_ncmds_in_driver--; 19993 ASSERT(un->un_ncmds_in_driver >= 0); 19994 mutex_exit(SD_MUTEX(un)); 19995 return (err); 19996 } 19997 } 19998 } 19999 20000 skip_ready_valid: 20001 mutex_exit(SD_MUTEX(un)); 20002 20003 switch (cmd) { 20004 case DKIOCINFO: 20005 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20006 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20007 break; 20008 20009 case DKIOCGMEDIAINFO: 20010 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20011 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 20012 break; 20013 20014 case DKIOCGGEOM: 20015 case DKIOCGVTOC: 20016 case DKIOCGAPART: 20017 case DKIOCPARTINFO: 20018 case DKIOCSGEOM: 20019 case DKIOCSAPART: 20020 case DKIOCGETEFI: 20021 case DKIOCPARTITION: 20022 case DKIOCSVTOC: 20023 case DKIOCSETEFI: 20024 case DKIOCGMBOOT: 20025 case DKIOCSMBOOT: 20026 case DKIOCG_PHYGEOM: 20027 case DKIOCG_VIRTGEOM: 20028 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 20029 20030 /* TUR should spin up */ 20031 20032 if (un->un_f_has_removable_media) 20033 err = sd_send_scsi_TEST_UNIT_READY(un, 20034 SD_CHECK_FOR_MEDIA); 20035 else 20036 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20037 20038 if (err != 0) 20039 break; 20040 20041 err = cmlb_ioctl(un->un_cmlbhandle, dev, 20042 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 20043 20044 if ((err == 0) && 20045 ((cmd == DKIOCSETEFI) || 20046 ((un->un_f_pkstats_enabled) && 20047 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC)))) { 20048 20049 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 20050 (void *)SD_PATH_DIRECT); 20051 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 20052 sd_set_pstats(un); 20053 SD_TRACE(SD_LOG_IO_PARTITION, un, 20054 "sd_ioctl: un:0x%p pstats created and " 20055 "set\n", un); 20056 } 20057 } 20058 20059 if ((cmd == DKIOCSVTOC) || 20060 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 20061 20062 mutex_enter(SD_MUTEX(un)); 20063 if (un->un_f_devid_supported && 20064 (un->un_f_opt_fab_devid == TRUE)) { 20065 if (un->un_devid == NULL) { 20066 sd_register_devid(un, SD_DEVINFO(un), 20067 SD_TARGET_IS_UNRESERVED); 20068 } else { 20069 /* 20070 * The device id for this disk 20071 * has been fabricated. The 20072 * device id must be preserved 20073 * by writing it back out to 20074 * disk. 20075 */ 20076 if (sd_write_deviceid(un) != 0) { 20077 ddi_devid_free(un->un_devid); 20078 un->un_devid = NULL; 20079 } 20080 } 20081 } 20082 mutex_exit(SD_MUTEX(un)); 20083 } 20084 20085 break; 20086 20087 case DKIOCLOCK: 20088 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 20089 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20090 SD_PATH_STANDARD); 20091 break; 20092 20093 case DKIOCUNLOCK: 20094 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 20095 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 20096 SD_PATH_STANDARD); 20097 break; 20098 20099 case DKIOCSTATE: { 20100 enum dkio_state state; 20101 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 20102 20103 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 20104 err = EFAULT; 20105 } else { 20106 err = sd_check_media(dev, state); 20107 if (err == 0) { 20108 if (ddi_copyout(&un->un_mediastate, (void *)arg, 20109 sizeof (int), flag) != 0) 20110 err = EFAULT; 20111 } 20112 } 20113 break; 20114 } 20115 20116 case DKIOCREMOVABLE: 20117 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 20118 i = un->un_f_has_removable_media ? 1 : 0; 20119 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20120 err = EFAULT; 20121 } else { 20122 err = 0; 20123 } 20124 break; 20125 20126 case DKIOCHOTPLUGGABLE: 20127 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 20128 i = un->un_f_is_hotpluggable ?
1 : 0; 20129 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20130 err = EFAULT; 20131 } else { 20132 err = 0; 20133 } 20134 break; 20135 20136 case DKIOCGTEMPERATURE: 20137 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 20138 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 20139 break; 20140 20141 case MHIOCENFAILFAST: 20142 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 20143 if ((err = drv_priv(cred_p)) == 0) { 20144 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 20145 } 20146 break; 20147 20148 case MHIOCTKOWN: 20149 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 20150 if ((err = drv_priv(cred_p)) == 0) { 20151 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 20152 } 20153 break; 20154 20155 case MHIOCRELEASE: 20156 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 20157 if ((err = drv_priv(cred_p)) == 0) { 20158 err = sd_mhdioc_release(dev); 20159 } 20160 break; 20161 20162 case MHIOCSTATUS: 20163 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 20164 if ((err = drv_priv(cred_p)) == 0) { 20165 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 20166 case 0: 20167 err = 0; 20168 break; 20169 case EACCES: 20170 *rval_p = 1; 20171 err = 0; 20172 break; 20173 default: 20174 err = EIO; 20175 break; 20176 } 20177 } 20178 break; 20179 20180 case MHIOCQRESERVE: 20181 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20182 if ((err = drv_priv(cred_p)) == 0) { 20183 err = sd_reserve_release(dev, SD_RESERVE); 20184 } 20185 break; 20186 20187 case MHIOCREREGISTERDEVID: 20188 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20189 if (drv_priv(cred_p) == EPERM) { 20190 err = EPERM; 20191 } else if (!un->un_f_devid_supported) { 20192 err = ENOTTY; 20193 } else { 20194 err = sd_mhdioc_register_devid(dev); 20195 } 20196 break; 20197 20198 case MHIOCGRP_INKEYS: 20199 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20200 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20201 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20202 err = ENOTSUP; 20203 } else { 20204 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20205 flag); 20206 } 20207 } 20208 break; 20209 20210 case MHIOCGRP_INRESV: 20211 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20212 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20213 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20214 err = ENOTSUP; 20215 } else { 20216 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20217 } 20218 } 20219 break; 20220 20221 case MHIOCGRP_REGISTER: 20222 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20223 if ((err = drv_priv(cred_p)) != EPERM) { 20224 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20225 err = ENOTSUP; 20226 } else if (arg != NULL) { 20227 mhioc_register_t reg; 20228 if (ddi_copyin((void *)arg, ®, 20229 sizeof (mhioc_register_t), flag) != 0) { 20230 err = EFAULT; 20231 } else { 20232 err = 20233 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20234 un, SD_SCSI3_REGISTER, 20235 (uchar_t *)®); 20236 } 20237 } 20238 } 20239 break; 20240 20241 case MHIOCGRP_RESERVE: 20242 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20243 if ((err = drv_priv(cred_p)) != EPERM) { 20244 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20245 err = ENOTSUP; 20246 } else if (arg != NULL) { 20247 mhioc_resv_desc_t resv_desc; 20248 if (ddi_copyin((void *)arg, &resv_desc, 20249 sizeof (mhioc_resv_desc_t), flag) != 0) { 20250 err = EFAULT; 20251 } else { 20252 err = 20253 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20254 un, SD_SCSI3_RESERVE, 20255 (uchar_t *)&resv_desc); 20256 } 20257 } 20258 } 20259 break; 20260 20261 
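	/*
	 * Illustrative sketch (a hypothetical user-level caller, not
	 * part of this driver) of the MHIOCGRP_RESERVE path handled
	 * above; "fd" and the descriptor contents are assumptions:
	 *
	 *	mhioc_resv_desc_t rd;
	 *	bzero(&rd, sizeof (rd));
	 *	... fill in the reservation key, scope and type per mhd.h ...
	 *	if (ioctl(fd, MHIOCGRP_RESERVE, &rd) != 0)
	 *		... errno may be EPERM, ENOTSUP, EFAULT, etc. ...
	 */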
case MHIOCGRP_PREEMPTANDABORT: 20262 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20263 if ((err = drv_priv(cred_p)) != EPERM) { 20264 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20265 err = ENOTSUP; 20266 } else if (arg != NULL) { 20267 mhioc_preemptandabort_t preempt_abort; 20268 if (ddi_copyin((void *)arg, &preempt_abort, 20269 sizeof (mhioc_preemptandabort_t), 20270 flag) != 0) { 20271 err = EFAULT; 20272 } else { 20273 err = 20274 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20275 un, SD_SCSI3_PREEMPTANDABORT, 20276 (uchar_t *)&preempt_abort); 20277 } 20278 } 20279 } 20280 break; 20281 20282 case MHIOCGRP_REGISTERANDIGNOREKEY: 20283 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20284 if ((err = drv_priv(cred_p)) != EPERM) { 20285 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20286 err = ENOTSUP; 20287 } else if (arg != NULL) { 20288 mhioc_registerandignorekey_t r_and_i; 20289 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20290 sizeof (mhioc_registerandignorekey_t), 20291 flag) != 0) { 20292 err = EFAULT; 20293 } else { 20294 err = 20295 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20296 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20297 (uchar_t *)&r_and_i); 20298 } 20299 } 20300 } 20301 break; 20302 20303 case USCSICMD: 20304 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20305 cr = ddi_get_cred(); 20306 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20307 err = EPERM; 20308 } else { 20309 enum uio_seg uioseg; 20310 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20311 UIO_USERSPACE; 20312 if (un->un_f_format_in_progress == TRUE) { 20313 err = EAGAIN; 20314 break; 20315 } 20316 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20317 flag, uioseg, SD_PATH_STANDARD); 20318 } 20319 break; 20320 20321 case CDROMPAUSE: 20322 case CDROMRESUME: 20323 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20324 if (!ISCD(un)) { 20325 err = ENOTTY; 20326 } else { 20327 err = sr_pause_resume(dev, cmd); 20328 } 20329 break; 20330 20331 case CDROMPLAYMSF: 20332 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20333 if (!ISCD(un)) { 20334 err = ENOTTY; 20335 } else { 20336 err = sr_play_msf(dev, (caddr_t)arg, flag); 20337 } 20338 break; 20339 20340 case CDROMPLAYTRKIND: 20341 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20342 #if defined(__i386) || defined(__amd64) 20343 /* 20344 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20345 */ 20346 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20347 #else 20348 if (!ISCD(un)) { 20349 #endif 20350 err = ENOTTY; 20351 } else { 20352 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20353 } 20354 break; 20355 20356 case CDROMREADTOCHDR: 20357 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20358 if (!ISCD(un)) { 20359 err = ENOTTY; 20360 } else { 20361 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20362 } 20363 break; 20364 20365 case CDROMREADTOCENTRY: 20366 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20367 if (!ISCD(un)) { 20368 err = ENOTTY; 20369 } else { 20370 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20371 } 20372 break; 20373 20374 case CDROMSTOP: 20375 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20376 if (!ISCD(un)) { 20377 err = ENOTTY; 20378 } else { 20379 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20380 SD_PATH_STANDARD); 20381 } 20382 break; 20383 20384 case CDROMSTART: 20385 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20386 if (!ISCD(un)) { 20387 err = ENOTTY; 20388 } else { 20389 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20390 SD_PATH_STANDARD); 20391 } 20392 break; 
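	/*
	 * Informative note: CDROMSTOP and CDROMSTART above, and
	 * CDROMCLOSETRAY below, all map onto
	 * sd_send_scsi_START_STOP_UNIT() with SD_TARGET_STOP,
	 * SD_TARGET_START and SD_TARGET_CLOSE respectively; e.g. a
	 * hypothetical user program could issue
	 *
	 *	(void) ioctl(fd, CDROMSTOP, 0);		stop the unit
	 *	(void) ioctl(fd, CDROMSTART, 0);	spin it up again
	 */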
20393 20394 case CDROMCLOSETRAY: 20395 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 20396 if (!ISCD(un)) { 20397 err = ENOTTY; 20398 } else { 20399 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 20400 SD_PATH_STANDARD); 20401 } 20402 break; 20403 20404 case FDEJECT: /* for eject command */ 20405 case DKIOCEJECT: 20406 case CDROMEJECT: 20407 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 20408 if (!un->un_f_eject_media_supported) { 20409 err = ENOTTY; 20410 } else { 20411 err = sr_eject(dev); 20412 } 20413 break; 20414 20415 case CDROMVOLCTRL: 20416 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 20417 if (!ISCD(un)) { 20418 err = ENOTTY; 20419 } else { 20420 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 20421 } 20422 break; 20423 20424 case CDROMSUBCHNL: 20425 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 20426 if (!ISCD(un)) { 20427 err = ENOTTY; 20428 } else { 20429 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 20430 } 20431 break; 20432 20433 case CDROMREADMODE2: 20434 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 20435 if (!ISCD(un)) { 20436 err = ENOTTY; 20437 } else if (un->un_f_cfg_is_atapi == TRUE) { 20438 /* 20439 * If the drive supports READ CD, use that instead of 20440 * switching the LBA size via a MODE SELECT 20441 * Block Descriptor 20442 */ 20443 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 20444 } else { 20445 err = sr_read_mode2(dev, (caddr_t)arg, flag); 20446 } 20447 break; 20448 20449 case CDROMREADMODE1: 20450 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 20451 if (!ISCD(un)) { 20452 err = ENOTTY; 20453 } else { 20454 err = sr_read_mode1(dev, (caddr_t)arg, flag); 20455 } 20456 break; 20457 20458 case CDROMREADOFFSET: 20459 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 20460 if (!ISCD(un)) { 20461 err = ENOTTY; 20462 } else { 20463 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 20464 flag); 20465 } 20466 break; 20467 20468 case CDROMSBLKMODE: 20469 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 20470 /* 20471 * There is no means of changing the block size on ATAPI 20472 * drives, thus return ENOTTY if the drive type is ATAPI 20473 */ 20474 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20475 err = ENOTTY; 20476 } else if (un->un_f_mmc_cap == TRUE) { 20477 20478 /* 20479 * MMC Devices do not support changing the 20480 * logical block size 20481 * 20482 * Note: EINVAL is being returned instead of ENOTTY to 20483 * maintain consistency with the original mmc 20484 * driver update. 20485 */ 20486 err = EINVAL; 20487 } else { 20488 mutex_enter(SD_MUTEX(un)); 20489 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 20490 (un->un_ncmds_in_transport > 0)) { 20491 mutex_exit(SD_MUTEX(un)); 20492 err = EINVAL; 20493 } else { 20494 mutex_exit(SD_MUTEX(un)); 20495 err = sr_change_blkmode(dev, cmd, arg, flag); 20496 } 20497 } 20498 break; 20499 20500 case CDROMGBLKMODE: 20501 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 20502 if (!ISCD(un)) { 20503 err = ENOTTY; 20504 } else if ((un->un_f_cfg_is_atapi != FALSE) && 20505 (un->un_f_blockcount_is_valid != FALSE)) { 20506 /* 20507 * The drive is an ATAPI drive, so return the target 20508 * block size, since the blocksize cannot be changed 20509 * on ATAPI drives. Used primarily to detect 20510 * if an ATAPI cdrom is present. 20511 */ 20512 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 20513 sizeof (int), flag) != 0) { 20514 err = EFAULT; 20515 } else { 20516 err = 0; 20517 } 20518 20519 } else { 20520 /* 20521 * Drive supports changing block sizes via a Mode 20522 * Select.
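 * (As with the LBA size switch noted under CDROMREADMODE2 above,
 * the change is presumably made via a MODE SELECT Block Descriptor;
 * sr_change_blkmode carries it out.)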
20523 */ 20524 err = sr_change_blkmode(dev, cmd, arg, flag); 20525 } 20526 break; 20527 20528 case CDROMGDRVSPEED: 20529 case CDROMSDRVSPEED: 20530 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 20531 if (!ISCD(un)) { 20532 err = ENOTTY; 20533 } else if (un->un_f_mmc_cap == TRUE) { 20534 /* 20535 * Note: In the future the driver implementation 20536 * for getting and 20537 * setting cd speed should entail: 20538 * 1) If non-mmc try the Toshiba mode page 20539 * (sr_change_speed) 20540 * 2) If mmc but no support for Real Time Streaming try 20541 * the SET CD SPEED (0xBB) command 20542 * (sr_atapi_change_speed) 20543 * 3) If mmc and support for Real Time Streaming 20544 * try the GET PERFORMANCE and SET STREAMING 20545 * commands (not yet implemented, 4380808) 20546 */ 20547 /* 20548 * As per recent MMC spec, CD-ROM speed is variable 20549 * and changes with LBA. Since there is no such 20550 * thing as drive speed now, fail this ioctl. 20551 * 20552 * Note: EINVAL is returned for consistency with the 20553 * original implementation, which included support for 20554 * getting the drive speed of mmc devices but not 20555 * setting the drive speed. Thus EINVAL would be 20556 * returned if a set request was made for an mmc device. 20557 * We no longer support get or set speed for 20558 * mmc but need to remain consistent with regard 20559 * to the error code returned. 20560 */ 20561 err = EINVAL; 20562 } else if (un->un_f_cfg_is_atapi == TRUE) { 20563 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20564 } else { 20565 err = sr_change_speed(dev, cmd, arg, flag); 20566 } 20567 break; 20568 20569 case CDROMCDDA: 20570 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20571 if (!ISCD(un)) { 20572 err = ENOTTY; 20573 } else { 20574 err = sr_read_cdda(dev, (void *)arg, flag); 20575 } 20576 break; 20577 20578 case CDROMCDXA: 20579 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20580 if (!ISCD(un)) { 20581 err = ENOTTY; 20582 } else { 20583 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20584 } 20585 break; 20586 20587 case CDROMSUBCODE: 20588 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20589 if (!ISCD(un)) { 20590 err = ENOTTY; 20591 } else { 20592 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20593 } 20594 break; 20595 20596 20597 #ifdef SDDEBUG 20598 /* RESET/ABORTS testing ioctls */ 20599 case DKIOCRESET: { 20600 int reset_level; 20601 20602 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20603 err = EFAULT; 20604 } else { 20605 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20606 "reset_level = 0x%x\n", reset_level); 20607 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20608 err = 0; 20609 } else { 20610 err = EIO; 20611 } 20612 } 20613 break; 20614 } 20615 20616 case DKIOCABORT: 20617 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20618 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20619 err = 0; 20620 } else { 20621 err = EIO; 20622 } 20623 break; 20624 #endif 20625 20626 #ifdef SD_FAULT_INJECTION 20627 /* SDIOC FaultInjection testing ioctls */ 20628 case SDIOCSTART: 20629 case SDIOCSTOP: 20630 case SDIOCINSERTPKT: 20631 case SDIOCINSERTXB: 20632 case SDIOCINSERTUN: 20633 case SDIOCINSERTARQ: 20634 case SDIOCPUSH: 20635 case SDIOCRETRIEVE: 20636 case SDIOCRUN: 20637 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 20638 "SDIOC detected cmd:0x%X:\n", cmd); 20639 /* call error generator */ 20640 sd_faultinjection_ioctl(cmd, arg, un); 20641 err = 0; 20642 break; 20643 20644 #endif /* SD_FAULT_INJECTION */ 20645 20646 case DKIOCFLUSHWRITECACHE: 20647 { 20648 struct dk_callback *dkc = (struct
dk_callback *)arg; 20649 20650 mutex_enter(SD_MUTEX(un)); 20651 if (!un->un_f_sync_cache_supported || 20652 !un->un_f_write_cache_enabled) { 20653 err = un->un_f_sync_cache_supported ? 20654 0 : ENOTSUP; 20655 mutex_exit(SD_MUTEX(un)); 20656 if ((flag & FKIOCTL) && dkc != NULL && 20657 dkc->dkc_callback != NULL) { 20658 (*dkc->dkc_callback)(dkc->dkc_cookie, 20659 err); 20660 /* 20661 * Did callback and reported error. 20662 * Since we did a callback, ioctl 20663 * should return 0. 20664 */ 20665 err = 0; 20666 } 20667 break; 20668 } 20669 mutex_exit(SD_MUTEX(un)); 20670 20671 if ((flag & FKIOCTL) && dkc != NULL && 20672 dkc->dkc_callback != NULL) { 20673 /* async SYNC CACHE request */ 20674 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20675 } else { 20676 /* synchronous SYNC CACHE request */ 20677 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20678 } 20679 } 20680 break; 20681 20682 case DKIOCGETWCE: { 20683 20684 int wce; 20685 20686 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20687 break; 20688 } 20689 20690 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20691 err = EFAULT; 20692 } 20693 break; 20694 } 20695 20696 case DKIOCSETWCE: { 20697 20698 int wce, sync_supported; 20699 20700 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20701 err = EFAULT; 20702 break; 20703 } 20704 20705 /* 20706 * Synchronize multiple threads trying to enable 20707 * or disable the cache via the un_f_wcc_cv 20708 * condition variable. 20709 */ 20710 mutex_enter(SD_MUTEX(un)); 20711 20712 /* 20713 * Don't allow the cache to be enabled if the 20714 * config file has it disabled. 20715 */ 20716 if (un->un_f_opt_disable_cache && wce) { 20717 mutex_exit(SD_MUTEX(un)); 20718 err = EINVAL; 20719 break; 20720 } 20721 20722 /* 20723 * Wait for write cache change in progress 20724 * bit to be clear before proceeding. 20725 */ 20726 while (un->un_f_wcc_inprog) 20727 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20728 20729 un->un_f_wcc_inprog = 1; 20730 20731 if (un->un_f_write_cache_enabled && wce == 0) { 20732 /* 20733 * Disable the write cache. Don't clear 20734 * un_f_write_cache_enabled until after 20735 * the mode select and flush are complete. 20736 */ 20737 sync_supported = un->un_f_sync_cache_supported; 20738 20739 /* 20740 * If cache flush is suppressed, we assume that the 20741 * controller firmware will take care of managing the 20742 * write cache for us: no need to explicitly 20743 * disable it. 20744 */ 20745 if (!un->un_f_suppress_cache_flush) { 20746 mutex_exit(SD_MUTEX(un)); 20747 if ((err = sd_cache_control(un, 20748 SD_CACHE_NOCHANGE, 20749 SD_CACHE_DISABLE)) == 0 && 20750 sync_supported) { 20751 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20752 NULL); 20753 } 20754 } else { 20755 mutex_exit(SD_MUTEX(un)); 20756 } 20757 20758 mutex_enter(SD_MUTEX(un)); 20759 if (err == 0) { 20760 un->un_f_write_cache_enabled = 0; 20761 } 20762 20763 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20764 /* 20765 * Set un_f_write_cache_enabled first, so there is 20766 * no window where the cache is enabled, but the 20767 * bit says it isn't. 20768 */ 20769 un->un_f_write_cache_enabled = 1; 20770 20771 /* 20772 * If cache flush is suppressed, we assume that the 20773 * controller firmware will take care of managing the 20774 * write cache for us: no need to explicitly 20775 * enable it. 
20776 */ 20777 if (!un->un_f_suppress_cache_flush) { 20778 mutex_exit(SD_MUTEX(un)); 20779 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20780 SD_CACHE_ENABLE); 20781 } else { 20782 mutex_exit(SD_MUTEX(un)); 20783 } 20784 20785 mutex_enter(SD_MUTEX(un)); 20786 20787 if (err) { 20788 un->un_f_write_cache_enabled = 0; 20789 } 20790 } 20791 20792 un->un_f_wcc_inprog = 0; 20793 cv_broadcast(&un->un_wcc_cv); 20794 mutex_exit(SD_MUTEX(un)); 20795 break; 20796 } 20797 20798 default: 20799 err = ENOTTY; 20800 break; 20801 } 20802 mutex_enter(SD_MUTEX(un)); 20803 un->un_ncmds_in_driver--; 20804 ASSERT(un->un_ncmds_in_driver >= 0); 20805 mutex_exit(SD_MUTEX(un)); 20806 20807 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20808 return (err); 20809 } 20810 20811 20812 /* 20813 * Function: sd_dkio_ctrl_info 20814 * 20815 * Description: This routine is the driver entry point for handling controller 20816 * information ioctl requests (DKIOCINFO). 20817 * 20818 * Arguments: dev - the device number 20819 * arg - pointer to user provided dk_cinfo structure 20820 * specifying the controller type and attributes. 20821 * flag - this argument is a pass through to ddi_copyxxx() 20822 * directly from the mode argument of ioctl(). 20823 * 20824 * Return Code: 0 20825 * EFAULT 20826 * ENXIO 20827 */ 20828 20829 static int 20830 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20831 { 20832 struct sd_lun *un = NULL; 20833 struct dk_cinfo *info; 20834 dev_info_t *pdip; 20835 int lun, tgt; 20836 20837 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20838 return (ENXIO); 20839 } 20840 20841 info = (struct dk_cinfo *) 20842 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20843 20844 switch (un->un_ctype) { 20845 case CTYPE_CDROM: 20846 info->dki_ctype = DKC_CDROM; 20847 break; 20848 default: 20849 info->dki_ctype = DKC_SCSI_CCS; 20850 break; 20851 } 20852 pdip = ddi_get_parent(SD_DEVINFO(un)); 20853 info->dki_cnum = ddi_get_instance(pdip); 20854 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20855 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20856 } else { 20857 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20858 DK_DEVLEN - 1); 20859 } 20860 20861 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20862 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20863 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20864 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20865 20866 /* Unit Information */ 20867 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20868 info->dki_slave = ((tgt << 3) | lun); 20869 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20870 DK_DEVLEN - 1); 20871 info->dki_flags = DKI_FMTVOL; 20872 info->dki_partition = SDPART(dev); 20873 20874 /* Max Transfer size of this device in blocks */ 20875 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20876 info->dki_addr = 0; 20877 info->dki_space = 0; 20878 info->dki_prio = 0; 20879 info->dki_vec = 0; 20880 20881 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20882 kmem_free(info, sizeof (struct dk_cinfo)); 20883 return (EFAULT); 20884 } else { 20885 kmem_free(info, sizeof (struct dk_cinfo)); 20886 return (0); 20887 } 20888 } 20889 20890 20891 /* 20892 * Function: sd_get_media_info 20893 * 20894 * Description: This routine is the driver entry point for handling ioctl 20895 * requests for the media type or command set profile used by the 20896 * drive to operate on the media (DKIOCGMEDIAINFO). 
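 *
 *		A hypothetical user-level consumer (illustrative only;
 *		"fd" is assumed to be an open descriptor on the device):
 *
 *			struct dk_minfo mi;
 *			if (ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0)
 *				(void) printf("type=%u lbsize=%u\n",
 *				    mi.dki_media_type, mi.dki_lbsize);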
20897 * 20898 * Arguments: dev - the device number 20899 * arg - pointer to user provided dk_minfo structure 20900 * specifying the media type, logical block size and 20901 * drive capacity. 20902 * flag - this argument is a pass through to ddi_copyxxx() 20903 * directly from the mode argument of ioctl(). 20904 * 20905 * Return Code: 0 20906 * EACCES 20907 * EFAULT 20908 * ENXIO 20909 * EIO 20910 */ 20911 20912 static int 20913 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20914 { 20915 struct sd_lun *un = NULL; 20916 struct uscsi_cmd com; 20917 struct scsi_inquiry *sinq; 20918 struct dk_minfo media_info; 20919 u_longlong_t media_capacity; 20920 uint64_t capacity; 20921 uint_t lbasize; 20922 uchar_t *out_data; 20923 uchar_t *rqbuf; 20924 int rval = 0; 20925 int rtn; 20926 20927 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20928 (un->un_state == SD_STATE_OFFLINE)) { 20929 return (ENXIO); 20930 } 20931 20932 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20933 20934 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20935 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20936 20937 /* Issue a TUR to determine if the drive is ready with media present */ 20938 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20939 if (rval == ENXIO) { 20940 goto done; 20941 } 20942 20943 /* Now get configuration data */ 20944 if (ISCD(un)) { 20945 media_info.dki_media_type = DK_CDROM; 20946 20947 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20948 if (un->un_f_mmc_cap == TRUE) { 20949 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20950 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20951 SD_PATH_STANDARD); 20952 20953 if (rtn) { 20954 /* 20955 * Failed for other than an illegal request 20956 * or command not supported 20957 */ 20958 if ((com.uscsi_status == STATUS_CHECK) && 20959 (com.uscsi_rqstatus == STATUS_GOOD)) { 20960 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20961 (rqbuf[12] != 0x20)) { 20962 rval = EIO; 20963 goto done; 20964 } 20965 } 20966 } else { 20967 /* 20968 * The GET CONFIGURATION command succeeded 20969 * so set the media type according to the 20970 * returned data 20971 */ 20972 media_info.dki_media_type = out_data[6]; 20973 media_info.dki_media_type <<= 8; 20974 media_info.dki_media_type |= out_data[7]; 20975 } 20976 } 20977 } else { 20978 /* 20979 * The profile list is not available, so we attempt to identify 20980 * the media type based on the inquiry data 20981 */ 20982 sinq = un->un_sd->sd_inq; 20983 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20984 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20985 /* This is a direct access device or optical disk */ 20986 media_info.dki_media_type = DK_FIXED_DISK; 20987 20988 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20989 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20990 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20991 media_info.dki_media_type = DK_ZIP; 20992 } else if ( 20993 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20994 media_info.dki_media_type = DK_JAZ; 20995 } 20996 } 20997 } else { 20998 /* 20999 * Not a CD, direct access or optical disk so return 21000 * unknown media 21001 */ 21002 media_info.dki_media_type = DK_UNKNOWN; 21003 } 21004 } 21005 21006 /* Now read the capacity so we can provide the lbasize and capacity */ 21007 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 21008 SD_PATH_DIRECT)) { 21009 case 0: 21010 break; 21011 case EACCES: 21012 rval = EACCES; 21013 goto done; 21014 default: 21015 rval = EIO; 21016 goto done; 21017 } 21018 21019 /* 21020 * If lun
is expanded dynamically, update the un structure. 21021 */ 21022 mutex_enter(SD_MUTEX(un)); 21023 if ((un->un_f_blockcount_is_valid == TRUE) && 21024 (un->un_f_tgt_blocksize_is_valid == TRUE) && 21025 (capacity > un->un_blockcount)) { 21026 sd_update_block_info(un, lbasize, capacity); 21027 } 21028 mutex_exit(SD_MUTEX(un)); 21029 21030 media_info.dki_lbsize = lbasize; 21031 media_capacity = capacity; 21032 21033 /* 21034 * sd_send_scsi_READ_CAPACITY() reports capacity in 21035 * un->un_sys_blocksize chunks. So we need to convert it into 21036 * lbasize chunks. 21037 */ 21038 media_capacity *= un->un_sys_blocksize; 21039 media_capacity /= lbasize; 21040 media_info.dki_capacity = media_capacity; 21041 21042 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 21043 rval = EFAULT; 21044 /* Keep the goto in case code is added below in the future */ 21045 goto done; 21046 } 21047 done: 21048 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 21049 kmem_free(rqbuf, SENSE_LENGTH); 21050 return (rval); 21051 } 21052 21053 21054 /* 21055 * Function: sd_check_media 21056 * 21057 * Description: This utility routine implements the functionality for the 21058 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 21059 * driver state changes from that specified by the user 21060 * (inserted or ejected). For example, if the user specifies 21061 * DKIO_EJECTED and the current media state is inserted this 21062 * routine will immediately return DKIO_INSERTED. However, if the 21063 * current media state is not inserted the user thread will be 21064 * blocked until the drive state changes. If DKIO_NONE is specified 21065 * the user thread will block until a drive state change occurs. 21066 * 21067 * Arguments: dev - the device number 21068 * state - the dkio_state specified by the user; this routine 21069 * blocks while the current media state matches this value. 21070 * 21071 * Return Code: ENXIO 21072 * EIO 21073 * EAGAIN 21074 * EINTR 21075 */ 21076 21077 static int 21078 sd_check_media(dev_t dev, enum dkio_state state) 21079 { 21080 struct sd_lun *un = NULL; 21081 enum dkio_state prev_state; 21082 opaque_t token = NULL; 21083 int rval = 0; 21084 21085 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21086 return (ENXIO); 21087 } 21088 21089 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 21090 21091 mutex_enter(SD_MUTEX(un)); 21092 21093 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 21094 "state=%x, mediastate=%x\n", state, un->un_mediastate); 21095 21096 prev_state = un->un_mediastate; 21097 21098 /* is there anything to do? */ 21099 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 21100 /* 21101 * submit the request to the scsi_watch service; 21102 * sd_media_watch_cb() does the real work 21103 */ 21104 mutex_exit(SD_MUTEX(un)); 21105 21106 /* 21107 * This change handles the case where a scsi watch request is 21108 * added to a device that is powered down. To accomplish this 21109 * we power up the device before adding the scsi watch request, 21110 * since the scsi watch sends a TUR directly to the device 21111 * which the device cannot handle if it is powered down.
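 * The resulting call sequence below (a descriptive outline of the
 * existing code, not new logic) is: sd_pm_entry(un), then
 * scsi_watch_request_submit(), then sd_pm_exit(un).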
21112 */ 21113 if (sd_pm_entry(un) != DDI_SUCCESS) { 21114 mutex_enter(SD_MUTEX(un)); 21115 goto done; 21116 } 21117 21118 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 21119 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 21120 (caddr_t)dev); 21121 21122 sd_pm_exit(un); 21123 21124 mutex_enter(SD_MUTEX(un)); 21125 if (token == NULL) { 21126 rval = EAGAIN; 21127 goto done; 21128 } 21129 21130 /* 21131 * This is a special case IOCTL that doesn't return 21132 * until the media state changes. Routine sdpower 21133 * knows about and handles this so don't count it 21134 * as an active cmd in the driver, which would 21135 * keep the device busy as far as the pm framework 21136 * is concerned. If the count isn't decremented the 21137 * device can't be powered down. 21138 */ 21139 un->un_ncmds_in_driver--; 21140 ASSERT(un->un_ncmds_in_driver >= 0); 21141 21142 /* 21143 * if a prior request had been made, this will be the same 21144 * token, as scsi_watch was designed that way. 21145 */ 21146 un->un_swr_token = token; 21147 un->un_specified_mediastate = state; 21148 21149 /* 21150 * now wait for media change 21151 * we will not be signalled unless mediastate == state but it is 21152 * still better to test for this condition, since there is a 21153 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 21154 */ 21155 SD_TRACE(SD_LOG_COMMON, un, 21156 "sd_check_media: waiting for media state change\n"); 21157 while (un->un_mediastate == state) { 21158 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 21159 SD_TRACE(SD_LOG_COMMON, un, 21160 "sd_check_media: waiting for media state " 21161 "was interrupted\n"); 21162 un->un_ncmds_in_driver++; 21163 rval = EINTR; 21164 goto done; 21165 } 21166 SD_TRACE(SD_LOG_COMMON, un, 21167 "sd_check_media: received signal, state=%x\n", 21168 un->un_mediastate); 21169 } 21170 /* 21171 * Increment the counter to indicate the device once again 21172 * has an active outstanding cmd. 21173 */ 21174 un->un_ncmds_in_driver++; 21175 } 21176 21177 /* invalidate geometry */ 21178 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 21179 sr_ejected(un); 21180 } 21181 21182 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 21183 uint64_t capacity; 21184 uint_t lbasize; 21185 21186 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 21187 mutex_exit(SD_MUTEX(un)); 21188 /* 21189 * Since the following routines use SD_PATH_DIRECT, we must 21190 * call PM directly before the upcoming disk accesses. This 21191 * may cause the disk to be powered up and spun up.
21192 */ 21193 21194 if (sd_pm_entry(un) == DDI_SUCCESS) { 21195 rval = sd_send_scsi_READ_CAPACITY(un, 21196 &capacity, 21197 &lbasize, SD_PATH_DIRECT); 21198 if (rval != 0) { 21199 sd_pm_exit(un); 21200 mutex_enter(SD_MUTEX(un)); 21201 goto done; 21202 } 21203 } else { 21204 rval = EIO; 21205 mutex_enter(SD_MUTEX(un)); 21206 goto done; 21207 } 21208 mutex_enter(SD_MUTEX(un)); 21209 21210 sd_update_block_info(un, lbasize, capacity); 21211 21212 /* 21213 * Check if the media in the device is writable or not 21214 */ 21215 if (ISCD(un)) 21216 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 21217 21218 mutex_exit(SD_MUTEX(un)); 21219 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 21220 if ((cmlb_validate(un->un_cmlbhandle, 0, 21221 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 21222 sd_set_pstats(un); 21223 SD_TRACE(SD_LOG_IO_PARTITION, un, 21224 "sd_check_media: un:0x%p pstats created and " 21225 "set\n", un); 21226 } 21227 21228 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21229 SD_PATH_DIRECT); 21230 sd_pm_exit(un); 21231 21232 mutex_enter(SD_MUTEX(un)); 21233 } 21234 done: 21235 un->un_f_watcht_stopped = FALSE; 21236 if (un->un_swr_token) { 21237 /* 21238 * Use of this local token and the mutex ensures that we avoid 21239 * some race conditions associated with terminating the 21240 * scsi watch. 21241 */ 21242 token = un->un_swr_token; 21243 un->un_swr_token = (opaque_t)NULL; 21244 mutex_exit(SD_MUTEX(un)); 21245 (void) scsi_watch_request_terminate(token, 21246 SCSI_WATCH_TERMINATE_WAIT); 21247 mutex_enter(SD_MUTEX(un)); 21248 } 21249 21250 /* 21251 * Update the capacity kstat value, if no media previously 21252 * (capacity kstat is 0) and a media has been inserted 21253 * (un_f_blockcount_is_valid == TRUE) 21254 */ 21255 if (un->un_errstats) { 21256 struct sd_errstats *stp = NULL; 21257 21258 stp = (struct sd_errstats *)un->un_errstats->ks_data; 21259 if ((stp->sd_capacity.value.ui64 == 0) && 21260 (un->un_f_blockcount_is_valid == TRUE)) { 21261 stp->sd_capacity.value.ui64 = 21262 (uint64_t)((uint64_t)un->un_blockcount * 21263 un->un_sys_blocksize); 21264 } 21265 } 21266 mutex_exit(SD_MUTEX(un)); 21267 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 21268 return (rval); 21269 } 21270 21271 21272 /* 21273 * Function: sd_delayed_cv_broadcast 21274 * 21275 * Description: Delayed cv_broadcast to allow for target to recover from media 21276 * insertion. 21277 * 21278 * Arguments: arg - driver soft state (unit) structure 21279 */ 21280 21281 static void 21282 sd_delayed_cv_broadcast(void *arg) 21283 { 21284 struct sd_lun *un = arg; 21285 21286 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 21287 21288 mutex_enter(SD_MUTEX(un)); 21289 un->un_dcvb_timeid = NULL; 21290 cv_broadcast(&un->un_state_cv); 21291 mutex_exit(SD_MUTEX(un)); 21292 } 21293 21294 21295 /* 21296 * Function: sd_media_watch_cb 21297 * 21298 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 21299 * routine processes the TUR sense data and updates the driver 21300 * state if a transition has occurred. The user thread 21301 * (sd_check_media) is then signalled. 
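 *
 *		Summary of the sense interpretation performed below (an
 *		informative restatement of the code, not new behavior):
 *
 *			KEY_UNIT_ATTENTION, ASC 0x28	-> DKIO_INSERTED
 *			KEY_NOT_READY, ASC 0x3a		-> DKIO_EJECTED
 *			KEY_NOT_READY, ASC 0x04,
 *			    ASCQ 0x02/0x07/0x08		-> DKIO_INSERTED
 *			KEY_NO_SENSE, 00/00/00		-> no state change
 *			STATUS_GOOD and CMD_CMPLT	-> DKIO_INSERTED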
21302 * 21303 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21304 * among multiple watches that share this callback function 21305 * resultp - scsi watch facility result packet containing scsi 21306 * packet, status byte and sense data 21307 * 21308 * Return Code: 0 for success, -1 for failure 21309 */ 21310 21311 static int 21312 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21313 { 21314 struct sd_lun *un; 21315 struct scsi_status *statusp = resultp->statusp; 21316 uint8_t *sensep = (uint8_t *)resultp->sensep; 21317 enum dkio_state state = DKIO_NONE; 21318 dev_t dev = (dev_t)arg; 21319 uchar_t actual_sense_length; 21320 uint8_t skey, asc, ascq; 21321 21322 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21323 return (-1); 21324 } 21325 actual_sense_length = resultp->actual_sense_length; 21326 21327 mutex_enter(SD_MUTEX(un)); 21328 SD_TRACE(SD_LOG_COMMON, un, 21329 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 21330 *((char *)statusp), (void *)sensep, actual_sense_length); 21331 21332 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 21333 un->un_mediastate = DKIO_DEV_GONE; 21334 cv_broadcast(&un->un_state_cv); 21335 mutex_exit(SD_MUTEX(un)); 21336 21337 return (0); 21338 } 21339 21340 /* 21341 * If there was a check condition then sensep points to valid sense data. 21342 * If status was not a check condition but a reservation or busy status 21343 * then the new state is DKIO_NONE. 21344 */ 21345 if (sensep != NULL) { 21346 skey = scsi_sense_key(sensep); 21347 asc = scsi_sense_asc(sensep); 21348 ascq = scsi_sense_ascq(sensep); 21349 21350 SD_INFO(SD_LOG_COMMON, un, 21351 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 21352 skey, asc, ascq); 21353 /* This routine only uses up to 13 bytes of sense data. */ 21354 if (actual_sense_length >= 13) { 21355 if (skey == KEY_UNIT_ATTENTION) { 21356 if (asc == 0x28) { 21357 state = DKIO_INSERTED; 21358 } 21359 } else if (skey == KEY_NOT_READY) { 21360 /* 21361 * Sense data 02/04/02 means that the host 21362 * should send a start command. Explicitly 21363 * leave the media state as is 21364 * (inserted), as the media is inserted 21365 * and the host has stopped the device for 21366 * PM reasons. The next true read/write 21367 * to this media will bring the 21368 * device to the right state for 21369 * media access. 21370 */ 21371 if (asc == 0x3a) { 21372 state = DKIO_EJECTED; 21373 } else { 21374 /* 21375 * If the drive is busy with an 21376 * operation or long write, keep the 21377 * media in an inserted state. 21378 */ 21379 21380 if ((asc == 0x04) && 21381 ((ascq == 0x02) || 21382 (ascq == 0x07) || 21383 (ascq == 0x08))) { 21384 state = DKIO_INSERTED; 21385 } 21386 } 21387 } else if (skey == KEY_NO_SENSE) { 21388 if ((asc == 0x00) && (ascq == 0x00)) { 21389 /* 21390 * Sense Data 00/00/00 does not provide 21391 * any information about the state of 21392 * the media. Ignore it.
21393 */ 21394 mutex_exit(SD_MUTEX(un)); 21395 return (0); 21396 } 21397 } 21398 } 21399 } else if ((*((char *)statusp) == STATUS_GOOD) && 21400 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 21401 state = DKIO_INSERTED; 21402 } 21403 21404 SD_TRACE(SD_LOG_COMMON, un, 21405 "sd_media_watch_cb: state=%x, specified=%x\n", 21406 state, un->un_specified_mediastate); 21407 21408 /* 21409 * now signal the waiting thread if this is *not* the specified state; 21410 * delay the signal if the state is DKIO_INSERTED to allow the target 21411 * to recover 21412 */ 21413 if (state != un->un_specified_mediastate) { 21414 un->un_mediastate = state; 21415 if (state == DKIO_INSERTED) { 21416 /* 21417 * delay the signal to give the drive a chance 21418 * to do what it apparently needs to do 21419 */ 21420 SD_TRACE(SD_LOG_COMMON, un, 21421 "sd_media_watch_cb: delayed cv_broadcast\n"); 21422 if (un->un_dcvb_timeid == NULL) { 21423 un->un_dcvb_timeid = 21424 timeout(sd_delayed_cv_broadcast, un, 21425 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 21426 } 21427 } else { 21428 SD_TRACE(SD_LOG_COMMON, un, 21429 "sd_media_watch_cb: immediate cv_broadcast\n"); 21430 cv_broadcast(&un->un_state_cv); 21431 } 21432 } 21433 mutex_exit(SD_MUTEX(un)); 21434 return (0); 21435 } 21436 21437 21438 /* 21439 * Function: sd_dkio_get_temp 21440 * 21441 * Description: This routine is the driver entry point for handling ioctl 21442 * requests to get the disk temperature. 21443 * 21444 * Arguments: dev - the device number 21445 * arg - pointer to user provided dk_temperature structure. 21446 * flag - this argument is a pass through to ddi_copyxxx() 21447 * directly from the mode argument of ioctl(). 21448 * 21449 * Return Code: 0 21450 * EFAULT 21451 * ENXIO 21452 * EAGAIN 21453 */ 21454 21455 static int 21456 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 21457 { 21458 struct sd_lun *un = NULL; 21459 struct dk_temperature *dktemp = NULL; 21460 uchar_t *temperature_page; 21461 int rval = 0; 21462 int path_flag = SD_PATH_STANDARD; 21463 21464 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21465 return (ENXIO); 21466 } 21467 21468 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 21469 21470 /* copyin the disk temp argument to get the user flags */ 21471 if (ddi_copyin((void *)arg, dktemp, 21472 sizeof (struct dk_temperature), flag) != 0) { 21473 rval = EFAULT; 21474 goto done; 21475 } 21476 21477 /* Initialize the temperature to invalid. */ 21478 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21479 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21480 21481 /* 21482 * Note: Investigate removing the "bypass pm" semantic. 21483 * Can we just bypass PM always? 21484 */ 21485 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 21486 path_flag = SD_PATH_DIRECT; 21487 ASSERT(!mutex_owned(&un->un_pm_mutex)); 21488 mutex_enter(&un->un_pm_mutex); 21489 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 21490 /* 21491 * If DKT_BYPASS_PM is set, and the drive happens to be 21492 * in low power mode, we cannot wake it up; we need to 21493 * return EAGAIN. 21494 */ 21495 mutex_exit(&un->un_pm_mutex); 21496 rval = EAGAIN; 21497 goto done; 21498 } else { 21499 /* 21500 * Indicate to PM the device is busy. This is required 21501 * to avoid a race - i.e. the ioctl is issuing a 21502 * command and the pm framework brings down the device 21503 * to low power mode (possible power cut-off on some 21504 * platforms).
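 * The sd_pm_entry() call below marks the device busy for the
 * duration of the LOG SENSE; the matching sd_pm_exit() is issued at
 * the "done2" label whenever path_flag indicates the PM bypass
 * (SD_PATH_DIRECT) path was taken.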
21505 */ 21506 mutex_exit(&un->un_pm_mutex); 21507 if (sd_pm_entry(un) != DDI_SUCCESS) { 21508 rval = EAGAIN; 21509 goto done; 21510 } 21511 } 21512 } 21513 21514 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21515 21516 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21517 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21518 goto done2; 21519 } 21520 21521 /* 21522 * For the current temperature verify that the parameter length is 0x02 21523 * and the parameter code is 0x00 21524 */ 21525 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21526 (temperature_page[5] == 0x00)) { 21527 if (temperature_page[9] == 0xFF) { 21528 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21529 } else { 21530 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21531 } 21532 } 21533 21534 /* 21535 * For the reference temperature verify that the parameter 21536 * length is 0x02 and the parameter code is 0x01 21537 */ 21538 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21539 (temperature_page[11] == 0x01)) { 21540 if (temperature_page[15] == 0xFF) { 21541 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21542 } else { 21543 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21544 } 21545 } 21546 21547 /* Do the copyout regardless of the temperature command's status. */ 21548 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21549 flag) != 0) { 21550 rval = EFAULT; 21551 } 21552 21553 done2: 21554 if (path_flag == SD_PATH_DIRECT) { 21555 sd_pm_exit(un); 21556 } 21557 21558 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21559 done: 21560 if (dktemp != NULL) { 21561 kmem_free(dktemp, sizeof (struct dk_temperature)); 21562 } 21563 21564 return (rval); 21565 } 21566 21567 21568 /* 21569 * Function: sd_log_page_supported 21570 * 21571 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21572 * supported log pages. 21573 * 21574 * Arguments: un - the driver soft state (unit) structure 21575 * log_page - the code of the log page to look for 21576 * 21577 * Return Code: -1 - on error (log sense is optional and may not be supported). 21578 * 0 - log page not found. 21579 * 1 - log page found. 21580 */ 21581 21582 static int 21583 sd_log_page_supported(struct sd_lun *un, int log_page) 21584 { 21585 uchar_t *log_page_data; 21586 int i; 21587 int match = 0; 21588 int log_size; 21589 21590 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21591 21592 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21593 SD_PATH_DIRECT) != 0) { 21594 SD_ERROR(SD_LOG_COMMON, un, 21595 "sd_log_page_supported: failed log page retrieval\n"); 21596 kmem_free(log_page_data, 0xFF); 21597 return (-1); 21598 } 21599 log_size = log_page_data[3]; 21600 21601 /* 21602 * The list of supported log pages starts at the fourth byte. Check 21603 * until we run out of log pages or a match is found. 21604 */ 21605 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21606 if (log_page_data[i] == log_page) { 21607 match++; 21608 } 21609 } 21610 kmem_free(log_page_data, 0xFF); 21611 return (match); 21612 } 21613 21614 21615 /* 21616 * Function: sd_mhdioc_failfast 21617 * 21618 * Description: This routine is the driver entry point for handling ioctl 21619 * requests to enable/disable the multihost failfast option. 21620 * (MHIOCENFAILFAST) 21621 * 21622 * Arguments: dev - the device number 21623 * arg - user specified probing interval. 21624 * flag - this argument is a pass through to ddi_copyxxx() 21625 * directly from the mode argument of ioctl().
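 *
 *		An illustrative invocation (hypothetical user context;
 *		the interval is in milliseconds, per the msecs-to-usecs
 *		conversion in sd_check_mhd()):
 *
 *			int mh_time = 1000;
 *			(void) ioctl(fd, MHIOCENFAILFAST, &mh_time);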
21626 * 21627 * Return Code: 0 21628 * EFAULT 21629 * ENXIO 21630 */ 21631 21632 static int 21633 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21634 { 21635 struct sd_lun *un = NULL; 21636 int mh_time; 21637 int rval = 0; 21638 21639 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21640 return (ENXIO); 21641 } 21642 21643 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21644 return (EFAULT); 21645 21646 if (mh_time) { 21647 mutex_enter(SD_MUTEX(un)); 21648 un->un_resvd_status |= SD_FAILFAST; 21649 mutex_exit(SD_MUTEX(un)); 21650 /* 21651 * If mh_time is INT_MAX, then this ioctl is being used for 21652 * SCSI-3 PGR purposes, and we don't need to spawn a watch thread. 21653 */ 21654 if (mh_time != INT_MAX) { 21655 rval = sd_check_mhd(dev, mh_time); 21656 } 21657 } else { 21658 (void) sd_check_mhd(dev, 0); 21659 mutex_enter(SD_MUTEX(un)); 21660 un->un_resvd_status &= ~SD_FAILFAST; 21661 mutex_exit(SD_MUTEX(un)); 21662 } 21663 return (rval); 21664 } 21665 21666 21667 /* 21668 * Function: sd_mhdioc_takeown 21669 * 21670 * Description: This routine is the driver entry point for handling ioctl 21671 * requests to forcefully acquire exclusive access rights to the 21672 * multihost disk (MHIOCTKOWN). 21673 * 21674 * Arguments: dev - the device number 21675 * arg - user provided structure specifying the delay 21676 * parameters in milliseconds 21677 * flag - this argument is a pass through to ddi_copyxxx() 21678 * directly from the mode argument of ioctl(). 21679 * 21680 * Return Code: 0 21681 * EFAULT 21682 * ENXIO 21683 */ 21684 21685 static int 21686 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21687 { 21688 struct sd_lun *un = NULL; 21689 struct mhioctkown *tkown = NULL; 21690 int rval = 0; 21691 21692 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21693 return (ENXIO); 21694 } 21695 21696 if (arg != NULL) { 21697 tkown = (struct mhioctkown *) 21698 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21699 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21700 if (rval != 0) { 21701 rval = EFAULT; 21702 goto error; 21703 } 21704 } 21705 21706 rval = sd_take_ownership(dev, tkown); 21707 mutex_enter(SD_MUTEX(un)); 21708 if (rval == 0) { 21709 un->un_resvd_status |= SD_RESERVE; 21710 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21711 sd_reinstate_resv_delay = 21712 tkown->reinstate_resv_delay * 1000; 21713 } else { 21714 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21715 } 21716 /* 21717 * Give precedence here to the scsi_watch routine interval 21718 * that was set by the MHIOCENFAILFAST ioctl. 21719 */ 21720 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21721 mutex_exit(SD_MUTEX(un)); 21722 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21723 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21724 "sd_mhdioc_takeown : %d\n", 21725 sd_reinstate_resv_delay); 21726 } else { 21727 mutex_exit(SD_MUTEX(un)); 21728 } 21729 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21730 sd_mhd_reset_notify_cb, (caddr_t)un); 21731 } else { 21732 un->un_resvd_status &= ~SD_RESERVE; 21733 mutex_exit(SD_MUTEX(un)); 21734 } 21735 21736 error: 21737 if (tkown != NULL) { 21738 kmem_free(tkown, sizeof (struct mhioctkown)); 21739 } 21740 return (rval); 21741 } 21742 21743 21744 /* 21745 * Function: sd_mhdioc_release 21746 * 21747 * Description: This routine is the driver entry point for handling ioctl 21748 * requests to release exclusive access rights to the multihost 21749 * disk (MHIOCRELEASE).
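 *
 *		In the clustering usage described later in this file
 *		(illustrative call order, not enforced by the driver), a
 *		host issues MHIOCTKOWN to take ownership, MHIOCENFAILFAST
 *		to arm failfast, and finally MHIOCRELEASE to give the
 *		disk up.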
21750 * 21751 * Arguments: dev - the device number 21752 * 21753 * Return Code: 0 21754 * ENXIO 21755 */ 21756 21757 static int 21758 sd_mhdioc_release(dev_t dev) 21759 { 21760 struct sd_lun *un = NULL; 21761 timeout_id_t resvd_timeid_save; 21762 int resvd_status_save; 21763 int rval = 0; 21764 21765 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21766 return (ENXIO); 21767 } 21768 21769 mutex_enter(SD_MUTEX(un)); 21770 resvd_status_save = un->un_resvd_status; 21771 un->un_resvd_status &= 21772 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21773 if (un->un_resvd_timeid) { 21774 resvd_timeid_save = un->un_resvd_timeid; 21775 un->un_resvd_timeid = NULL; 21776 mutex_exit(SD_MUTEX(un)); 21777 (void) untimeout(resvd_timeid_save); 21778 } else { 21779 mutex_exit(SD_MUTEX(un)); 21780 } 21781 21782 /* 21783 * destroy any pending timeout thread that may be attempting to 21784 * reinstate the reservation on this device. 21785 */ 21786 sd_rmv_resv_reclaim_req(dev); 21787 21788 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21789 mutex_enter(SD_MUTEX(un)); 21790 if ((un->un_mhd_token) && 21791 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21792 mutex_exit(SD_MUTEX(un)); 21793 (void) sd_check_mhd(dev, 0); 21794 } else { 21795 mutex_exit(SD_MUTEX(un)); 21796 } 21797 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21798 sd_mhd_reset_notify_cb, (caddr_t)un); 21799 } else { 21800 /* 21801 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21802 */ 21803 mutex_enter(SD_MUTEX(un)); 21804 un->un_resvd_status = resvd_status_save; 21805 mutex_exit(SD_MUTEX(un)); 21806 } 21807 return (rval); 21808 } 21809 21810 21811 /* 21812 * Function: sd_mhdioc_register_devid 21813 * 21814 * Description: This routine is the driver entry point for handling ioctl 21815 * requests to register the device id (MHIOCREREGISTERDEVID). 21816 * 21817 * Note: The implementation for this ioctl has been updated to 21818 * be consistent with the original PSARC case (1999/357) 21819 * (4375899, 4241671, 4220005) 21820 * 21821 * Arguments: dev - the device number 21822 * 21823 * Return Code: 0 21824 * ENXIO 21825 */ 21826 21827 static int 21828 sd_mhdioc_register_devid(dev_t dev) 21829 { 21830 struct sd_lun *un = NULL; 21831 int rval = 0; 21832 21833 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21834 return (ENXIO); 21835 } 21836 21837 ASSERT(!mutex_owned(SD_MUTEX(un))); 21838 21839 mutex_enter(SD_MUTEX(un)); 21840 21841 /* If a devid already exists, de-register it */ 21842 if (un->un_devid != NULL) { 21843 ddi_devid_unregister(SD_DEVINFO(un)); 21844 /* 21845 * After unregistering the devid, the devid memory 21846 * must be freed. 21847 */ 21848 ddi_devid_free(un->un_devid); 21849 un->un_devid = NULL; 21850 } 21851 21852 /* Check for reservation conflict */ 21853 mutex_exit(SD_MUTEX(un)); 21854 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21855 mutex_enter(SD_MUTEX(un)); 21856 21857 switch (rval) { 21858 case 0: 21859 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21860 break; 21861 case EACCES: 21862 break; 21863 default: 21864 rval = EIO; 21865 } 21866 21867 mutex_exit(SD_MUTEX(un)); 21868 return (rval); 21869 } 21870 21871 21872 /* 21873 * Function: sd_mhdioc_inkeys 21874 * 21875 * Description: This routine is the driver entry point for handling ioctl 21876 * requests to issue the SCSI-3 Persistent In Read Keys command 21877 * to the device (MHIOCGRP_INKEYS).
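 *
 *		A hypothetical caller (illustrative only) passes an
 *		mhioc_inkeys_t whose "li" member points at a caller
 *		allocated key list; on success the "generation" member is
 *		updated with the generation value returned by the device.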
21877 * 21878 * Arguments: dev - the device number 21879 * arg - user provided in_keys structure 21880 * flag - this argument is a pass through to ddi_copyxxx() 21881 * directly from the mode argument of ioctl(). 21882 * 21883 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21884 * ENXIO 21885 * EFAULT 21886 */ 21887 21888 static int 21889 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21890 { 21891 struct sd_lun *un; 21892 mhioc_inkeys_t inkeys; 21893 int rval = 0; 21894 21895 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21896 return (ENXIO); 21897 } 21898 21899 #ifdef _MULTI_DATAMODEL 21900 switch (ddi_model_convert_from(flag & FMODELS)) { 21901 case DDI_MODEL_ILP32: { 21902 struct mhioc_inkeys32 inkeys32; 21903 21904 if (ddi_copyin(arg, &inkeys32, 21905 sizeof (struct mhioc_inkeys32), flag) != 0) { 21906 return (EFAULT); 21907 } 21908 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21909 if ((rval = sd_persistent_reservation_in_read_keys(un, 21910 &inkeys, flag)) != 0) { 21911 return (rval); 21912 } 21913 inkeys32.generation = inkeys.generation; 21914 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21915 flag) != 0) { 21916 return (EFAULT); 21917 } 21918 break; 21919 } 21920 case DDI_MODEL_NONE: 21921 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21922 flag) != 0) { 21923 return (EFAULT); 21924 } 21925 if ((rval = sd_persistent_reservation_in_read_keys(un, 21926 &inkeys, flag)) != 0) { 21927 return (rval); 21928 } 21929 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21930 flag) != 0) { 21931 return (EFAULT); 21932 } 21933 break; 21934 } 21935 21936 #else /* ! _MULTI_DATAMODEL */ 21937 21938 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21939 return (EFAULT); 21940 } 21941 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21942 if (rval != 0) { 21943 return (rval); 21944 } 21945 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21946 return (EFAULT); 21947 } 21948 21949 #endif /* _MULTI_DATAMODEL */ 21950 21951 return (rval); 21952 } 21953 21954 21955 /* 21956 * Function: sd_mhdioc_inresv 21957 * 21958 * Description: This routine is the driver entry point for handling ioctl 21959 * requests to issue the SCSI-3 Persistent In Read Reservations 21960 * command to the device (MHIOCGRP_INRESV). 21961 * 21962 * Arguments: dev - the device number 21963 * arg - user provided in_resv structure 21964 * flag - this argument is a pass through to ddi_copyxxx() 21965 * directly from the mode argument of ioctl().
21966 * 21967 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21968 * ENXIO 21969 * EFAULT 21970 */ 21971 21972 static int 21973 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21974 { 21975 struct sd_lun *un; 21976 mhioc_inresvs_t inresvs; 21977 int rval = 0; 21978 21979 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21980 return (ENXIO); 21981 } 21982 21983 #ifdef _MULTI_DATAMODEL 21984 21985 switch (ddi_model_convert_from(flag & FMODELS)) { 21986 case DDI_MODEL_ILP32: { 21987 struct mhioc_inresvs32 inresvs32; 21988 21989 if (ddi_copyin(arg, &inresvs32, 21990 sizeof (struct mhioc_inresvs32), flag) != 0) { 21991 return (EFAULT); 21992 } 21993 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21994 if ((rval = sd_persistent_reservation_in_read_resv(un, 21995 &inresvs, flag)) != 0) { 21996 return (rval); 21997 } 21998 inresvs32.generation = inresvs.generation; 21999 if (ddi_copyout(&inresvs32, arg, 22000 sizeof (struct mhioc_inresvs32), flag) != 0) { 22001 return (EFAULT); 22002 } 22003 break; 22004 } 22005 case DDI_MODEL_NONE: 22006 if (ddi_copyin(arg, &inresvs, 22007 sizeof (mhioc_inresvs_t), flag) != 0) { 22008 return (EFAULT); 22009 } 22010 if ((rval = sd_persistent_reservation_in_read_resv(un, 22011 &inresvs, flag)) != 0) { 22012 return (rval); 22013 } 22014 if (ddi_copyout(&inresvs, arg, 22015 sizeof (mhioc_inresvs_t), flag) != 0) { 22016 return (EFAULT); 22017 } 22018 break; 22019 } 22020 22021 #else /* ! _MULTI_DATAMODEL */ 22022 22023 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 22024 return (EFAULT); 22025 } 22026 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 22027 if (rval != 0) { 22028 return (rval); 22029 } 22030 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 22031 return (EFAULT); 22032 } 22033 22034 #endif /* ! _MULTI_DATAMODEL */ 22035 22036 return (rval); 22037 } 22038 22039 22040 /* 22041 * The following routines support the clustering functionality described below 22042 * and implement lost reservation reclaim functionality. 22043 * 22044 * Clustering 22045 * ---------- 22046 * The clustering code uses two different, independent forms of SCSI 22047 * reservation: traditional SCSI-2 Reserve/Release and the newer SCSI-3 22048 * Persistent Group Reservations. For any particular disk, it will use either 22049 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 22050 * 22051 * SCSI-2 22052 * The cluster software takes ownership of a multi-hosted disk by issuing the 22053 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 22054 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 22055 * cluster host, just after taking ownership of the disk with the MHIOCTKOWN 22056 * ioctl, then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 22057 * driver. The meaning of failfast is that if the driver (on this host) ever 22058 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 22059 * it should immediately panic the host. The motivation for this ioctl is that 22060 * if this host does encounter reservation conflict, the underlying cause is 22061 * that some other host of the cluster has decided that this host is no longer 22062 * in the cluster and has seized control of the disks for itself. Since this 22063 * host is no longer in the cluster, it ought to panic itself.
The 22064 * MHIOCENFAILFAST ioctl does two things: 22065 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 22066 * error to panic the host 22067 * (b) it sets up a periodic timer to test whether this host still has 22068 * "access" (in that no other host has reserved the device): if the 22069 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 22070 * purpose of that periodic timer is to handle scenarios where the host is 22071 * otherwise temporarily quiescent, temporarily doing no real i/o. 22072 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host 22073 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 22074 * the device itself. 22075 * 22076 * SCSI-3 PGR 22077 * A direct semantic implementation of the SCSI-3 Persistent Reservation 22078 * facility is supported through the shared multihost disk ioctls 22079 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 22080 * MHIOCGRP_PREEMPTANDABORT). 22081 * 22082 * Reservation Reclaim: 22083 * -------------------- 22084 * To support the lost reservation reclaim operations this driver creates a 22085 * single thread to handle reinstating reservations on all devices that have 22086 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 22087 * have LOST RESERVATIONS when the scsi watch facility calls back sd_mhd_watch_cb, 22088 * and the reservation reclaim thread loops through the requests to regain the 22089 * lost reservations. 22090 */ 22091 22092 /* 22093 * Function: sd_check_mhd() 22094 * 22095 * Description: This function sets up and submits a scsi watch request or 22096 * terminates an existing watch request. This routine is used in 22097 * support of reservation reclaim. 22098 * 22099 * Arguments: dev - the device 'dev_t' is used for context to discriminate 22100 * among multiple watches that share the callback function 22101 * interval - the number of milliseconds specifying the watch 22102 * interval for issuing TEST UNIT READY commands. If 22103 * set to 0 the watch should be terminated. If the 22104 * interval is set to 0 and if the device is required 22105 * to hold reservation while disabling failfast, the 22106 * watch is restarted with an interval of 22107 * reinstate_resv_delay. 22108 * 22109 * Return Code: 0 - Successful submit/terminate of scsi watch request 22110 * ENXIO - Indicates an invalid device was specified 22111 * EAGAIN - Unable to submit the scsi watch request 22112 */ 22113 22114 static int 22115 sd_check_mhd(dev_t dev, int interval) 22116 { 22117 struct sd_lun *un; 22118 opaque_t token; 22119 22120 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22121 return (ENXIO); 22122 } 22123 22124 /* is this a watch termination request? */ 22125 if (interval == 0) { 22126 mutex_enter(SD_MUTEX(un)); 22127 /* if there is an existing watch task then terminate it */ 22128 if (un->un_mhd_token) { 22129 token = un->un_mhd_token; 22130 un->un_mhd_token = NULL; 22131 mutex_exit(SD_MUTEX(un)); 22132 (void) scsi_watch_request_terminate(token, 22133 SCSI_WATCH_TERMINATE_WAIT); 22134 mutex_enter(SD_MUTEX(un)); 22135 } else { 22136 mutex_exit(SD_MUTEX(un)); 22137 /* 22138 * Note: If we return here we don't check for the 22139 * failfast case. This is the original legacy 22140 * implementation but perhaps we should be checking 22141 * the failfast case.
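 * (In that case a device that is still required to hold SD_RESERVE
 * would miss the reinstate_resv_delay watch restart performed below.)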
22142 */ 22143 return (0); 22144 } 22145 /* 22146 * If the device is required to hold reservation while 22147 * disabling failfast, we need to restart the scsi_watch 22148 * routine with an interval of reinstate_resv_delay. 22149 */ 22150 if (un->un_resvd_status & SD_RESERVE) { 22151 interval = sd_reinstate_resv_delay/1000; 22152 } else { 22153 /* no failfast so bail */ 22154 mutex_exit(SD_MUTEX(un)); 22155 return (0); 22156 } 22157 mutex_exit(SD_MUTEX(un)); 22158 } 22159 22160 /* 22161 * adjust minimum time interval to 1 second, 22162 * and convert from msecs to usecs 22163 */ 22164 if (interval > 0 && interval < 1000) { 22165 interval = 1000; 22166 } 22167 interval *= 1000; 22168 22169 /* 22170 * submit the request to the scsi_watch service 22171 */ 22172 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 22173 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 22174 if (token == NULL) { 22175 return (EAGAIN); 22176 } 22177 22178 /* 22179 * save token for termination later on 22180 */ 22181 mutex_enter(SD_MUTEX(un)); 22182 un->un_mhd_token = token; 22183 mutex_exit(SD_MUTEX(un)); 22184 return (0); 22185 } 22186 22187 22188 /* 22189 * Function: sd_mhd_watch_cb() 22190 * 22191 * Description: This function is the call back function used by the scsi watch 22192 * facility. The scsi watch facility sends the "Test Unit Ready" 22193 * and processes the status. If applicable (i.e. a "Unit Attention" 22194 * status and automatic "Request Sense" not used) the scsi watch 22195 * facility will send a "Request Sense" and retrieve the sense data 22196 * to be passed to this callback function. In either case the 22197 * automatic "Request Sense" or the facility submitting one, this 22198 * callback is passed the status and sense data. 22199 * 22200 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22201 * among multiple watches that share this callback function 22202 * resultp - scsi watch facility result packet containing scsi 22203 * packet, status byte and sense data 22204 * 22205 * Return Code: 0 - continue the watch task 22206 * non-zero - terminate the watch task 22207 */ 22208 22209 static int 22210 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22211 { 22212 struct sd_lun *un; 22213 struct scsi_status *statusp; 22214 uint8_t *sensep; 22215 struct scsi_pkt *pkt; 22216 uchar_t actual_sense_length; 22217 dev_t dev = (dev_t)arg; 22218 22219 ASSERT(resultp != NULL); 22220 statusp = resultp->statusp; 22221 sensep = (uint8_t *)resultp->sensep; 22222 pkt = resultp->pkt; 22223 actual_sense_length = resultp->actual_sense_length; 22224 22225 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22226 return (ENXIO); 22227 } 22228 22229 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22230 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 22231 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 22232 22233 /* Begin processing of the status and/or sense data */ 22234 if (pkt->pkt_reason != CMD_CMPLT) { 22235 /* Handle the incomplete packet */ 22236 sd_mhd_watch_incomplete(un, pkt); 22237 return (0); 22238 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 22239 if (*((unsigned char *)statusp) 22240 == STATUS_RESERVATION_CONFLICT) { 22241 /* 22242 * Handle a reservation conflict by panicking if 22243 * configured for failfast or by logging the conflict 22244 * and updating the reservation status 22245 */ 22246 mutex_enter(SD_MUTEX(un)); 22247 if ((un->un_resvd_status & SD_FAILFAST) && 22248 (sd_failfast_enable)) { 22249 
sd_panic_for_res_conflict(un); 22250 /*NOTREACHED*/ 22251 } 22252 SD_INFO(SD_LOG_IOCTL_MHD, un, 22253 "sd_mhd_watch_cb: Reservation Conflict\n"); 22254 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 22255 mutex_exit(SD_MUTEX(un)); 22256 } 22257 } 22258 22259 if (sensep != NULL) { 22260 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 22261 mutex_enter(SD_MUTEX(un)); 22262 if ((scsi_sense_asc(sensep) == 22263 SD_SCSI_RESET_SENSE_CODE) && 22264 (un->un_resvd_status & SD_RESERVE)) { 22265 /* 22266 * The additional sense code indicates a power 22267 * on or bus device reset has occurred; update 22268 * the reservation status. 22269 */ 22270 un->un_resvd_status |= 22271 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22272 SD_INFO(SD_LOG_IOCTL_MHD, un, 22273 "sd_mhd_watch_cb: Lost Reservation\n"); 22274 } 22275 } else { 22276 return (0); 22277 } 22278 } else { 22279 mutex_enter(SD_MUTEX(un)); 22280 } 22281 22282 if ((un->un_resvd_status & SD_RESERVE) && 22283 (un->un_resvd_status & SD_LOST_RESERVE)) { 22284 if (un->un_resvd_status & SD_WANT_RESERVE) { 22285 /* 22286 * A reset occurred between the last probe and this 22287 * one, so if a timeout is pending, cancel it. 22288 */ 22289 if (un->un_resvd_timeid) { 22290 timeout_id_t temp_id = un->un_resvd_timeid; 22291 un->un_resvd_timeid = NULL; 22292 mutex_exit(SD_MUTEX(un)); 22293 (void) untimeout(temp_id); 22294 mutex_enter(SD_MUTEX(un)); 22295 } 22296 un->un_resvd_status &= ~SD_WANT_RESERVE; 22297 } 22298 if (un->un_resvd_timeid == 0) { 22299 /* Schedule a timeout to handle the lost reservation */ 22300 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 22301 (void *)dev, 22302 drv_usectohz(sd_reinstate_resv_delay)); 22303 } 22304 } 22305 mutex_exit(SD_MUTEX(un)); 22306 return (0); 22307 } 22308 22309 22310 /* 22311 * Function: sd_mhd_watch_incomplete() 22312 * 22313 * Description: This function is used to find out why a scsi pkt sent by the 22314 * scsi watch facility was not completed. Under some scenarios this 22315 * routine will simply return. Otherwise it will send a bus reset to 22316 * see if the drive is still online. 22317 * 22318 * Arguments: un - driver soft state (unit) structure 22319 * pkt - incomplete scsi pkt 22320 */ 22321 22322 static void 22323 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22324 { 22325 int be_chatty; 22326 int perr; 22327 22328 ASSERT(pkt != NULL); 22329 ASSERT(un != NULL); 22330 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22331 perr = (pkt->pkt_statistics & STAT_PERR); 22332 22333 mutex_enter(SD_MUTEX(un)); 22334 if (un->un_state == SD_STATE_DUMPING) { 22335 mutex_exit(SD_MUTEX(un)); 22336 return; 22337 } 22338 22339 switch (pkt->pkt_reason) { 22340 case CMD_UNX_BUS_FREE: 22341 /* 22342 * If we had a parity error that caused the target to drop BSY*, 22343 * don't be chatty about it. 22344 */ 22345 if (perr && be_chatty) { 22346 be_chatty = 0; 22347 } 22348 break; 22349 case CMD_TAG_REJECT: 22350 /* 22351 * The SCSI-2 spec states that a tag reject will be sent by the 22352 * target if tagged queuing is not supported. A tag reject may 22353 * also be sent during certain initialization periods or to 22354 * control internal resources. For the latter case the target 22355 * may also return Queue Full. 22356 * 22357 * If this driver receives a tag reject from a target that is 22358 * going through an init period or controlling internal 22359 * resources, tagged queuing will be disabled.
This is a less 22360 * than optimal behavior, but the driver is unable to determine 22361 * the target state and assumes tagged queuing is not supported. 22362 */ 22363 pkt->pkt_flags = 0; 22364 un->un_tagflags = 0; 22365 22366 if (un->un_f_opt_queueing == TRUE) { 22367 un->un_throttle = min(un->un_throttle, 3); 22368 } else { 22369 un->un_throttle = 1; 22370 } 22371 mutex_exit(SD_MUTEX(un)); 22372 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 22373 mutex_enter(SD_MUTEX(un)); 22374 break; 22375 case CMD_INCOMPLETE: 22376 /* 22377 * The transport stopped with an abnormal state; fall through and 22378 * reset the target and/or bus, unless selection did not complete 22379 * (indicated by STATE_GOT_BUS), in which case we don't want to 22380 * go through a target/bus reset. 22381 */ 22382 if (pkt->pkt_state == STATE_GOT_BUS) { 22383 break; 22384 } 22385 /*FALLTHROUGH*/ 22386 22387 case CMD_TIMEOUT: 22388 default: 22389 /* 22390 * The lun may still be running the command, so a lun reset 22391 * should be attempted. If the lun reset fails or cannot be 22392 * issued, then try a target reset. Lastly try a bus reset. 22393 */ 22394 if ((pkt->pkt_statistics & 22395 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 22396 int reset_retval = 0; 22397 mutex_exit(SD_MUTEX(un)); 22398 if (un->un_f_allow_bus_device_reset == TRUE) { 22399 if (un->un_f_lun_reset_enabled == TRUE) { 22400 reset_retval = 22401 scsi_reset(SD_ADDRESS(un), 22402 RESET_LUN); 22403 } 22404 if (reset_retval == 0) { 22405 reset_retval = 22406 scsi_reset(SD_ADDRESS(un), 22407 RESET_TARGET); 22408 } 22409 } 22410 if (reset_retval == 0) { 22411 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22412 } 22413 mutex_enter(SD_MUTEX(un)); 22414 } 22415 break; 22416 } 22417 22418 /* A device/bus reset has occurred; update the reservation status. */ 22419 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 22420 (STAT_BUS_RESET | STAT_DEV_RESET))) { 22421 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22422 un->un_resvd_status |= 22423 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22424 SD_INFO(SD_LOG_IOCTL_MHD, un, 22425 "sd_mhd_watch_incomplete: Lost Reservation\n"); 22426 } 22427 } 22428 22429 /* 22430 * The disk has been turned off; update the device state. 22431 * 22432 * Note: Should we be offlining the disk here? 22433 */ 22434 if (pkt->pkt_state == STATE_GOT_BUS) { 22435 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 22436 "Disk not responding to selection\n"); 22437 if (un->un_state != SD_STATE_OFFLINE) { 22438 New_state(un, SD_STATE_OFFLINE); 22439 } 22440 } else if (be_chatty) { 22441 /* 22442 * suppress messages if they are all the same pkt reason; 22443 * with TQ, many (up to 256) are returned with the same 22444 * pkt_reason 22445 */ 22446 if (pkt->pkt_reason != un->un_last_pkt_reason) { 22447 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22448 "sd_mhd_watch_incomplete: " 22449 "SCSI transport failed: reason '%s'\n", 22450 scsi_rname(pkt->pkt_reason)); 22451 } 22452 } 22453 un->un_last_pkt_reason = pkt->pkt_reason; 22454 mutex_exit(SD_MUTEX(un)); 22455 } 22456 22457 22458 /* 22459 * Function: sd_sname() 22460 * 22461 * Description: This is a simple little routine to return a string containing 22462 * a printable description of a command status byte for use in 22463 * logging. 22464 * 22465 * Arguments: status - the command status byte 22466 * 22467 * Return Code: char * - string containing status description.
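 * For example, sd_sname(STATUS_RESERVATION_CONFLICT) returns
 * "reservation_conflict", the string traced by sd_mhd_watch_cb when a
 * probe hits a conflict.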
22468 */ 22469 22470 static char * 22471 sd_sname(uchar_t status) 22472 { 22473 switch (status & STATUS_MASK) { 22474 case STATUS_GOOD: 22475 return ("good status"); 22476 case STATUS_CHECK: 22477 return ("check condition"); 22478 case STATUS_MET: 22479 return ("condition met"); 22480 case STATUS_BUSY: 22481 return ("busy"); 22482 case STATUS_INTERMEDIATE: 22483 return ("intermediate"); 22484 case STATUS_INTERMEDIATE_MET: 22485 return ("intermediate - condition met"); 22486 case STATUS_RESERVATION_CONFLICT: 22487 return ("reservation_conflict"); 22488 case STATUS_TERMINATED: 22489 return ("command terminated"); 22490 case STATUS_QFULL: 22491 return ("queue full"); 22492 default: 22493 return ("<unknown status>"); 22494 } 22495 } 22496 22497 22498 /* 22499 * Function: sd_mhd_resvd_recover() 22500 * 22501 * Description: This function adds a reservation entry to the 22502 * sd_resv_reclaim_request list and signals the reservation 22503 * reclaim thread that there is work pending. If the reservation 22504 * reclaim thread has not been previously created this function 22505 * will kick it off. 22506 * 22507 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22508 * among multiple watches that share this callback function 22509 * 22510 * Context: This routine is called by timeout() and is run in interrupt 22511 * context. It must not sleep or call other functions which may 22512 * sleep. 22513 */ 22514 22515 static void 22516 sd_mhd_resvd_recover(void *arg) 22517 { 22518 dev_t dev = (dev_t)arg; 22519 struct sd_lun *un; 22520 struct sd_thr_request *sd_treq = NULL; 22521 struct sd_thr_request *sd_cur = NULL; 22522 struct sd_thr_request *sd_prev = NULL; 22523 int already_there = 0; 22524 22525 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22526 return; 22527 } 22528 22529 mutex_enter(SD_MUTEX(un)); 22530 un->un_resvd_timeid = NULL; 22531 if (un->un_resvd_status & SD_WANT_RESERVE) { 22532 /* 22533 * There was a reset so don't issue the reserve, allow the 22534 * sd_mhd_watch_cb callback function to notice this and 22535 * reschedule the timeout for reservation. 22536 */ 22537 mutex_exit(SD_MUTEX(un)); 22538 return; 22539 } 22540 mutex_exit(SD_MUTEX(un)); 22541 22542 /* 22543 * Add this device to the sd_resv_reclaim_request list and the 22544 * sd_resv_reclaim_thread should take care of the rest. 22545 * 22546 * Note: We can't sleep in this context so if the memory allocation 22547 * fails allow the sd_mhd_watch_cb callback function to notice this and 22548 * reschedule the timeout for reservation. 
(4378460) 22549 */ 22550 sd_treq = (struct sd_thr_request *) 22551 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 22552 if (sd_treq == NULL) { 22553 return; 22554 } 22555 22556 sd_treq->sd_thr_req_next = NULL; 22557 sd_treq->dev = dev; 22558 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22559 if (sd_tr.srq_thr_req_head == NULL) { 22560 sd_tr.srq_thr_req_head = sd_treq; 22561 } else { 22562 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 22563 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 22564 if (sd_cur->dev == dev) { 22565 /* 22566 * already in Queue so don't log 22567 * another request for the device 22568 */ 22569 already_there = 1; 22570 break; 22571 } 22572 sd_prev = sd_cur; 22573 } 22574 if (!already_there) { 22575 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 22576 "logging request for %lx\n", dev); 22577 sd_prev->sd_thr_req_next = sd_treq; 22578 } else { 22579 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 22580 } 22581 } 22582 22583 /* 22584 * Create a kernel thread to do the reservation reclaim and free up this 22585 * thread. We cannot block this thread while we go away to do the 22586 * reservation reclaim. 22587 */ 22588 if (sd_tr.srq_resv_reclaim_thread == NULL) 22589 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 22590 sd_resv_reclaim_thread, NULL, 22591 0, &p0, TS_RUN, v.v_maxsyspri - 2); 22592 22593 /* Tell the reservation reclaim thread that it has work to do */ 22594 cv_signal(&sd_tr.srq_resv_reclaim_cv); 22595 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22596 } 22597 22598 /* 22599 * Function: sd_resv_reclaim_thread() 22600 * 22601 * Description: This function implements the reservation reclaim operations. 22602 * 22603 * Arguments: none; the thread services the global sd_tr request 22604 * queue (sd_tr.srq_thr_req_head). 22605 */ 22606 22607 static void 22608 sd_resv_reclaim_thread() 22609 { 22610 struct sd_lun *un; 22611 struct sd_thr_request *sd_mhreq; 22612 22613 /* Wait for work */ 22614 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22615 if (sd_tr.srq_thr_req_head == NULL) { 22616 cv_wait(&sd_tr.srq_resv_reclaim_cv, 22617 &sd_tr.srq_resv_reclaim_mutex); 22618 } 22619 22620 /* Loop while we have work */ 22621 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 22622 un = ddi_get_soft_state(sd_state, 22623 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 22624 if (un == NULL) { 22625 /* 22626 * softstate structure is NULL so just 22627 * dequeue the request and continue 22628 */ 22629 sd_tr.srq_thr_req_head = 22630 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22631 kmem_free(sd_tr.srq_thr_cur_req, 22632 sizeof (struct sd_thr_request)); 22633 continue; 22634 } 22635 22636 /* dequeue the request */ 22637 sd_mhreq = sd_tr.srq_thr_cur_req; 22638 sd_tr.srq_thr_req_head = 22639 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22640 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22641 22642 /* 22643 * Reclaim reservation only if SD_RESERVE is still set. There 22644 * may have been a call to MHIOCRELEASE before we got here. 22645 */ 22646 mutex_enter(SD_MUTEX(un)); 22647 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22648 /* 22649 * Note: The SD_LOST_RESERVE flag is cleared before 22650 * reclaiming the reservation.
If this is done after the 22651 * call to sd_reserve_release a reservation loss in the 22652 * window between pkt completion of reserve cmd and 22653 * mutex_enter below may not be recognized 22654 */ 22655 un->un_resvd_status &= ~SD_LOST_RESERVE; 22656 mutex_exit(SD_MUTEX(un)); 22657 22658 if (sd_reserve_release(sd_mhreq->dev, 22659 SD_RESERVE) == 0) { 22660 mutex_enter(SD_MUTEX(un)); 22661 un->un_resvd_status |= SD_RESERVE; 22662 mutex_exit(SD_MUTEX(un)); 22663 SD_INFO(SD_LOG_IOCTL_MHD, un, 22664 "sd_resv_reclaim_thread: " 22665 "Reservation Recovered\n"); 22666 } else { 22667 mutex_enter(SD_MUTEX(un)); 22668 un->un_resvd_status |= SD_LOST_RESERVE; 22669 mutex_exit(SD_MUTEX(un)); 22670 SD_INFO(SD_LOG_IOCTL_MHD, un, 22671 "sd_resv_reclaim_thread: Failed " 22672 "Reservation Recovery\n"); 22673 } 22674 } else { 22675 mutex_exit(SD_MUTEX(un)); 22676 } 22677 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22678 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22679 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22680 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22681 /* 22682 * wakeup the destroy thread if anyone is waiting on 22683 * us to complete. 22684 */ 22685 cv_signal(&sd_tr.srq_inprocess_cv); 22686 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22687 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22688 } 22689 22690 /* 22691 * cleanup the sd_tr structure now that this thread will not exist 22692 */ 22693 ASSERT(sd_tr.srq_thr_req_head == NULL); 22694 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22695 sd_tr.srq_resv_reclaim_thread = NULL; 22696 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22697 thread_exit(); 22698 } 22699 22700 22701 /* 22702 * Function: sd_rmv_resv_reclaim_req() 22703 * 22704 * Description: This function removes any pending reservation reclaim requests 22705 * for the specified device. 22706 * 22707 * Arguments: dev - the device 'dev_t' 22708 */ 22709 22710 static void 22711 sd_rmv_resv_reclaim_req(dev_t dev) 22712 { 22713 struct sd_thr_request *sd_mhreq; 22714 struct sd_thr_request *sd_prev; 22715 22716 /* Remove a reservation reclaim request from the list */ 22717 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22718 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22719 /* 22720 * We are attempting to reinstate reservation for 22721 * this device. We wait for sd_reserve_release() 22722 * to return before we return. 22723 */ 22724 cv_wait(&sd_tr.srq_inprocess_cv, 22725 &sd_tr.srq_resv_reclaim_mutex); 22726 } else { 22727 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22728 if (sd_mhreq && sd_mhreq->dev == dev) { 22729 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22730 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22731 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22732 return; 22733 } 22734 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22735 if (sd_mhreq && sd_mhreq->dev == dev) { 22736 break; 22737 } 22738 sd_prev = sd_mhreq; 22739 } 22740 if (sd_mhreq != NULL) { 22741 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22742 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22743 } 22744 } 22745 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22746 } 22747 22748 22749 /* 22750 * Function: sd_mhd_reset_notify_cb() 22751 * 22752 * Description: This is a call back function for scsi_reset_notify. This 22753 * function updates the softstate reserved status and logs the 22754 * reset. The driver scsi watch facility callback function 22755 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22756 * will reclaim the reservation. 
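 * (The callback itself only sets SD_LOST_RESERVE and SD_WANT_RESERVE
 * under SD_MUTEX; the RESERVE is actually reissued later from the
 * watch callback and the reclaim thread.)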
22757 * 22758 * Arguments: arg - driver soft state (unit) structure 22759 */ 22760 22761 static void 22762 sd_mhd_reset_notify_cb(caddr_t arg) 22763 { 22764 struct sd_lun *un = (struct sd_lun *)arg; 22765 22766 mutex_enter(SD_MUTEX(un)); 22767 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22768 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22769 SD_INFO(SD_LOG_IOCTL_MHD, un, 22770 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22771 } 22772 mutex_exit(SD_MUTEX(un)); 22773 } 22774 22775 22776 /* 22777 * Function: sd_take_ownership() 22778 * 22779 * Description: This routine implements an algorithm to achieve a stable 22780 * reservation on disks which don't implement priority reserve, 22781 * and makes sure that re-reservation attempts by other hosts fail. 22782 * The algorithm consists of a loop that keeps issuing the RESERVE 22783 * for some period of time (min_ownership_delay, default 6 seconds). 22784 * During that loop, it looks to see if there has been a bus device 22785 * reset or bus reset (both of which cause an existing reservation 22786 * to be lost). If the reservation is lost, issue RESERVE until a 22787 * period of min_ownership_delay with no resets has gone by, or 22788 * until max_ownership_delay has expired. This loop ensures that 22789 * the host really did manage to reserve the device, in spite of 22790 * resets. The looping for min_ownership_delay (default six 22791 * seconds) is important to early generation clustering products, 22792 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22793 * MHIOCENFAILFAST periodic timer of two seconds. By having 22794 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22795 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22796 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22797 * have already noticed, via the MHIOCENFAILFAST polling, that it 22798 * no longer "owns" the disk and will have panicked itself. Thus, 22799 * the host issuing the MHIOCTKOWN is assured (with timing 22800 * dependencies) that by the time it actually starts to use the 22801 * disk for real work, the old owner is no longer accessing it. 22802 * 22803 * min_ownership_delay is the minimum amount of time for which the 22804 * disk must be reserved continuously devoid of resets before the 22805 * MHIOCTKOWN ioctl will return success. 22806 * 22807 * max_ownership_delay indicates the amount of time by which the 22808 * take ownership should succeed or time out with an error. 22809 * 22810 * Arguments: dev - the device 'dev_t' 22811 * *p - struct containing timing info. 22812 * 22813 * Return Code: 0 for success or error code 22814 */ 22815 22816 static int 22817 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22818 { 22819 struct sd_lun *un; 22820 int rval; 22821 int err; 22822 int reservation_count = 0; 22823 int min_ownership_delay = 6000000; /* in usec */ 22824 int max_ownership_delay = 30000000; /* in usec */ 22825 clock_t start_time; /* starting time of this algorithm */ 22826 clock_t end_time; /* time limit for giving up */ 22827 clock_t ownership_time; /* time limit for stable ownership */ 22828 clock_t current_time; 22829 clock_t previous_current_time; 22830 22831 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22832 return (ENXIO); 22833 } 22834 22835 /* 22836 * Attempt a device reservation. A priority reservation is requested.
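 * If it succeeds, the loop below reissues a plain RESERVE every half
 * second; success requires min_ownership_delay to pass with at least
 * four consecutive conflict-free reservations, while EACCES is
 * returned once max_ownership_delay expires without stable ownership.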
22837 */ 22838 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22839 != SD_SUCCESS) { 22840 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22841 "sd_take_ownership: return(1)=%d\n", rval); 22842 return (rval); 22843 } 22844 22845 /* Update the softstate reserved status to indicate the reservation */ 22846 mutex_enter(SD_MUTEX(un)); 22847 un->un_resvd_status |= SD_RESERVE; 22848 un->un_resvd_status &= 22849 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22850 mutex_exit(SD_MUTEX(un)); 22851 22852 if (p != NULL) { 22853 if (p->min_ownership_delay != 0) { 22854 min_ownership_delay = p->min_ownership_delay * 1000; 22855 } 22856 if (p->max_ownership_delay != 0) { 22857 max_ownership_delay = p->max_ownership_delay * 1000; 22858 } 22859 } 22860 SD_INFO(SD_LOG_IOCTL_MHD, un, 22861 "sd_take_ownership: min, max delays: %d, %d\n", 22862 min_ownership_delay, max_ownership_delay); 22863 22864 start_time = ddi_get_lbolt(); 22865 current_time = start_time; 22866 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22867 end_time = start_time + drv_usectohz(max_ownership_delay); 22868 22869 while (current_time - end_time < 0) { 22870 delay(drv_usectohz(500000)); 22871 22872 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22873 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22874 mutex_enter(SD_MUTEX(un)); 22875 rval = (un->un_resvd_status & 22876 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22877 mutex_exit(SD_MUTEX(un)); 22878 break; 22879 } 22880 } 22881 previous_current_time = current_time; 22882 current_time = ddi_get_lbolt(); 22883 mutex_enter(SD_MUTEX(un)); 22884 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22885 ownership_time = ddi_get_lbolt() + 22886 drv_usectohz(min_ownership_delay); 22887 reservation_count = 0; 22888 } else { 22889 reservation_count++; 22890 } 22891 un->un_resvd_status |= SD_RESERVE; 22892 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22893 mutex_exit(SD_MUTEX(un)); 22894 22895 SD_INFO(SD_LOG_IOCTL_MHD, un, 22896 "sd_take_ownership: ticks for loop iteration=%ld, " 22897 "reservation=%s\n", (current_time - previous_current_time), 22898 reservation_count ? 
"ok" : "reclaimed"); 22899 22900 if (current_time - ownership_time >= 0 && 22901 reservation_count >= 4) { 22902 rval = 0; /* Achieved a stable ownership */ 22903 break; 22904 } 22905 if (current_time - end_time >= 0) { 22906 rval = EACCES; /* No ownership in max possible time */ 22907 break; 22908 } 22909 } 22910 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22911 "sd_take_ownership: return(2)=%d\n", rval); 22912 return (rval); 22913 } 22914 22915 22916 /* 22917 * Function: sd_reserve_release() 22918 * 22919 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22920 * PRIORITY RESERVE commands based on a user specified command type 22921 * 22922 * Arguments: dev - the device 'dev_t' 22923 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22924 * SD_RESERVE, SD_RELEASE 22925 * 22926 * Return Code: 0 or Error Code 22927 */ 22928 22929 static int 22930 sd_reserve_release(dev_t dev, int cmd) 22931 { 22932 struct uscsi_cmd *com = NULL; 22933 struct sd_lun *un = NULL; 22934 char cdb[CDB_GROUP0]; 22935 int rval; 22936 22937 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22938 (cmd == SD_PRIORITY_RESERVE)); 22939 22940 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22941 return (ENXIO); 22942 } 22943 22944 /* instantiate and initialize the command and cdb */ 22945 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22946 bzero(cdb, CDB_GROUP0); 22947 com->uscsi_flags = USCSI_SILENT; 22948 com->uscsi_timeout = un->un_reserve_release_time; 22949 com->uscsi_cdblen = CDB_GROUP0; 22950 com->uscsi_cdb = cdb; 22951 if (cmd == SD_RELEASE) { 22952 cdb[0] = SCMD_RELEASE; 22953 } else { 22954 cdb[0] = SCMD_RESERVE; 22955 } 22956 22957 /* Send the command. */ 22958 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22959 SD_PATH_STANDARD); 22960 22961 /* 22962 * "break" a reservation that is held by another host, by issuing a 22963 * reset if priority reserve is desired, and we could not get the 22964 * device. 22965 */ 22966 if ((cmd == SD_PRIORITY_RESERVE) && 22967 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22968 /* 22969 * First try to reset the LUN. If we cannot, then try a target 22970 * reset, followed by a bus reset if the target reset fails. 22971 */ 22972 int reset_retval = 0; 22973 if (un->un_f_lun_reset_enabled == TRUE) { 22974 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22975 } 22976 if (reset_retval == 0) { 22977 /* The LUN reset either failed or was not issued */ 22978 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22979 } 22980 if ((reset_retval == 0) && 22981 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22982 rval = EIO; 22983 kmem_free(com, sizeof (*com)); 22984 return (rval); 22985 } 22986 22987 bzero(com, sizeof (struct uscsi_cmd)); 22988 com->uscsi_flags = USCSI_SILENT; 22989 com->uscsi_cdb = cdb; 22990 com->uscsi_cdblen = CDB_GROUP0; 22991 com->uscsi_timeout = 5; 22992 22993 /* 22994 * Reissue the last reserve command, this time without request 22995 * sense. Assume that it is just a regular reserve command. 22996 */ 22997 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22998 SD_PATH_STANDARD); 22999 } 23000 23001 /* Return an error if still getting a reservation conflict. 
*/ 23002 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 23003 rval = EACCES; 23004 } 23005 23006 kmem_free(com, sizeof (*com)); 23007 return (rval); 23008 } 23009 23010 23011 #define SD_NDUMP_RETRIES 12 23012 /* 23013 * System Crash Dump routine 23014 */ 23015 23016 static int 23017 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 23018 { 23019 int instance; 23020 int partition; 23021 int i; 23022 int err; 23023 struct sd_lun *un; 23024 struct scsi_pkt *wr_pktp; 23025 struct buf *wr_bp; 23026 struct buf wr_buf; 23027 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 23028 daddr_t tgt_blkno; /* rmw - blkno for target */ 23029 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 23030 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 23031 size_t io_start_offset; 23032 int doing_rmw = FALSE; 23033 int rval; 23034 ssize_t dma_resid; 23035 daddr_t oblkno; 23036 diskaddr_t nblks = 0; 23037 diskaddr_t start_block; 23038 23039 instance = SDUNIT(dev); 23040 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 23041 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 23042 return (ENXIO); 23043 } 23044 23045 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 23046 23047 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 23048 23049 partition = SDPART(dev); 23050 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 23051 23052 /* Validate the blocks to dump against the partition size. */ 23053 23054 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 23055 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 23056 23057 if ((blkno + nblk) > nblks) { 23058 SD_TRACE(SD_LOG_DUMP, un, 23059 "sddump: dump range larger than partition: " 23060 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 23061 blkno, nblk, nblks); 23062 return (EINVAL); 23063 } 23064 23065 mutex_enter(&un->un_pm_mutex); 23066 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 23067 struct scsi_pkt *start_pktp; 23068 23069 mutex_exit(&un->un_pm_mutex); 23070 23071 /* 23072 * use the pm framework to power on the HBA first 23073 */ 23074 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 23075 23076 /* 23077 * Dump no longer uses sdpower to power on a device; it's 23078 * in-line here so it can be done in polled mode. 23079 */ 23080 23081 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 23082 23083 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 23084 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 23085 23086 if (start_pktp == NULL) { 23087 /* We were not given a SCSI packet, fail. */ 23088 return (EIO); 23089 } 23090 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 23091 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 23092 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 23093 start_pktp->pkt_flags = FLAG_NOINTR; 23094 23095 mutex_enter(SD_MUTEX(un)); 23096 SD_FILL_SCSI1_LUN(un, start_pktp); 23097 mutex_exit(SD_MUTEX(un)); 23098 /* 23099 * Scsi_poll returns 0 (success) if the command completes and 23100 * the status block is STATUS_GOOD. 23101 */ 23102 if (sd_scsi_poll(un, start_pktp) != 0) { 23103 scsi_destroy_pkt(start_pktp); 23104 return (EIO); 23105 } 23106 scsi_destroy_pkt(start_pktp); 23107 (void) sd_ddi_pm_resume(un); 23108 } else { 23109 mutex_exit(&un->un_pm_mutex); 23110 } 23111 23112 mutex_enter(SD_MUTEX(un)); 23113 un->un_throttle = 0; 23114 23115 /* 23116 * The first time through, reset the specific target device. 23117 * However, when cpr calls sddump we know that sd is in a 23118 * good state, so no bus reset is required. 23119 * Clear sense data via Request Sense cmd.
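 * (All of this runs polled: packets are issued FLAG_NOINTR and sense
 * is fetched with sd_send_polled_RQS(), since interrupts cannot be
 * relied upon at crash dump time.)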
23120 * In sddump we don't care about allow_bus_device_reset anymore 23121 */ 23122 23123 if ((un->un_state != SD_STATE_SUSPENDED) && 23124 (un->un_state != SD_STATE_DUMPING)) { 23125 23126 New_state(un, SD_STATE_DUMPING); 23127 23128 if (un->un_f_is_fibre == FALSE) { 23129 mutex_exit(SD_MUTEX(un)); 23130 /* 23131 * Attempt a bus reset for parallel scsi. 23132 * 23133 * Note: A bus reset is required because on some host 23134 * systems (e.g. E420R) a bus device reset is 23135 * insufficient to reset the state of the target. 23136 * 23137 * Note: Don't issue the reset for fibre-channel, 23138 * because this tends to hang the bus (loop) for 23139 * too long while everyone is logging out and in 23140 * and the deadman timer for dumping will fire 23141 * before the dump is complete. 23142 */ 23143 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 23144 mutex_enter(SD_MUTEX(un)); 23145 Restore_state(un); 23146 mutex_exit(SD_MUTEX(un)); 23147 return (EIO); 23148 } 23149 23150 /* Delay to give the device some recovery time. */ 23151 drv_usecwait(10000); 23152 23153 if (sd_send_polled_RQS(un) == SD_FAILURE) { 23154 SD_INFO(SD_LOG_DUMP, un, 23155 "sddump: sd_send_polled_RQS failed\n"); 23156 } 23157 mutex_enter(SD_MUTEX(un)); 23158 } 23159 } 23160 23161 /* 23162 * Convert the partition-relative block number to a 23163 * disk physical block number. 23164 */ 23165 blkno += start_block; 23166 23167 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 23168 23169 23170 /* 23171 * Check if the device has a non-512 block size. 23172 */ 23173 wr_bp = NULL; 23174 if (NOT_DEVBSIZE(un)) { 23175 tgt_byte_offset = blkno * un->un_sys_blocksize; 23176 tgt_byte_count = nblk * un->un_sys_blocksize; 23177 if ((tgt_byte_offset % un->un_tgt_blocksize) || 23178 (tgt_byte_count % un->un_tgt_blocksize)) { 23179 doing_rmw = TRUE; 23180 /* 23181 * Calculate the block number and number of blocks 23182 * in terms of the media block size. 23183 */ 23184 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23185 tgt_nblk = 23186 ((tgt_byte_offset + tgt_byte_count + 23187 (un->un_tgt_blocksize - 1)) / 23188 un->un_tgt_blocksize) - tgt_blkno; 23189 23190 /* 23191 * Invoke the routine which is going to do the read part 23192 * of read-modify-write. 23193 * Note that this routine returns a pointer to 23194 * a valid bp in wr_bp. 23195 */ 23196 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 23197 &wr_bp); 23198 if (err) { 23199 mutex_exit(SD_MUTEX(un)); 23200 return (err); 23201 } 23202 /* 23203 * Offset is being calculated as - 23204 * (original block # * system block size) - 23205 * (new block # * target block size) 23206 */ 23207 io_start_offset = 23208 ((uint64_t)(blkno * un->un_sys_blocksize)) - 23209 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 23210 23211 ASSERT((io_start_offset >= 0) && 23212 (io_start_offset < un->un_tgt_blocksize)); 23213 /* 23214 * Do the modify portion of read modify write.
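 * Only the nblk system-size blocks being dumped are overwritten at
 * io_start_offset inside the larger target-aligned buffer that was
 * just read; the surrounding bytes are preserved for the write back.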
23215 */ 23216 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 23217 (size_t)nblk * un->un_sys_blocksize); 23218 } else { 23219 doing_rmw = FALSE; 23220 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23221 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 23222 } 23223 23224 /* Convert blkno and nblk to target blocks */ 23225 blkno = tgt_blkno; 23226 nblk = tgt_nblk; 23227 } else { 23228 wr_bp = &wr_buf; 23229 bzero(wr_bp, sizeof (struct buf)); 23230 wr_bp->b_flags = B_BUSY; 23231 wr_bp->b_un.b_addr = addr; 23232 wr_bp->b_bcount = nblk << DEV_BSHIFT; 23233 wr_bp->b_resid = 0; 23234 } 23235 23236 mutex_exit(SD_MUTEX(un)); 23237 23238 /* 23239 * Obtain a SCSI packet for the write command. 23240 * It should be safe to call the allocator here without 23241 * worrying about being locked for DVMA mapping because 23242 * the address we're passed is already a DVMA mapping 23243 * 23244 * We are also not going to worry about semaphore ownership 23245 * in the dump buffer. Dumping is single threaded at present. 23246 */ 23247 23248 wr_pktp = NULL; 23249 23250 dma_resid = wr_bp->b_bcount; 23251 oblkno = blkno; 23252 23253 while (dma_resid != 0) { 23254 23255 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23256 wr_bp->b_flags &= ~B_ERROR; 23257 23258 if (un->un_partial_dma_supported == 1) { 23259 blkno = oblkno + 23260 ((wr_bp->b_bcount - dma_resid) / 23261 un->un_tgt_blocksize); 23262 nblk = dma_resid / un->un_tgt_blocksize; 23263 23264 if (wr_pktp) { 23265 /* 23266 * Partial DMA transfers after initial transfer 23267 */ 23268 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 23269 blkno, nblk); 23270 } else { 23271 /* Initial transfer */ 23272 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23273 un->un_pkt_flags, NULL_FUNC, NULL, 23274 blkno, nblk); 23275 } 23276 } else { 23277 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23278 0, NULL_FUNC, NULL, blkno, nblk); 23279 } 23280 23281 if (rval == 0) { 23282 /* We were given a SCSI packet, continue. 
*/ 23283 break; 23284 } 23285 23286 if (i == 0) { 23287 if (wr_bp->b_flags & B_ERROR) { 23288 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23289 "no resources for dumping; " 23290 "error code: 0x%x, retrying", 23291 geterror(wr_bp)); 23292 } else { 23293 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23294 "no resources for dumping; retrying"); 23295 } 23296 } else if (i != (SD_NDUMP_RETRIES - 1)) { 23297 if (wr_bp->b_flags & B_ERROR) { 23298 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23299 "no resources for dumping; error code: " 23300 "0x%x, retrying\n", geterror(wr_bp)); 23301 } 23302 } else { 23303 if (wr_bp->b_flags & B_ERROR) { 23304 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23305 "no resources for dumping; " 23306 "error code: 0x%x, retries failed, " 23307 "giving up.\n", geterror(wr_bp)); 23308 } else { 23309 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23310 "no resources for dumping; " 23311 "retries failed, giving up.\n"); 23312 } 23313 mutex_enter(SD_MUTEX(un)); 23314 Restore_state(un); 23315 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 23316 mutex_exit(SD_MUTEX(un)); 23317 scsi_free_consistent_buf(wr_bp); 23318 } else { 23319 mutex_exit(SD_MUTEX(un)); 23320 } 23321 return (EIO); 23322 } 23323 drv_usecwait(10000); 23324 } 23325 23326 if (un->un_partial_dma_supported == 1) { 23327 /* 23328 * save the resid from PARTIAL_DMA 23329 */ 23330 dma_resid = wr_pktp->pkt_resid; 23331 if (dma_resid != 0) 23332 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23333 wr_pktp->pkt_resid = 0; 23334 } else { 23335 dma_resid = 0; 23336 } 23337 23338 /* SunBug 1222170 */ 23339 wr_pktp->pkt_flags = FLAG_NOINTR; 23340 23341 err = EIO; 23342 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23343 23344 /* 23345 * Scsi_poll returns 0 (success) if the command completes and 23346 * the status block is STATUS_GOOD. We should only check 23347 * errors if this condition is not true. Even then we should 23348 * send our own request sense packet only if we have a check 23349 * condition and auto request sense has not been performed by 23350 * the hba. 23351 */ 23352 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23353 23354 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23355 (wr_pktp->pkt_resid == 0)) { 23356 err = SD_SUCCESS; 23357 break; 23358 } 23359 23360 /* 23361 * Check CMD_DEV_GONE 1st, give up if device is gone. 
23362 */ 23363 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23364 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23365 "Error while dumping state...Device is gone\n"); 23366 break; 23367 } 23368 23369 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23370 SD_INFO(SD_LOG_DUMP, un, 23371 "sddump: write failed with CHECK, try # %d\n", i); 23372 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23373 (void) sd_send_polled_RQS(un); 23374 } 23375 23376 continue; 23377 } 23378 23379 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23380 int reset_retval = 0; 23381 23382 SD_INFO(SD_LOG_DUMP, un, 23383 "sddump: write failed with BUSY, try # %d\n", i); 23384 23385 if (un->un_f_lun_reset_enabled == TRUE) { 23386 reset_retval = scsi_reset(SD_ADDRESS(un), 23387 RESET_LUN); 23388 } 23389 if (reset_retval == 0) { 23390 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23391 } 23392 (void) sd_send_polled_RQS(un); 23393 23394 } else { 23395 SD_INFO(SD_LOG_DUMP, un, 23396 "sddump: write failed with 0x%x, try # %d\n", 23397 SD_GET_PKT_STATUS(wr_pktp), i); 23398 mutex_enter(SD_MUTEX(un)); 23399 sd_reset_target(un, wr_pktp); 23400 mutex_exit(SD_MUTEX(un)); 23401 } 23402 23403 /* 23404 * If we are not getting anywhere with lun/target resets, 23405 * let's reset the bus. 23406 */ 23407 if (i == SD_NDUMP_RETRIES/2) { 23408 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23409 (void) sd_send_polled_RQS(un); 23410 } 23411 } 23412 } 23413 23414 scsi_destroy_pkt(wr_pktp); 23415 mutex_enter(SD_MUTEX(un)); 23416 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23417 mutex_exit(SD_MUTEX(un)); 23418 scsi_free_consistent_buf(wr_bp); 23419 } else { 23420 mutex_exit(SD_MUTEX(un)); 23421 } 23422 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23423 return (err); 23424 } 23425 23426 /* 23427 * Function: sd_scsi_poll() 23428 * 23429 * Description: This is a wrapper for the scsi_poll call. 23430 * 23431 * Arguments: sd_lun - The unit structure 23432 * scsi_pkt - The scsi packet being sent to the device. 23433 * 23434 * Return Code: 0 - Command completed successfully with good status 23435 * -1 - Command failed. This could indicate a check condition 23436 * or other status value requiring recovery action. 23437 * 23438 * NOTE: This code is only called off sddump(). 23439 */ 23440 23441 static int 23442 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23443 { 23444 int status; 23445 23446 ASSERT(un != NULL); 23447 ASSERT(!mutex_owned(SD_MUTEX(un))); 23448 ASSERT(pktp != NULL); 23449 23450 status = SD_SUCCESS; 23451 23452 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23453 pktp->pkt_flags |= un->un_tagflags; 23454 pktp->pkt_flags &= ~FLAG_NODISCON; 23455 } 23456 23457 status = sd_ddi_scsi_poll(pktp); 23458 /* 23459 * Scsi_poll returns 0 (success) if the command completes and the 23460 * status block is STATUS_GOOD. We should only check errors if this 23461 * condition is not true. Even then we should send our own request 23462 * sense packet only if we have a check condition and auto 23463 * request sense has not been performed by the hba. 23464 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23465 */ 23466 if ((status != SD_SUCCESS) && 23467 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23468 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23469 (pktp->pkt_reason != CMD_DEV_GONE)) 23470 (void) sd_send_polled_RQS(un); 23471 23472 return (status); 23473 } 23474 23475 /* 23476 * Function: sd_send_polled_RQS() 23477 * 23478 * Description: This sends the request sense command to a device. 
23479 * 23480 * Arguments: sd_lun - The unit structure 23481 * 23482 * Return Code: 0 - Command completed successfully with good status 23483 * -1 - Command failed. 23484 * 23485 */ 23486 23487 static int 23488 sd_send_polled_RQS(struct sd_lun *un) 23489 { 23490 int ret_val; 23491 struct scsi_pkt *rqs_pktp; 23492 struct buf *rqs_bp; 23493 23494 ASSERT(un != NULL); 23495 ASSERT(!mutex_owned(SD_MUTEX(un))); 23496 23497 ret_val = SD_SUCCESS; 23498 23499 rqs_pktp = un->un_rqs_pktp; 23500 rqs_bp = un->un_rqs_bp; 23501 23502 mutex_enter(SD_MUTEX(un)); 23503 23504 if (un->un_sense_isbusy) { 23505 ret_val = SD_FAILURE; 23506 mutex_exit(SD_MUTEX(un)); 23507 return (ret_val); 23508 } 23509 23510 /* 23511 * If the request sense buffer (and packet) is not in use, 23512 * let's set un_sense_isbusy and send our packet 23513 */ 23514 un->un_sense_isbusy = 1; 23515 rqs_pktp->pkt_resid = 0; 23516 rqs_pktp->pkt_reason = 0; 23517 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23518 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23519 23520 mutex_exit(SD_MUTEX(un)); 23521 23522 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23523 " 0x%p\n", rqs_bp->b_un.b_addr); 23524 23525 /* 23526 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23527 * axle - it has a call into us! 23528 */ 23529 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23530 SD_INFO(SD_LOG_COMMON, un, 23531 "sd_send_polled_RQS: RQS failed\n"); 23532 } 23533 23534 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23535 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23536 23537 mutex_enter(SD_MUTEX(un)); 23538 un->un_sense_isbusy = 0; 23539 mutex_exit(SD_MUTEX(un)); 23540 23541 return (ret_val); 23542 } 23543 23544 /* 23545 * Defines needed for localized version of the scsi_poll routine. 23546 */ 23547 #define CSEC 10000 /* usecs */ 23548 #define SEC_TO_CSEC (1000000/CSEC) 23549 23550 /* 23551 * Function: sd_ddi_scsi_poll() 23552 * 23553 * Description: Localized version of the scsi_poll routine. The purpose is to 23554 * send a scsi_pkt to a device as a polled command. This version 23555 * is to ensure more robust handling of transport errors. 23556 * Specifically, this routine cures the not-ready to ready 23557 * transition for power up and reset of Sonoma LUNs. This can 23558 * take up to 45 seconds for power-on and 20 seconds for reset 23559 * of a Sonoma LUN. 23560 * 23561 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23562 * 23563 * Return Code: 0 - Command completed successfully with good status 23564 * -1 - Command failed. 23565 * 23566 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 23567 * be fixed (removing this code), we need to determine how to handle the 23568 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 23569 * 23570 * NOTE: This code is only called off sddump(). 23571 */ 23572 static int 23573 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23574 { 23575 int rval = -1; 23576 int savef; 23577 long savet; 23578 void (*savec)(); 23579 int timeout; 23580 int busy_count; 23581 int poll_delay; 23582 int rc; 23583 uint8_t *sensep; 23584 struct scsi_arq_status *arqstat; 23585 extern int do_polled_io; 23586 23587 ASSERT(pkt->pkt_scbp); 23588 23589 /* 23590 * save old flags.
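 * (the saved flags, completion callback and timeout are restored
 * just before this routine returns)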
23591 */ 23592 savef = pkt->pkt_flags; 23593 savec = pkt->pkt_comp; 23594 savet = pkt->pkt_time; 23595 23596 pkt->pkt_flags |= FLAG_NOINTR; 23597 23598 /* 23599 * XXX there is nothing in the SCSA spec that states that we should not 23600 * do a callback for polled cmds; however, removing this will break sd 23601 * and probably other target drivers 23602 */ 23603 pkt->pkt_comp = NULL; 23604 23605 /* 23606 * we don't like a polled command without timeout. 23607 * 60 seconds seems long enough. 23608 */ 23609 if (pkt->pkt_time == 0) 23610 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23611 23612 /* 23613 * Send polled cmd. 23614 * 23615 * We do some error recovery for various errors. Tran_busy, 23616 * queue full, and non-dispatched commands are retried every 10 msec. 23617 * as they are typically transient failures. Busy status and Not 23618 * Ready are retried every second as this status takes a while to 23619 * change. 23620 */ 23621 timeout = pkt->pkt_time * SEC_TO_CSEC; 23622 23623 for (busy_count = 0; busy_count < timeout; busy_count++) { 23624 /* 23625 * Initialize pkt status variables. 23626 */ 23627 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23628 23629 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23630 if (rc != TRAN_BUSY) { 23631 /* Transport failed - give up. */ 23632 break; 23633 } else { 23634 /* Transport busy - try again. */ 23635 poll_delay = 1 * CSEC; /* 10 msec. */ 23636 } 23637 } else { 23638 /* 23639 * Transport accepted - check pkt status. 23640 */ 23641 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23642 if ((pkt->pkt_reason == CMD_CMPLT) && 23643 (rc == STATUS_CHECK) && 23644 (pkt->pkt_state & STATE_ARQ_DONE)) { 23645 arqstat = 23646 (struct scsi_arq_status *)(pkt->pkt_scbp); 23647 sensep = (uint8_t *)&arqstat->sts_sensedata; 23648 } else { 23649 sensep = NULL; 23650 } 23651 23652 if ((pkt->pkt_reason == CMD_CMPLT) && 23653 (rc == STATUS_GOOD)) { 23654 /* No error - we're done */ 23655 rval = 0; 23656 break; 23657 23658 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23659 /* Lost connection - give up */ 23660 break; 23661 23662 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23663 (pkt->pkt_state == 0)) { 23664 /* Pkt not dispatched - try again. */ 23665 poll_delay = 1 * CSEC; /* 10 msec. */ 23666 23667 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23668 (rc == STATUS_QFULL)) { 23669 /* Queue full - try again. */ 23670 poll_delay = 1 * CSEC; /* 10 msec. */ 23671 23672 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23673 (rc == STATUS_BUSY)) { 23674 /* Busy - try again. */ 23675 poll_delay = 100 * CSEC; /* 1 sec. */ 23676 busy_count += (SEC_TO_CSEC - 1); 23677 23678 } else if ((sensep != NULL) && 23679 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 23680 /* 23681 * Unit Attention - try again. 23682 * Pretend it took 1 sec. 23683 * NOTE: 'continue' avoids poll_delay 23684 */ 23685 busy_count += (SEC_TO_CSEC - 1); 23686 continue; 23687 23688 } else if ((sensep != NULL) && 23689 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23690 (scsi_sense_asc(sensep) == 0x04) && 23691 (scsi_sense_ascq(sensep) == 0x01)) { 23692 /* 23693 * Not ready -> ready - try again. 23694 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 23695 * ...same as STATUS_BUSY 23696 */ 23697 poll_delay = 100 * CSEC; /* 1 sec. */ 23698 busy_count += (SEC_TO_CSEC - 1); 23699 23700 } else { 23701 /* BAD status - give up. 
*/ 23702 break; 23703 } 23704 } 23705 23706 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 23707 !do_polled_io) { 23708 delay(drv_usectohz(poll_delay)); 23709 } else { 23710 /* we busy wait during cpr_dump or interrupt threads */ 23711 drv_usecwait(poll_delay); 23712 } 23713 } 23714 23715 pkt->pkt_flags = savef; 23716 pkt->pkt_comp = savec; 23717 pkt->pkt_time = savet; 23718 23719 /* return on error */ 23720 if (rval) 23721 return (rval); 23722 23723 /* 23724 * This is not a performance critical code path. 23725 * 23726 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 23727 * issues associated with looking at DMA memory prior to 23728 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 23729 */ 23730 scsi_sync_pkt(pkt); 23731 return (0); 23732 } 23733 23734 23735 23736 /* 23737 * Function: sd_persistent_reservation_in_read_keys 23738 * 23739 * Description: This routine is the driver entry point for handling 23740 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23741 * by sending the SCSI-3 PRIN commands to the device. 23742 * Processes the read keys command response by copying the 23743 * reservation key information into the user provided buffer. 23744 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23745 * 23746 * Arguments: un - Pointer to soft state struct for the target. 23747 * usrp - user provided pointer to multihost Persistent In Read 23748 * Keys structure (mhioc_inkeys_t) 23749 * flag - this argument is a pass through to ddi_copyxxx() 23750 * directly from the mode argument of ioctl(). 23751 * 23752 * Return Code: 0 - Success 23753 * EACCES 23754 * ENOTSUP 23755 * errno return code from sd_send_scsi_cmd() 23756 * 23757 * Context: Can sleep. Does not return until command is completed. 23758 */ 23759 23760 static int 23761 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23762 mhioc_inkeys_t *usrp, int flag) 23763 { 23764 #ifdef _MULTI_DATAMODEL 23765 struct mhioc_key_list32 li32; 23766 #endif 23767 sd_prin_readkeys_t *in; 23768 mhioc_inkeys_t *ptr; 23769 mhioc_key_list_t li; 23770 uchar_t *data_bufp; 23771 int data_len; 23772 int rval; 23773 size_t copysz; 23774 23775 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23776 return (EINVAL); 23777 } 23778 bzero(&li, sizeof (mhioc_key_list_t)); 23779 23780 /* 23781 * Get the listsize from user 23782 */ 23783 #ifdef _MULTI_DATAMODEL 23784 23785 switch (ddi_model_convert_from(flag & FMODELS)) { 23786 case DDI_MODEL_ILP32: 23787 copysz = sizeof (struct mhioc_key_list32); 23788 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23789 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23790 "sd_persistent_reservation_in_read_keys: " 23791 "failed ddi_copyin: mhioc_key_list32_t\n"); 23792 rval = EFAULT; 23793 goto done; 23794 } 23795 li.listsize = li32.listsize; 23796 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23797 break; 23798 23799 case DDI_MODEL_NONE: 23800 copysz = sizeof (mhioc_key_list_t); 23801 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23802 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23803 "sd_persistent_reservation_in_read_keys: " 23804 "failed ddi_copyin: mhioc_key_list_t\n"); 23805 rval = EFAULT; 23806 goto done; 23807 } 23808 break; 23809 } 23810 23811 #else /* ! _MULTI_DATAMODEL */
23812 copysz = sizeof (mhioc_key_list_t); 23813 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23814 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23815 "sd_persistent_reservation_in_read_keys: " 23816 "failed ddi_copyin: mhioc_key_list_t\n"); 23817 rval = EFAULT; 23818 goto done; 23819 } 23820 #endif 23821 23822 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23823 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23824 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23825 23826 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23827 data_len, data_bufp)) != 0) { 23828 goto done; 23829 } 23830 in = (sd_prin_readkeys_t *)data_bufp; 23831 ptr->generation = BE_32(in->generation); 23832 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23833 23834 /* 23835 * Return the min(listsize, listlen) keys 23836 */ 23837 #ifdef _MULTI_DATAMODEL 23838 23839 switch (ddi_model_convert_from(flag & FMODELS)) { 23840 case DDI_MODEL_ILP32: 23841 li32.listlen = li.listlen; 23842 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23843 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23844 "sd_persistent_reservation_in_read_keys: " 23845 "failed ddi_copyout: mhioc_key_list32_t\n"); 23846 rval = EFAULT; 23847 goto done; 23848 } 23849 break; 23850 23851 case DDI_MODEL_NONE: 23852 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23853 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23854 "sd_persistent_reservation_in_read_keys: " 23855 "failed ddi_copyout: mhioc_key_list_t\n"); 23856 rval = EFAULT; 23857 goto done; 23858 } 23859 break; 23860 } 23861 23862 #else /* ! _MULTI_DATAMODEL */ 23863 23864 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23865 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23866 "sd_persistent_reservation_in_read_keys: " 23867 "failed ddi_copyout: mhioc_key_list_t\n"); 23868 rval = EFAULT; 23869 goto done; 23870 } 23871 23872 #endif /* _MULTI_DATAMODEL */ 23873 23874 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23875 li.listsize * MHIOC_RESV_KEY_SIZE); 23876 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23877 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23878 "sd_persistent_reservation_in_read_keys: " 23879 "failed ddi_copyout: keylist\n"); 23880 rval = EFAULT; 23881 } 23882 done: 23883 kmem_free(data_bufp, data_len); 23884 return (rval); 23885 } 23886 23887 23888 /* 23889 * Function: sd_persistent_reservation_in_read_resv 23890 * 23891 * Description: This routine is the driver entry point for handling 23892 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23893 * by sending the SCSI-3 PRIN commands to the device. 23894 * Process the read persistent reservations command response by 23895 * copying the reservation information into the user provided 23896 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23897 * 23898 * Arguments: un - Pointer to soft state struct for the target. 23899 * usrp - user provided pointer to multihost Persistent In Read 23900 * Reservations structure (mhioc_inresvs_t) 23901 * flag - this argument is a pass through to ddi_copyxxx() 23902 * directly from the mode argument of ioctl(). 23903 * 23904 * Return Code: 0 - Success 23905 * EACCES 23906 * ENOTSUP 23907 * errno return code from sd_send_scsi_cmd() 23908 * 23909 * Context: Can sleep. Does not return until command is completed.
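 * (The returned data is parsed below: generation and length come back
 * big-endian and are BE_32-converted, then each reservation descriptor
 * is copied out to the caller's list one at a time.)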
23910 */ 23911 23912 static int 23913 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23914 mhioc_inresvs_t *usrp, int flag) 23915 { 23916 #ifdef _MULTI_DATAMODEL 23917 struct mhioc_resv_desc_list32 resvlist32; 23918 #endif 23919 sd_prin_readresv_t *in; 23920 mhioc_inresvs_t *ptr; 23921 sd_readresv_desc_t *readresv_ptr; 23922 mhioc_resv_desc_list_t resvlist; 23923 mhioc_resv_desc_t resvdesc; 23924 uchar_t *data_bufp; 23925 int data_len; 23926 int rval; 23927 int i; 23928 size_t copysz; 23929 mhioc_resv_desc_t *bufp; 23930 23931 if ((ptr = usrp) == NULL) { 23932 return (EINVAL); 23933 } 23934 23935 /* 23936 * Get the listsize from user 23937 */ 23938 #ifdef _MULTI_DATAMODEL 23939 switch (ddi_model_convert_from(flag & FMODELS)) { 23940 case DDI_MODEL_ILP32: 23941 copysz = sizeof (struct mhioc_resv_desc_list32); 23942 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23943 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23944 "sd_persistent_reservation_in_read_resv: " 23945 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23946 rval = EFAULT; 23947 goto done; 23948 } 23949 resvlist.listsize = resvlist32.listsize; 23950 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23951 break; 23952 23953 case DDI_MODEL_NONE: 23954 copysz = sizeof (mhioc_resv_desc_list_t); 23955 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23956 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23957 "sd_persistent_reservation_in_read_resv: " 23958 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23959 rval = EFAULT; 23960 goto done; 23961 } 23962 break; 23963 } 23964 #else /* ! _MULTI_DATAMODEL */ 23965 copysz = sizeof (mhioc_resv_desc_list_t); 23966 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23967 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23968 "sd_persistent_reservation_in_read_resv: " 23969 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23970 rval = EFAULT; 23971 goto done; 23972 } 23973 #endif /* ! _MULTI_DATAMODEL */ 23974 23975 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23976 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23977 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23978 23979 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23980 data_len, data_bufp)) != 0) { 23981 goto done; 23982 } 23983 in = (sd_prin_readresv_t *)data_bufp; 23984 ptr->generation = BE_32(in->generation); 23985 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23986 23987 /* 23988 * Return the min(listsize, listlen) keys 23989 */ 23990 #ifdef _MULTI_DATAMODEL 23991 23992 switch (ddi_model_convert_from(flag & FMODELS)) { 23993 case DDI_MODEL_ILP32: 23994 resvlist32.listlen = resvlist.listlen; 23995 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23996 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23997 "sd_persistent_reservation_in_read_resv: " 23998 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23999 rval = EFAULT; 24000 goto done; 24001 } 24002 break; 24003 24004 case DDI_MODEL_NONE: 24005 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 24006 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24007 "sd_persistent_reservation_in_read_resv: " 24008 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24009 rval = EFAULT; 24010 goto done; 24011 } 24012 break; 24013 } 24014 24015 #else /* ! _MULTI_DATAMODEL */ 24016 24017 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 24018 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24019 "sd_persistent_reservation_in_read_resv: " 24020 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24021 rval = EFAULT; 24022 goto done; 24023 } 24024 24025 #endif /* !
_MULTI_DATAMODEL */ 24026 24027 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 24028 bufp = resvlist.list; 24029 copysz = sizeof (mhioc_resv_desc_t); 24030 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 24031 i++, readresv_ptr++, bufp++) { 24032 24033 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 24034 MHIOC_RESV_KEY_SIZE); 24035 resvdesc.type = readresv_ptr->type; 24036 resvdesc.scope = readresv_ptr->scope; 24037 resvdesc.scope_specific_addr = 24038 BE_32(readresv_ptr->scope_specific_addr); 24039 24040 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 24041 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24042 "sd_persistent_reservation_in_read_resv: " 24043 "failed ddi_copyout: resvlist\n"); 24044 rval = EFAULT; 24045 goto done; 24046 } 24047 } 24048 done: 24049 kmem_free(data_bufp, data_len); 24050 return (rval); 24051 } 24052 24053 24054 /* 24055 * Function: sr_change_blkmode() 24056 * 24057 * Description: This routine is the driver entry point for handling CD-ROM 24058 * block mode ioctl requests. Support for returning and changing 24059 * the current block size in use by the device is implemented. The 24060 * LBA size is changed via a MODE SELECT Block Descriptor. 24061 * 24062 * This routine issues a mode sense with an allocation length of 24063 * 12 bytes for the mode page header and a single block descriptor. 24064 * 24065 * Arguments: dev - the device 'dev_t' 24066 * cmd - the request type; one of CDROMGBLKMODE (get) or 24067 * CDROMSBLKMODE (set) 24068 * data - current block size or requested block size 24069 * flag - this argument is a pass through to ddi_copyxxx() directly 24070 * from the mode argument of ioctl(). 24071 * 24072 * Return Code: the code returned by sd_send_scsi_cmd() 24073 * EINVAL if invalid arguments are provided 24074 * EFAULT if ddi_copyxxx() fails 24075 * ENXIO if fail ddi_get_soft_state 24076 * EIO if invalid mode sense block descriptor length 24077 * 24078 */ 24079 24080 static int 24081 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 24082 { 24083 struct sd_lun *un = NULL; 24084 struct mode_header *sense_mhp, *select_mhp; 24085 struct block_descriptor *sense_desc, *select_desc; 24086 int current_bsize; 24087 int rval = EINVAL; 24088 uchar_t *sense = NULL; 24089 uchar_t *select = NULL; 24090 24091 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 24092 24093 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24094 return (ENXIO); 24095 } 24096 24097 /* 24098 * The block length is changed via the Mode Select block descriptor, the 24099 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 24100 * required as part of this routine. Therefore the mode sense allocation 24101 * length is specified to be the length of a mode page header and a 24102 * block descriptor. 
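 *
 * For reference, a sketch of the buffer parsed below (group 0 mode
 * sense layout; sizes follow MODE_HEADER_LENGTH and
 * MODE_BLK_DESC_LENGTH):
 *
 *	sense[0..3]	struct mode_header (bdesc_length in byte 3)
 *	sense[4..11]	one struct block_descriptor; the current block
 *			size is big-endian in bytes 9-11 (blksize_hi,
 *			blksize_mid, blksize_lo)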
24103 */ 24104 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 24105 24106 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24107 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 24108 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24109 "sr_change_blkmode: Mode Sense Failed\n"); 24110 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24111 return (rval); 24112 } 24113 24114 /* Check the block descriptor len to handle only 1 block descriptor */ 24115 sense_mhp = (struct mode_header *)sense; 24116 if ((sense_mhp->bdesc_length == 0) || 24117 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 24118 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24119 "sr_change_blkmode: Mode Sense returned invalid block" 24120 " descriptor length\n"); 24121 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24122 return (EIO); 24123 } 24124 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 24125 current_bsize = ((sense_desc->blksize_hi << 16) | 24126 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 24127 24128 /* Process command */ 24129 switch (cmd) { 24130 case CDROMGBLKMODE: 24131 /* Return the block size obtained during the mode sense */ 24132 if (ddi_copyout(&current_bsize, (void *)data, 24133 sizeof (int), flag) != 0) 24134 rval = EFAULT; 24135 break; 24136 case CDROMSBLKMODE: 24137 /* Validate the requested block size */ 24138 switch (data) { 24139 case CDROM_BLK_512: 24140 case CDROM_BLK_1024: 24141 case CDROM_BLK_2048: 24142 case CDROM_BLK_2056: 24143 case CDROM_BLK_2336: 24144 case CDROM_BLK_2340: 24145 case CDROM_BLK_2352: 24146 case CDROM_BLK_2368: 24147 case CDROM_BLK_2448: 24148 case CDROM_BLK_2646: 24149 case CDROM_BLK_2647: 24150 break; 24151 default: 24152 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24153 "sr_change_blkmode: " 24154 "Block Size '%ld' Not Supported\n", data); 24155 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24156 return (EINVAL); 24157 } 24158 24159 /* 24160 * The current block size matches the requested block size so 24161 * there is no need to send the mode select to change the size 24162 */ 24163 if (current_bsize == data) { 24164 break; 24165 } 24166 24167 /* Build the select data for the requested block size */ 24168 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 24169 select_mhp = (struct mode_header *)select; 24170 select_desc = 24171 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 24172 /* 24173 * The LBA size is changed via the block descriptor, so the 24174 * descriptor is built according to the user data 24175 */ 24176 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 24177 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 24178 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 24179 select_desc->blksize_lo = (char)((data) & 0x000000ff); 24180 24181 /* Send the mode select for the requested block size */ 24182 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24183 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24184 SD_PATH_STANDARD)) != 0) { 24185 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24186 "sr_change_blkmode: Mode Select Failed\n"); 24187 /* 24188 * The mode select failed for the requested block size, 24189 * so reset the data for the original block size and 24190 * send it to the target. The error is indicated by the 24191 * return value for the failed mode select.
24192 */ 24193 select_desc->blksize_hi = sense_desc->blksize_hi; 24194 select_desc->blksize_mid = sense_desc->blksize_mid; 24195 select_desc->blksize_lo = sense_desc->blksize_lo; 24196 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24197 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24198 SD_PATH_STANDARD); 24199 } else { 24200 ASSERT(!mutex_owned(SD_MUTEX(un))); 24201 mutex_enter(SD_MUTEX(un)); 24202 sd_update_block_info(un, (uint32_t)data, 0); 24203 mutex_exit(SD_MUTEX(un)); 24204 } 24205 break; 24206 default: 24207 /* should not reach here, but check anyway */ 24208 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24209 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 24210 rval = EINVAL; 24211 break; 24212 } 24213 24214 if (select) { 24215 kmem_free(select, BUFLEN_CHG_BLK_MODE); 24216 } 24217 if (sense) { 24218 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24219 } 24220 return (rval); 24221 } 24222 24223 24224 /* 24225 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 24226 * implement driver support for getting and setting the CD speed. The command 24227 * set used will be based on the device type. If the device has not been 24228 * identified as MMC the Toshiba vendor specific mode page will be used. If 24229 * the device is MMC but does not support the Real Time Streaming feature 24230 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 24231 * be used to read the speed. 24232 */ 24233 24234 /* 24235 * Function: sr_change_speed() 24236 * 24237 * Description: This routine is the driver entry point for handling CD-ROM 24238 * drive speed ioctl requests for devices supporting the Toshiba 24239 * vendor specific drive speed mode page. Support for returning 24240 * and changing the current drive speed in use by the device is 24241 * implemented. 24242 * 24243 * Arguments: dev - the device 'dev_t' 24244 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24245 * CDROMSDRVSPEED (set) 24246 * data - current drive speed or requested drive speed 24247 * flag - this argument is a pass through to ddi_copyxxx() directly 24248 * from the mode argument of ioctl(). 24249 * 24250 * Return Code: the code returned by sd_send_scsi_cmd() 24251 * EINVAL if invalid arguments are provided 24252 * EFAULT if ddi_copyxxx() fails 24253 * ENXIO if fail ddi_get_soft_state 24254 * EIO if invalid mode sense block descriptor length 24255 */ 24256 24257 static int 24258 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24259 { 24260 struct sd_lun *un = NULL; 24261 struct mode_header *sense_mhp, *select_mhp; 24262 struct mode_speed *sense_page, *select_page; 24263 int current_speed; 24264 int rval = EINVAL; 24265 int bd_len; 24266 uchar_t *sense = NULL; 24267 uchar_t *select = NULL; 24268 24269 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24270 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24271 return (ENXIO); 24272 } 24273 24274 /* 24275 * Note: The drive speed is being modified here according to a Toshiba 24276 * vendor specific mode page (0x31). 
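 *
 * Illustrative user-level usage (hypothetical file descriptor; not
 * part of the driver). Note that CDROMSDRVSPEED passes the speed
 * code by value while CDROMGDRVSPEED takes a pointer to receive it:
 *
 *	int speed = CDROM_DOUBLE_SPEED;
 *	(void) ioctl(fd, CDROMSDRVSPEED, speed);
 *	if (ioctl(fd, CDROMGDRVSPEED, &speed) == 0)
 *		speed now holds the current drive speed code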
24277 */ 24278 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24279 24280 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24281 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 24282 SD_PATH_STANDARD)) != 0) { 24283 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24284 "sr_change_speed: Mode Sense Failed\n"); 24285 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24286 return (rval); 24287 } 24288 sense_mhp = (struct mode_header *)sense; 24289 24290 /* Check the block descriptor len to handle only 1 block descriptor */ 24291 bd_len = sense_mhp->bdesc_length; 24292 if (bd_len > MODE_BLK_DESC_LENGTH) { 24293 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24294 "sr_change_speed: Mode Sense returned invalid block " 24295 "descriptor length\n"); 24296 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24297 return (EIO); 24298 } 24299 24300 sense_page = (struct mode_speed *) 24301 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 24302 current_speed = sense_page->speed; 24303 24304 /* Process command */ 24305 switch (cmd) { 24306 case CDROMGDRVSPEED: 24307 /* Return the drive speed obtained during the mode sense */ 24308 if (current_speed == 0x2) { 24309 current_speed = CDROM_TWELVE_SPEED; 24310 } 24311 if (ddi_copyout(&current_speed, (void *)data, 24312 sizeof (int), flag) != 0) { 24313 rval = EFAULT; 24314 } 24315 break; 24316 case CDROMSDRVSPEED: 24317 /* Validate the requested drive speed */ 24318 switch ((uchar_t)data) { 24319 case CDROM_TWELVE_SPEED: 24320 data = 0x2; 24321 /*FALLTHROUGH*/ 24322 case CDROM_NORMAL_SPEED: 24323 case CDROM_DOUBLE_SPEED: 24324 case CDROM_QUAD_SPEED: 24325 case CDROM_MAXIMUM_SPEED: 24326 break; 24327 default: 24328 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24329 "sr_change_speed: " 24330 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 24331 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24332 return (EINVAL); 24333 } 24334 24335 /* 24336 * The current drive speed matches the requested drive speed so 24337 * there is no need to send the mode select to change the speed 24338 */ 24339 if (current_speed == data) { 24340 break; 24341 } 24342 24343 /* Build the select data for the requested drive speed */ 24344 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24345 select_mhp = (struct mode_header *)select; 24346 select_mhp->bdesc_length = 0; 24347 select_page = 24348 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 24351 select_page->mode_page.code = CDROM_MODE_SPEED; 24352 select_page->mode_page.length = 2; 24353 select_page->speed = (uchar_t)data; 24354 24355 /* Send the mode select for the requested drive speed */ 24356 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24357 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24358 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 24359 /* 24360 * The mode select failed for the requested drive speed, 24361 * so reset the data for the original drive speed and 24362 * send it to the target. The error is indicated by the 24363 * return value for the failed mode select.
24364 */ 24365 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24366 "sr_change_speed: Mode Select Failed\n"); 24367 select_page->speed = sense_page->speed; 24368 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24369 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24370 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 24371 } 24372 break; 24373 default: 24374 /* should not reach here, but check anyway */ 24375 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24376 "sr_change_speed: Command '%x' Not Supported\n", cmd); 24377 rval = EINVAL; 24378 break; 24379 } 24380 24381 if (select) { 24382 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 24383 } 24384 if (sense) { 24385 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24386 } 24387 24388 return (rval); 24389 } 24390 24391 24392 /* 24393 * Function: sr_atapi_change_speed() 24394 * 24395 * Description: This routine is the driver entry point for handling CD-ROM 24396 * drive speed ioctl requests for MMC devices that do not support 24397 * the Real Time Streaming feature (0x107). 24398 * 24399 * Note: This routine will use the SET SPEED command which may not 24400 * be supported by all devices. 24401 * 24402 * Arguments: dev - the device 'dev_t' 24403 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24404 * CDROMSDRVSPEED (set) 24405 * data - current drive speed or requested drive speed 24406 * flag - this argument is a pass through to ddi_copyxxx() directly 24407 * from the mode argument of ioctl(). 24408 * 24409 * Return Code: the code returned by sd_send_scsi_cmd() 24410 * EINVAL if invalid arguments are provided 24411 * EFAULT if ddi_copyxxx() fails 24412 * ENXIO if fail ddi_get_soft_state 24413 * EIO if invalid mode sense block descriptor length 24414 */ 24415 24416 static int 24417 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24418 { 24419 struct sd_lun *un; 24420 struct uscsi_cmd *com = NULL; 24421 struct mode_header_grp2 *sense_mhp; 24422 uchar_t *sense_page; 24423 uchar_t *sense = NULL; 24424 char cdb[CDB_GROUP5]; 24425 int bd_len; 24426 int current_speed = 0; 24427 int max_speed = 0; 24428 int rval; 24429 24430 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24431 24432 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24433 return (ENXIO); 24434 } 24435 24436 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 24437 24438 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 24439 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 24440 SD_PATH_STANDARD)) != 0) { 24441 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24442 "sr_atapi_change_speed: Mode Sense Failed\n"); 24443 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24444 return (rval); 24445 } 24446 24447 /* Check the block descriptor len to handle only 1 block descriptor */ 24448 sense_mhp = (struct mode_header_grp2 *)sense; 24449 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 24450 if (bd_len > MODE_BLK_DESC_LENGTH) { 24451 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24452 "sr_atapi_change_speed: Mode Sense returned invalid " 24453 "block descriptor length\n"); 24454 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24455 return (EIO); 24456 } 24457 24458 /* Calculate the current and maximum drive speeds */ 24459 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 24460 current_speed = (sense_page[14] << 8) | sense_page[15]; 24461 max_speed = (sense_page[8] << 8) | sense_page[9]; 24462 24463 /* Process the command */ 24464 switch (cmd) { 24465 case CDROMGDRVSPEED: 24466 current_speed /= SD_SPEED_1X; 24467 if
(ddi_copyout(&current_speed, (void *)data, 24468 sizeof (int), flag) != 0) 24469 rval = EFAULT; 24470 break; 24471 case CDROMSDRVSPEED: 24472 /* Convert the speed code to KB/sec */ 24473 switch ((uchar_t)data) { 24474 case CDROM_NORMAL_SPEED: 24475 current_speed = SD_SPEED_1X; 24476 break; 24477 case CDROM_DOUBLE_SPEED: 24478 current_speed = 2 * SD_SPEED_1X; 24479 break; 24480 case CDROM_QUAD_SPEED: 24481 current_speed = 4 * SD_SPEED_1X; 24482 break; 24483 case CDROM_TWELVE_SPEED: 24484 current_speed = 12 * SD_SPEED_1X; 24485 break; 24486 case CDROM_MAXIMUM_SPEED: 24487 current_speed = 0xffff; 24488 break; 24489 default: 24490 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24491 "sr_atapi_change_speed: invalid drive speed %d\n", 24492 (uchar_t)data); 24493 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24494 return (EINVAL); 24495 } 24496 24497 /* Check the request against the drive's max speed. */ 24498 if (current_speed != 0xffff) { 24499 if (current_speed > max_speed) { 24500 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24501 return (EINVAL); 24502 } 24503 } 24504 24505 /* 24506 * Build and send the SET SPEED command 24507 * 24508 * Note: The SET SPEED (0xBB) command used in this routine is 24509 * obsolete per the SCSI MMC spec but still supported in the 24510 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 24511 * therefore the command is still implemented in this routine. 24512 */ 24513 bzero(cdb, sizeof (cdb)); 24514 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 24515 cdb[2] = (uchar_t)(current_speed >> 8); 24516 cdb[3] = (uchar_t)current_speed; 24517 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24518 com->uscsi_cdb = (caddr_t)cdb; 24519 com->uscsi_cdblen = CDB_GROUP5; 24520 com->uscsi_bufaddr = NULL; 24521 com->uscsi_buflen = 0; 24522 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24523 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 24524 break; 24525 default: 24526 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24527 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 24528 rval = EINVAL; 24529 } 24530 24531 if (sense) { 24532 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24533 } 24534 if (com) { 24535 kmem_free(com, sizeof (*com)); 24536 } 24537 return (rval); 24538 } 24539 24540 24541 /* 24542 * Function: sr_pause_resume() 24543 * 24544 * Description: This routine is the driver entry point for handling CD-ROM 24545 * pause/resume ioctl requests. This only affects the audio play 24546 * operation. 24547 * 24548 * Arguments: dev - the device 'dev_t' 24549 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 24550 * for setting the resume bit of the cdb.
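 *
 * Illustrative user-level usage (hypothetical file descriptor; not
 * part of the driver); neither request takes an argument:
 *
 *	(void) ioctl(fd, CDROMPAUSE);	pause audio playback
 *	(void) ioctl(fd, CDROMRESUME);	resume where paused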
24551 * 24552 * Return Code: the code returned by sd_send_scsi_cmd() 24553 * EINVAL if invalid mode specified 24554 * 24555 */ 24556 24557 static int 24558 sr_pause_resume(dev_t dev, int cmd) 24559 { 24560 struct sd_lun *un; 24561 struct uscsi_cmd *com; 24562 char cdb[CDB_GROUP1]; 24563 int rval; 24564 24565 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24566 return (ENXIO); 24567 } 24568 24569 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24570 bzero(cdb, CDB_GROUP1); 24571 cdb[0] = SCMD_PAUSE_RESUME; 24572 switch (cmd) { 24573 case CDROMRESUME: 24574 cdb[8] = 1; 24575 break; 24576 case CDROMPAUSE: 24577 cdb[8] = 0; 24578 break; 24579 default: 24580 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 24581 " Command '%x' Not Supported\n", cmd); 24582 rval = EINVAL; 24583 goto done; 24584 } 24585 24586 com->uscsi_cdb = cdb; 24587 com->uscsi_cdblen = CDB_GROUP1; 24588 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24589 24590 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24591 SD_PATH_STANDARD); 24592 24593 done: 24594 kmem_free(com, sizeof (*com)); 24595 return (rval); 24596 } 24597 24598 24599 /* 24600 * Function: sr_play_msf() 24601 * 24602 * Description: This routine is the driver entry point for handling CD-ROM 24603 * ioctl requests to output the audio signals at the specified 24604 * starting address and continue the audio play until the specified 24605 * ending address (CDROMPLAYMSF) The address is in Minute Second 24606 * Frame (MSF) format. 24607 * 24608 * Arguments: dev - the device 'dev_t' 24609 * data - pointer to user provided audio msf structure, 24610 * specifying start/end addresses. 24611 * flag - this argument is a pass through to ddi_copyxxx() 24612 * directly from the mode argument of ioctl(). 
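 *
 * Illustrative user-level sketch (hypothetical values and file
 * descriptor; not part of the driver). Play from 2:00.00 up to
 * 5:30.00:
 *
 *	struct cdrom_msf msf;
 *	msf.cdmsf_min0 = 2;  msf.cdmsf_sec0 = 0;  msf.cdmsf_frame0 = 0;
 *	msf.cdmsf_min1 = 5;  msf.cdmsf_sec1 = 30; msf.cdmsf_frame1 = 0;
 *	(void) ioctl(fd, CDROMPLAYMSF, &msf);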
24613 * 24614 * Return Code: the code returned by sd_send_scsi_cmd() 24615 * EFAULT if ddi_copyxxx() fails 24616 * ENXIO if fail ddi_get_soft_state 24617 * EINVAL if data pointer is NULL 24618 */ 24619 24620 static int 24621 sr_play_msf(dev_t dev, caddr_t data, int flag) 24622 { 24623 struct sd_lun *un; 24624 struct uscsi_cmd *com; 24625 struct cdrom_msf msf_struct; 24626 struct cdrom_msf *msf = &msf_struct; 24627 char cdb[CDB_GROUP1]; 24628 int rval; 24629 24630 if (data == NULL) { 24631 return (EINVAL); 24632 } 24633 24634 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24635 return (ENXIO); 24636 } 24637 24638 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24639 return (EFAULT); 24640 } 24641 24642 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24643 bzero(cdb, CDB_GROUP1); 24644 cdb[0] = SCMD_PLAYAUDIO_MSF; 24645 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24646 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24647 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24648 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24649 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24650 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24651 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24652 } else { 24653 cdb[3] = msf->cdmsf_min0; 24654 cdb[4] = msf->cdmsf_sec0; 24655 cdb[5] = msf->cdmsf_frame0; 24656 cdb[6] = msf->cdmsf_min1; 24657 cdb[7] = msf->cdmsf_sec1; 24658 cdb[8] = msf->cdmsf_frame1; 24659 } 24660 com->uscsi_cdb = cdb; 24661 com->uscsi_cdblen = CDB_GROUP1; 24662 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24663 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24664 SD_PATH_STANDARD); 24665 kmem_free(com, sizeof (*com)); 24666 return (rval); 24667 } 24668 24669 24670 /* 24671 * Function: sr_play_trkind() 24672 * 24673 * Description: This routine is the driver entry point for handling CD-ROM 24674 * ioctl requests to output the audio signals at the specified 24675 * starting address and continue the audio play until the specified 24676 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24677 * format. 24678 * 24679 * Arguments: dev - the device 'dev_t' 24680 * data - pointer to user provided audio track/index structure, 24681 * specifying start/end addresses. 24682 * flag - this argument is a pass through to ddi_copyxxx() 24683 * directly from the mode argument of ioctl(). 
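 *
 * Illustrative user-level sketch (hypothetical values and file
 * descriptor; not part of the driver). Play from track 1 index 1
 * through track 3 index 1:
 *
 *	struct cdrom_ti ti;
 *	ti.cdti_trk0 = 1;  ti.cdti_ind0 = 1;
 *	ti.cdti_trk1 = 3;  ti.cdti_ind1 = 1;
 *	(void) ioctl(fd, CDROMPLAYTRKIND, &ti);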
24684 * 24685 * Return Code: the code returned by sd_send_scsi_cmd() 24686 * EFAULT if ddi_copyxxx() fails 24687 * ENXIO if fail ddi_get_soft_state 24688 * EINVAL if data pointer is NULL 24689 */ 24690 24691 static int 24692 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24693 { 24694 struct cdrom_ti ti_struct; 24695 struct cdrom_ti *ti = &ti_struct; 24696 struct uscsi_cmd *com = NULL; 24697 char cdb[CDB_GROUP1]; 24698 int rval; 24699 24700 if (data == NULL) { 24701 return (EINVAL); 24702 } 24703 24704 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24705 return (EFAULT); 24706 } 24707 24708 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24709 bzero(cdb, CDB_GROUP1); 24710 cdb[0] = SCMD_PLAYAUDIO_TI; 24711 cdb[4] = ti->cdti_trk0; 24712 cdb[5] = ti->cdti_ind0; 24713 cdb[7] = ti->cdti_trk1; 24714 cdb[8] = ti->cdti_ind1; 24715 com->uscsi_cdb = cdb; 24716 com->uscsi_cdblen = CDB_GROUP1; 24717 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24718 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24719 SD_PATH_STANDARD); 24720 kmem_free(com, sizeof (*com)); 24721 return (rval); 24722 } 24723 24724 24725 /* 24726 * Function: sr_read_all_subcodes() 24727 * 24728 * Description: This routine is the driver entry point for handling CD-ROM 24729 * ioctl requests to return raw subcode data while the target is 24730 * playing audio (CDROMSUBCODE). 24731 * 24732 * Arguments: dev - the device 'dev_t' 24733 * data - pointer to user provided cdrom subcode structure, 24734 * specifying the transfer length and address. 24735 * flag - this argument is a pass through to ddi_copyxxx() 24736 * directly from the mode argument of ioctl(). 24737 * 24738 * Return Code: the code returned by sd_send_scsi_cmd() 24739 * EFAULT if ddi_copyxxx() fails 24740 * ENXIO if fail ddi_get_soft_state 24741 * EINVAL if data pointer is NULL 24742 */ 24743 24744 static int 24745 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24746 { 24747 struct sd_lun *un = NULL; 24748 struct uscsi_cmd *com = NULL; 24749 struct cdrom_subcode *subcode = NULL; 24750 int rval; 24751 size_t buflen; 24752 char cdb[CDB_GROUP5]; 24753 24754 #ifdef _MULTI_DATAMODEL 24755 /* To support ILP32 applications in an LP64 world */ 24756 struct cdrom_subcode32 cdrom_subcode32; 24757 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24758 #endif 24759 if (data == NULL) { 24760 return (EINVAL); 24761 } 24762 24763 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24764 return (ENXIO); 24765 } 24766 24767 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24768 24769 #ifdef _MULTI_DATAMODEL 24770 switch (ddi_model_convert_from(flag & FMODELS)) { 24771 case DDI_MODEL_ILP32: 24772 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24773 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24774 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24775 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24776 return (EFAULT); 24777 } 24778 /* Convert the ILP32 uscsi data from the application to LP64 */ 24779 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24780 break; 24781 case DDI_MODEL_NONE: 24782 if (ddi_copyin(data, subcode, 24783 sizeof (struct cdrom_subcode), flag)) { 24784 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24785 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24786 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24787 return (EFAULT); 24788 } 24789 break; 24790 } 24791 #else /* ! 
_MULTI_DATAMODEL */ 24792 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 24793 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24794 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24795 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24796 return (EFAULT); 24797 } 24798 #endif /* _MULTI_DATAMODEL */ 24799 24800 /* 24801 * Since MMC-2 expects max 3 bytes for length, check if the 24802 * length input is greater than 3 bytes 24803 */ 24804 if ((subcode->cdsc_length & 0xFF000000) != 0) { 24805 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24806 "sr_read_all_subcodes: " 24807 "cdrom transfer length too large: %d (limit %d)\n", 24808 subcode->cdsc_length, 0xFFFFFF); 24809 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24810 return (EINVAL); 24811 } 24812 24813 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 24814 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24815 bzero(cdb, CDB_GROUP5); 24816 24817 if (un->un_f_mmc_cap == TRUE) { 24818 cdb[0] = (char)SCMD_READ_CD; 24819 cdb[2] = (char)0xff; 24820 cdb[3] = (char)0xff; 24821 cdb[4] = (char)0xff; 24822 cdb[5] = (char)0xff; 24823 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 24824 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 24825 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 24826 cdb[10] = 1; 24827 } else { 24828 /* 24829 * Note: A vendor specific command (0xDF) is being used here to 24830 * request a read of all subcodes. 24831 */ 24832 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 24833 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 24834 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 24835 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 24836 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 24837 } 24838 com->uscsi_cdb = cdb; 24839 com->uscsi_cdblen = CDB_GROUP5; 24840 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 24841 com->uscsi_buflen = buflen; 24842 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24843 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24844 SD_PATH_STANDARD); 24845 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24846 kmem_free(com, sizeof (*com)); 24847 return (rval); 24848 } 24849 24850 24851 /* 24852 * Function: sr_read_subchannel() 24853 * 24854 * Description: This routine is the driver entry point for handling CD-ROM 24855 * ioctl requests to return the Q sub-channel data of the CD 24856 * current position block (CDROMSUBCHNL). The data includes the 24857 * track number, index number, absolute CD-ROM address (LBA or MSF 24858 * format per the user), track relative CD-ROM address (LBA or MSF 24859 * format per the user), control data and audio status. 24860 * 24861 * Arguments: dev - the device 'dev_t' 24862 * data - pointer to user provided cdrom sub-channel structure 24863 * flag - this argument is a pass through to ddi_copyxxx() 24864 * directly from the mode argument of ioctl(). 
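 *
 * Illustrative user-level sketch (hypothetical file descriptor; not
 * part of the driver). cdsc_format selects the address format on
 * input; the remaining fields are outputs:
 *
 *	struct cdrom_subchnl sc;
 *	sc.cdsc_format = CDROM_MSF;
 *	if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *		sc.cdsc_audiostatus, sc.cdsc_trk, sc.cdsc_ind and
 *		sc.cdsc_absaddr.msf are now valid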
24865 * 24866 * Return Code: the code returned by sd_send_scsi_cmd() 24867 * EFAULT if ddi_copyxxx() fails 24868 * ENXIO if fail ddi_get_soft_state 24869 * EINVAL if data pointer is NULL 24870 */ 24871 24872 static int 24873 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24874 { 24875 struct sd_lun *un; 24876 struct uscsi_cmd *com; 24877 struct cdrom_subchnl subchanel; 24878 struct cdrom_subchnl *subchnl = &subchanel; 24879 char cdb[CDB_GROUP1]; 24880 caddr_t buffer; 24881 int rval; 24882 24883 if (data == NULL) { 24884 return (EINVAL); 24885 } 24886 24887 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24888 (un->un_state == SD_STATE_OFFLINE)) { 24889 return (ENXIO); 24890 } 24891 24892 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24893 return (EFAULT); 24894 } 24895 24896 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24897 bzero(cdb, CDB_GROUP1); 24898 cdb[0] = SCMD_READ_SUBCHANNEL; 24899 /* Set the MSF bit based on the user requested address format */ 24900 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24901 /* 24902 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24903 * returned 24904 */ 24905 cdb[2] = 0x40; 24906 /* 24907 * Set byte 3 to specify the return data format. A value of 0x01 24908 * indicates that the CD-ROM current position should be returned. 24909 */ 24910 cdb[3] = 0x01; 24911 cdb[8] = 0x10; 24912 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24913 com->uscsi_cdb = cdb; 24914 com->uscsi_cdblen = CDB_GROUP1; 24915 com->uscsi_bufaddr = buffer; 24916 com->uscsi_buflen = 16; 24917 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24918 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24919 SD_PATH_STANDARD); 24920 if (rval != 0) { 24921 kmem_free(buffer, 16); 24922 kmem_free(com, sizeof (*com)); 24923 return (rval); 24924 } 24925 24926 /* Process the returned Q sub-channel data */ 24927 subchnl->cdsc_audiostatus = buffer[1]; 24928 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24929 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24930 subchnl->cdsc_trk = buffer[6]; 24931 subchnl->cdsc_ind = buffer[7]; 24932 if (subchnl->cdsc_format & CDROM_LBA) { 24933 subchnl->cdsc_absaddr.lba = 24934 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24935 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24936 subchnl->cdsc_reladdr.lba = 24937 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24938 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24939 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24940 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24941 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24942 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24943 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24944 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24945 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24946 } else { 24947 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24948 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24949 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24950 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24951 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24952 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24953 } 24954 kmem_free(buffer, 16); 24955 kmem_free(com, sizeof (*com)); 24956 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24957 != 0) { 24958 return (EFAULT); 24959 } 24960 return (rval); 24961 } 24962 24963 24964 /* 24965 * Function: sr_read_tocentry() 24966 * 
24967 * Description: This routine is the driver entry point for handling CD-ROM 24968 * ioctl requests to read from the Table of Contents (TOC) 24969 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 24970 * fields, the starting address (LBA or MSF format per the user) 24971 * and the data mode if the user specified track is a data track. 24972 * 24973 * Note: The READ HEADER (0x44) command used in this routine is 24974 * obsolete per the SCSI MMC spec but still supported in the 24975 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 24976 * therefore the command is still implemented in this routine. 24977 * 24978 * Arguments: dev - the device 'dev_t' 24979 * data - pointer to user provided toc entry structure, 24980 * specifying the track # and the address format 24981 * (LBA or MSF). 24982 * flag - this argument is a pass through to ddi_copyxxx() 24983 * directly from the mode argument of ioctl(). 24984 * 24985 * Return Code: the code returned by sd_send_scsi_cmd() 24986 * EFAULT if ddi_copyxxx() fails 24987 * ENXIO if fail ddi_get_soft_state 24988 * EINVAL if data pointer is NULL 24989 */ 24990 24991 static int 24992 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 24993 { 24994 struct sd_lun *un = NULL; 24995 struct uscsi_cmd *com; 24996 struct cdrom_tocentry toc_entry; 24997 struct cdrom_tocentry *entry = &toc_entry; 24998 caddr_t buffer; 24999 int rval; 25000 char cdb[CDB_GROUP1]; 25001 25002 if (data == NULL) { 25003 return (EINVAL); 25004 } 25005 25006 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25007 (un->un_state == SD_STATE_OFFLINE)) { 25008 return (ENXIO); 25009 } 25010 25011 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 25012 return (EFAULT); 25013 } 25014 25015 /* Validate the requested track and address format */ 25016 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 25017 return (EINVAL); 25018 } 25019 25020 if (entry->cdte_track == 0) { 25021 return (EINVAL); 25022 } 25023 25024 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 25025 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25026 bzero(cdb, CDB_GROUP1); 25027 25028 cdb[0] = SCMD_READ_TOC; 25029 /* Set the MSF bit based on the user requested address format */ 25030 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 25031 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 25032 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 25033 } else { 25034 cdb[6] = entry->cdte_track; 25035 } 25036 25037 /* 25038 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
25039 * (4 byte TOC response header + 8 byte track descriptor) 25040 */ 25041 cdb[8] = 12; 25042 com->uscsi_cdb = cdb; 25043 com->uscsi_cdblen = CDB_GROUP1; 25044 com->uscsi_bufaddr = buffer; 25045 com->uscsi_buflen = 0x0C; 25046 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 25047 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25048 SD_PATH_STANDARD); 25049 if (rval != 0) { 25050 kmem_free(buffer, 12); 25051 kmem_free(com, sizeof (*com)); 25052 return (rval); 25053 } 25054 25055 /* Process the toc entry */ 25056 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 25057 entry->cdte_ctrl = (buffer[5] & 0x0F); 25058 if (entry->cdte_format & CDROM_LBA) { 25059 entry->cdte_addr.lba = 25060 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 25061 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 25062 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 25063 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 25064 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 25065 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 25066 /* 25067 * Send a READ TOC command using the LBA address format to get 25068 * the LBA for the track requested so it can be used in the 25069 * READ HEADER request 25070 * 25071 * Note: The MSF bit of the READ HEADER command specifies the 25072 * output format. The block address specified in that command 25073 * must be in LBA format. 25074 */ 25075 cdb[1] = 0; 25076 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25077 SD_PATH_STANDARD); 25078 if (rval != 0) { 25079 kmem_free(buffer, 12); 25080 kmem_free(com, sizeof (*com)); 25081 return (rval); 25082 } 25083 } else { 25084 entry->cdte_addr.msf.minute = buffer[9]; 25085 entry->cdte_addr.msf.second = buffer[10]; 25086 entry->cdte_addr.msf.frame = buffer[11]; 25087 /* 25088 * Send a READ TOC command using the LBA address format to get 25089 * the LBA for the track requested so it can be used in the 25090 * READ HEADER request 25091 * 25092 * Note: The MSF bit of the READ HEADER command specifies the 25093 * output format. The block address specified in that command 25094 * must be in LBA format. 25095 */ 25096 cdb[1] = 0; 25097 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25098 SD_PATH_STANDARD); 25099 if (rval != 0) { 25100 kmem_free(buffer, 12); 25101 kmem_free(com, sizeof (*com)); 25102 return (rval); 25103 } 25104 } 25105 25106 /* 25107 * Build and send the READ HEADER command to determine the data mode of 25108 * the user specified track. 25109 */ 25110 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 25111 (entry->cdte_track != CDROM_LEADOUT)) { 25112 bzero(cdb, CDB_GROUP1); 25113 cdb[0] = SCMD_READ_HEADER; 25114 cdb[2] = buffer[8]; 25115 cdb[3] = buffer[9]; 25116 cdb[4] = buffer[10]; 25117 cdb[5] = buffer[11]; 25118 cdb[8] = 0x08; 25119 com->uscsi_buflen = 0x08; 25120 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25121 SD_PATH_STANDARD); 25122 if (rval == 0) { 25123 entry->cdte_datamode = buffer[0]; 25124 } else { 25125 /* 25126 * The READ HEADER command failed; since it is 25127 * obsolete in one spec, it's better to return 25128 * -1 for an invalid track so that we can still 25129 * receive the rest of the TOC data. 
25130 */ 25131 entry->cdte_datamode = (uchar_t)-1; 25132 } 25133 } else { 25134 entry->cdte_datamode = (uchar_t)-1; 25135 } 25136 25137 kmem_free(buffer, 12); 25138 kmem_free(com, sizeof (*com)); 25139 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 25140 return (EFAULT); 25141 25142 return (rval); 25143 } 25144 25145 25146 /* 25147 * Function: sr_read_tochdr() 25148 * 25149 * Description: This routine is the driver entry point for handling CD-ROM 25150 * ioctl requests to read the Table of Contents (TOC) header 25151 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 25152 * and ending track numbers. 25153 * 25154 * Arguments: dev - the device 'dev_t' 25155 * data - pointer to user provided toc header structure, 25156 * specifying the starting and ending track numbers. 25157 * flag - this argument is a pass through to ddi_copyxxx() 25158 * directly from the mode argument of ioctl(). 25159 * 25160 * Return Code: the code returned by sd_send_scsi_cmd() 25161 * EFAULT if ddi_copyxxx() fails 25162 * ENXIO if fail ddi_get_soft_state 25163 * EINVAL if data pointer is NULL 25164 */ 25165 25166 static int 25167 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 25168 { 25169 struct sd_lun *un; 25170 struct uscsi_cmd *com; 25171 struct cdrom_tochdr toc_header; 25172 struct cdrom_tochdr *hdr = &toc_header; 25173 char cdb[CDB_GROUP1]; 25174 int rval; 25175 caddr_t buffer; 25176 25177 if (data == NULL) { 25178 return (EINVAL); 25179 } 25180 25181 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25182 (un->un_state == SD_STATE_OFFLINE)) { 25183 return (ENXIO); 25184 } 25185 25186 buffer = kmem_zalloc(4, KM_SLEEP); 25187 bzero(cdb, CDB_GROUP1); 25188 cdb[0] = SCMD_READ_TOC; 25189 /* 25190 * Specifying a track number of 0x00 in the READ TOC command indicates 25191 * that the TOC header should be returned 25192 */ 25193 cdb[6] = 0x00; 25194 /* 25195 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 25196 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 25197 */ 25198 cdb[8] = 0x04; 25199 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25200 com->uscsi_cdb = cdb; 25201 com->uscsi_cdblen = CDB_GROUP1; 25202 com->uscsi_bufaddr = buffer; 25203 com->uscsi_buflen = 0x04; 25204 com->uscsi_timeout = 300; 25205 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25206 25207 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25208 SD_PATH_STANDARD); 25209 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 25210 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 25211 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 25212 } else { 25213 hdr->cdth_trk0 = buffer[2]; 25214 hdr->cdth_trk1 = buffer[3]; 25215 } 25216 kmem_free(buffer, 4); 25217 kmem_free(com, sizeof (*com)); 25218 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 25219 return (EFAULT); 25220 } 25221 return (rval); 25222 } 25223 25224 25225 /* 25226 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 25227 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 25228 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 25229 * digital audio and extended architecture digital audio. These modes are 25230 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 25231 * MMC specs. 
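 *
 * For reference, the per-block transfer sizes implied by each format
 * (see the CDROM_BLK_* values used by these routines):
 *
 *	mode 1 user data		2048 bytes
 *	mode 2 user data		2336 bytes
 *	CD-DA, no subcode		2352 bytes
 *	CD-DA plus Q subchannel		2368 bytes
 *	CD-DA plus all subcodes		2448 bytes
 *	subcode only			96 bytes (CDROM_BLK_SUBCODE)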
25232 * 25233 * In addition to support for the various data formats these routines also 25234 * include support for devices that implement only the direct access READ 25235 * commands (0x08, 0x28), devices that implement the READ_CD commands 25236 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 25237 * READ CDXA commands (0xD8, 0xDB) 25238 */ 25239 25240 /* 25241 * Function: sr_read_mode1() 25242 * 25243 * Description: This routine is the driver entry point for handling CD-ROM 25244 * ioctl read mode1 requests (CDROMREADMODE1). 25245 * 25246 * Arguments: dev - the device 'dev_t' 25247 * data - pointer to user provided cd read structure specifying 25248 * the lba buffer address and length. 25249 * flag - this argument is a pass through to ddi_copyxxx() 25250 * directly from the mode argument of ioctl(). 25251 * 25252 * Return Code: the code returned by sd_send_scsi_cmd() 25253 * EFAULT if ddi_copyxxx() fails 25254 * ENXIO if fail ddi_get_soft_state 25255 * EINVAL if data pointer is NULL 25256 */ 25257 25258 static int 25259 sr_read_mode1(dev_t dev, caddr_t data, int flag) 25260 { 25261 struct sd_lun *un; 25262 struct cdrom_read mode1_struct; 25263 struct cdrom_read *mode1 = &mode1_struct; 25264 int rval; 25265 #ifdef _MULTI_DATAMODEL 25266 /* To support ILP32 applications in an LP64 world */ 25267 struct cdrom_read32 cdrom_read32; 25268 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25269 #endif /* _MULTI_DATAMODEL */ 25270 25271 if (data == NULL) { 25272 return (EINVAL); 25273 } 25274 25275 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25276 (un->un_state == SD_STATE_OFFLINE)) { 25277 return (ENXIO); 25278 } 25279 25280 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25281 "sd_read_mode1: entry: un:0x%p\n", un); 25282 25283 #ifdef _MULTI_DATAMODEL 25284 switch (ddi_model_convert_from(flag & FMODELS)) { 25285 case DDI_MODEL_ILP32: 25286 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25287 return (EFAULT); 25288 } 25289 /* Convert the ILP32 uscsi data from the application to LP64 */ 25290 cdrom_read32tocdrom_read(cdrd32, mode1); 25291 break; 25292 case DDI_MODEL_NONE: 25293 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25294 return (EFAULT); 25295 } 25296 } 25297 #else /* ! _MULTI_DATAMODEL */ 25298 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25299 return (EFAULT); 25300 } 25301 #endif /* _MULTI_DATAMODEL */ 25302 25303 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 25304 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 25305 25306 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25307 "sd_read_mode1: exit: un:0x%p\n", un); 25308 25309 return (rval); 25310 } 25311 25312 25313 /* 25314 * Function: sr_read_cd_mode2() 25315 * 25316 * Description: This routine is the driver entry point for handling CD-ROM 25317 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25318 * support the READ CD (0xBE) command or the 1st generation 25319 * READ CD (0xD4) command. 25320 * 25321 * Arguments: dev - the device 'dev_t' 25322 * data - pointer to user provided cd read structure specifying 25323 * the lba buffer address and length. 25324 * flag - this argument is a pass through to ddi_copyxxx() 25325 * directly from the mode argument of ioctl(). 
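 *
 * Illustrative user-level sketch (hypothetical values and file
 * descriptor; not part of the driver). The driver derives the block
 * count below as cdread_buflen / 2336:
 *
 *	struct cdrom_read rd;
 *	rd.cdread_lba = lba;		starting block address
 *	rd.cdread_bufaddr = buf;	user buffer
 *	rd.cdread_buflen = 2336;	read a single block
 *	(void) ioctl(fd, CDROMREADMODE2, &rd);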
25326 * 25327 * Return Code: the code returned by sd_send_scsi_cmd() 25328 * EFAULT if ddi_copyxxx() fails 25329 * ENXIO if fail ddi_get_soft_state 25330 * EINVAL if data pointer is NULL 25331 */ 25332 25333 static int 25334 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 25335 { 25336 struct sd_lun *un; 25337 struct uscsi_cmd *com; 25338 struct cdrom_read mode2_struct; 25339 struct cdrom_read *mode2 = &mode2_struct; 25340 uchar_t cdb[CDB_GROUP5]; 25341 int nblocks; 25342 int rval; 25343 #ifdef _MULTI_DATAMODEL 25344 /* To support ILP32 applications in an LP64 world */ 25345 struct cdrom_read32 cdrom_read32; 25346 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25347 #endif /* _MULTI_DATAMODEL */ 25348 25349 if (data == NULL) { 25350 return (EINVAL); 25351 } 25352 25353 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25354 (un->un_state == SD_STATE_OFFLINE)) { 25355 return (ENXIO); 25356 } 25357 25358 #ifdef _MULTI_DATAMODEL 25359 switch (ddi_model_convert_from(flag & FMODELS)) { 25360 case DDI_MODEL_ILP32: 25361 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25362 return (EFAULT); 25363 } 25364 /* Convert the ILP32 uscsi data from the application to LP64 */ 25365 cdrom_read32tocdrom_read(cdrd32, mode2); 25366 break; 25367 case DDI_MODEL_NONE: 25368 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25369 return (EFAULT); 25370 } 25371 break; 25372 } 25373 25374 #else /* ! _MULTI_DATAMODEL */ 25375 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25376 return (EFAULT); 25377 } 25378 #endif /* _MULTI_DATAMODEL */ 25379 25380 bzero(cdb, sizeof (cdb)); 25381 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 25382 /* Read command supported by 1st generation ATAPI drives */ 25383 cdb[0] = SCMD_READ_CDD4; 25384 } else { 25385 /* Universal CD Access Command */ 25386 cdb[0] = SCMD_READ_CD; 25387 } 25388 25389 /* 25390 * Set expected sector type to: 2336 byte, Mode 2 (Yellow Book) 25391 */ 25392 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 25393 25394 /* set the start address */ 25395 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 25396 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 25397 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25398 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 25399 25400 /* set the transfer length */ 25401 nblocks = mode2->cdread_buflen / 2336; 25402 cdb[6] = (uchar_t)(nblocks >> 16); 25403 cdb[7] = (uchar_t)(nblocks >> 8); 25404 cdb[8] = (uchar_t)nblocks; 25405 25406 /* set the filter bits */ 25407 cdb[9] = CDROM_READ_CD_USERDATA; 25408 25409 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25410 com->uscsi_cdb = (caddr_t)cdb; 25411 com->uscsi_cdblen = sizeof (cdb); 25412 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25413 com->uscsi_buflen = mode2->cdread_buflen; 25414 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25415 25416 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25417 SD_PATH_STANDARD); 25418 kmem_free(com, sizeof (*com)); 25419 return (rval); 25420 } 25421 25422 25423 /* 25424 * Function: sr_read_mode2() 25425 * 25426 * Description: This routine is the driver entry point for handling CD-ROM 25427 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25428 * do not support the READ CD (0xBE) command. 25429 * 25430 * Arguments: dev - the device 'dev_t' 25431 * data - pointer to user provided cd read structure specifying 25432 * the lba buffer address and length. 25433 * flag - this argument is a pass through to ddi_copyxxx() 25434 * directly from the mode argument of ioctl(). 
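 *
 * Unlike sr_read_cd_mode2(), this path temporarily switches the
 * device and the driver soft state to a 2336 byte block size via
 * sr_sector_mode(), which is why it fails with EAGAIN while any
 * other command is outstanding and restores the original block size
 * before returning. The caller's address is given in 512 byte units
 * and is converted below with cdread_lba >>= 2; e.g. a cdread_lba of
 * 100 (512 byte blocks) becomes block 25 in the CDB.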
25435 * 25436 * Return Code: the code returned by sd_send_scsi_cmd() 25437 * EFAULT if ddi_copyxxx() fails 25438 * ENXIO if fail ddi_get_soft_state 25439 * EINVAL if data pointer is NULL 25440 * EIO if fail to reset block size 25441 * EAGAIN if commands are in progress in the driver 25442 */ 25443 25444 static int 25445 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25446 { 25447 struct sd_lun *un; 25448 struct cdrom_read mode2_struct; 25449 struct cdrom_read *mode2 = &mode2_struct; 25450 int rval; 25451 uint32_t restore_blksize; 25452 struct uscsi_cmd *com; 25453 uchar_t cdb[CDB_GROUP0]; 25454 int nblocks; 25455 25456 #ifdef _MULTI_DATAMODEL 25457 /* To support ILP32 applications in an LP64 world */ 25458 struct cdrom_read32 cdrom_read32; 25459 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25460 #endif /* _MULTI_DATAMODEL */ 25461 25462 if (data == NULL) { 25463 return (EINVAL); 25464 } 25465 25466 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25467 (un->un_state == SD_STATE_OFFLINE)) { 25468 return (ENXIO); 25469 } 25470 25471 /* 25472 * Because this routine will update the device and driver block size 25473 * being used we want to make sure there are no commands in progress. 25474 * If commands are in progress the user will have to try again. 25475 * 25476 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25477 * in sdioctl to protect commands from sdioctl through to the top of 25478 * sd_uscsi_strategy. See sdioctl for details. 25479 */ 25480 mutex_enter(SD_MUTEX(un)); 25481 if (un->un_ncmds_in_driver != 1) { 25482 mutex_exit(SD_MUTEX(un)); 25483 return (EAGAIN); 25484 } 25485 mutex_exit(SD_MUTEX(un)); 25486 25487 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25488 "sd_read_mode2: entry: un:0x%p\n", un); 25489 25490 #ifdef _MULTI_DATAMODEL 25491 switch (ddi_model_convert_from(flag & FMODELS)) { 25492 case DDI_MODEL_ILP32: 25493 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25494 return (EFAULT); 25495 } 25496 /* Convert the ILP32 uscsi data from the application to LP64 */ 25497 cdrom_read32tocdrom_read(cdrd32, mode2); 25498 break; 25499 case DDI_MODEL_NONE: 25500 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25501 return (EFAULT); 25502 } 25503 break; 25504 } 25505 #else /* ! 
_MULTI_DATAMODEL */ 25506 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 25507 return (EFAULT); 25508 } 25509 #endif /* _MULTI_DATAMODEL */ 25510 25511 /* Store the current target block size for restoration later */ 25512 restore_blksize = un->un_tgt_blocksize; 25513 25514 /* Change the device and soft state target block size to 2336 */ 25515 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 25516 rval = EIO; 25517 goto done; 25518 } 25519 25520 25521 bzero(cdb, sizeof (cdb)); 25522 25523 /* set READ operation */ 25524 cdb[0] = SCMD_READ; 25525 25526 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 25527 mode2->cdread_lba >>= 2; 25528 25529 /* set the start address */ 25530 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 25531 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25532 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 25533 25534 /* set the transfer length */ 25535 nblocks = mode2->cdread_buflen / 2336; 25536 cdb[4] = (uchar_t)nblocks & 0xFF; 25537 25538 /* build command */ 25539 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25540 com->uscsi_cdb = (caddr_t)cdb; 25541 com->uscsi_cdblen = sizeof (cdb); 25542 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25543 com->uscsi_buflen = mode2->cdread_buflen; 25544 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25545 25546 /* 25547 * Issue SCSI command with user space address for read buffer. 25548 * 25549 * This sends the command through main channel in the driver. 25550 * 25551 * Since this is accessed via an IOCTL call, we go through the 25552 * standard path, so that if the device was powered down, then 25553 * it would be 'awakened' to handle the command. 25554 */ 25555 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25556 SD_PATH_STANDARD); 25557 25558 kmem_free(com, sizeof (*com)); 25559 25560 /* Restore the device and soft state target block size */ 25561 if (sr_sector_mode(dev, restore_blksize) != 0) { 25562 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25563 "can't do switch back to mode 1\n"); 25564 /* 25565 * If sd_send_scsi_READ succeeded we still need to report 25566 * an error because we failed to reset the block size 25567 */ 25568 if (rval == 0) { 25569 rval = EIO; 25570 } 25571 } 25572 25573 done: 25574 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25575 "sd_read_mode2: exit: un:0x%p\n", un); 25576 25577 return (rval); 25578 } 25579 25580 25581 /* 25582 * Function: sr_sector_mode() 25583 * 25584 * Description: This utility function is used by sr_read_mode2 to set the target 25585 * block size based on the user specified size. This is a legacy 25586 * implementation based upon a vendor specific mode page 25587 * 25588 * Arguments: dev - the device 'dev_t' 25589 * data - flag indicating if block size is being set to 2336 or 25590 * 512. 
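 *
 * (The requested size itself arrives in the blksize parameter.) As a
 * sketch of the intent, the 20 byte mode select buffer built below
 * encodes the size and the 2336 byte mode flag as:
 *
 *	select[10] = (blksize >> 8) & 0xff;	block size, MSB
 *	select[11] = blksize & 0xff;		block size, LSB
 *	select[14] |= 0x01;			only for SD_MODE2_BLKSIZE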
25591 * 25592 * Return Code: the code returned by sd_send_scsi_cmd() 25593 * EFAULT if ddi_copyxxx() fails 25594 * ENXIO if fail ddi_get_soft_state 25595 * EINVAL if data pointer is NULL 25596 */ 25597 25598 static int 25599 sr_sector_mode(dev_t dev, uint32_t blksize) 25600 { 25601 struct sd_lun *un; 25602 uchar_t *sense; 25603 uchar_t *select; 25604 int rval; 25605 25606 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25607 (un->un_state == SD_STATE_OFFLINE)) { 25608 return (ENXIO); 25609 } 25610 25611 sense = kmem_zalloc(20, KM_SLEEP); 25612 25613 /* Note: This is a vendor specific mode page (0x81) */ 25614 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 25615 SD_PATH_STANDARD)) != 0) { 25616 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25617 "sr_sector_mode: Mode Sense failed\n"); 25618 kmem_free(sense, 20); 25619 return (rval); 25620 } 25621 select = kmem_zalloc(20, KM_SLEEP); 25622 select[3] = 0x08; 25623 select[10] = ((blksize >> 8) & 0xff); 25624 select[11] = (blksize & 0xff); 25625 select[12] = 0x01; 25626 select[13] = 0x06; 25627 select[14] = sense[14]; 25628 select[15] = sense[15]; 25629 if (blksize == SD_MODE2_BLKSIZE) { 25630 select[14] |= 0x01; 25631 } 25632 25633 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 25634 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 25635 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25636 "sr_sector_mode: Mode Select failed\n"); 25637 } else { 25638 /* 25639 * Only update the softstate block size if we successfully 25640 * changed the device block mode. 25641 */ 25642 mutex_enter(SD_MUTEX(un)); 25643 sd_update_block_info(un, blksize, 0); 25644 mutex_exit(SD_MUTEX(un)); 25645 } 25646 kmem_free(sense, 20); 25647 kmem_free(select, 20); 25648 return (rval); 25649 } 25650 25651 25652 /* 25653 * Function: sr_read_cdda() 25654 * 25655 * Description: This routine is the driver entry point for handling CD-ROM 25656 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 25657 * the target supports CDDA these requests are handled via a vendor 25658 * specific command (0xD8) If the target does not support CDDA 25659 * these requests are handled via the READ CD command (0xBE). 25660 * 25661 * Arguments: dev - the device 'dev_t' 25662 * data - pointer to user provided CD-DA structure specifying 25663 * the track starting address, transfer length, and 25664 * subcode options. 25665 * flag - this argument is a pass through to ddi_copyxxx() 25666 * directly from the mode argument of ioctl(). 
25667 * 25668 * Return Code: the code returned by sd_send_scsi_cmd() 25669 * EFAULT if ddi_copyxxx() fails 25670 * ENXIO if fail ddi_get_soft_state 25671 * EINVAL if invalid arguments are provided 25672 * ENOTTY 25673 */ 25674 25675 static int 25676 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25677 { 25678 struct sd_lun *un; 25679 struct uscsi_cmd *com; 25680 struct cdrom_cdda *cdda; 25681 int rval; 25682 size_t buflen; 25683 char cdb[CDB_GROUP5]; 25684 25685 #ifdef _MULTI_DATAMODEL 25686 /* To support ILP32 applications in an LP64 world */ 25687 struct cdrom_cdda32 cdrom_cdda32; 25688 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25689 #endif /* _MULTI_DATAMODEL */ 25690 25691 if (data == NULL) { 25692 return (EINVAL); 25693 } 25694 25695 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25696 return (ENXIO); 25697 } 25698 25699 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25700 25701 #ifdef _MULTI_DATAMODEL 25702 switch (ddi_model_convert_from(flag & FMODELS)) { 25703 case DDI_MODEL_ILP32: 25704 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25705 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25706 "sr_read_cdda: ddi_copyin Failed\n"); 25707 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25708 return (EFAULT); 25709 } 25710 /* Convert the ILP32 uscsi data from the application to LP64 */ 25711 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25712 break; 25713 case DDI_MODEL_NONE: 25714 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25715 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25716 "sr_read_cdda: ddi_copyin Failed\n"); 25717 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25718 return (EFAULT); 25719 } 25720 break; 25721 } 25722 #else /* ! _MULTI_DATAMODEL */ 25723 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25724 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25725 "sr_read_cdda: ddi_copyin Failed\n"); 25726 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25727 return (EFAULT); 25728 } 25729 #endif /* _MULTI_DATAMODEL */ 25730 25731 /* 25732 * Since MMC-2 expects max 3 bytes for length, check if the 25733 * length input is greater than 3 bytes 25734 */ 25735 if ((cdda->cdda_length & 0xFF000000) != 0) { 25736 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25737 "cdrom transfer length too large: %d (limit %d)\n", 25738 cdda->cdda_length, 0xFFFFFF); 25739 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25740 return (EINVAL); 25741 } 25742 25743 switch (cdda->cdda_subcode) { 25744 case CDROM_DA_NO_SUBCODE: 25745 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25746 break; 25747 case CDROM_DA_SUBQ: 25748 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25749 break; 25750 case CDROM_DA_ALL_SUBCODE: 25751 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25752 break; 25753 case CDROM_DA_SUBCODE_ONLY: 25754 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25755 break; 25756 default: 25757 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25758 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25759 cdda->cdda_subcode); 25760 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25761 return (EINVAL); 25762 } 25763 25764 /* Build and send the command */ 25765 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25766 bzero(cdb, CDB_GROUP5); 25767 25768 if (un->un_f_cfg_cdda == TRUE) { 25769 cdb[0] = (char)SCMD_READ_CD; 25770 cdb[1] = 0x04; 25771 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25772 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25773 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25774 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25775 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25776 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25777 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25778 cdb[9] = 0x10; 25779 switch (cdda->cdda_subcode) { 25780 case CDROM_DA_NO_SUBCODE : 25781 cdb[10] = 0x0; 25782 break; 25783 case CDROM_DA_SUBQ : 25784 cdb[10] = 0x2; 25785 break; 25786 case CDROM_DA_ALL_SUBCODE : 25787 cdb[10] = 0x1; 25788 break; 25789 case CDROM_DA_SUBCODE_ONLY : 25790 /* FALLTHROUGH */ 25791 default : 25792 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25793 kmem_free(com, sizeof (*com)); 25794 return (ENOTTY); 25795 } 25796 } else { 25797 cdb[0] = (char)SCMD_READ_CDDA; 25798 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25799 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25800 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25801 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25802 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25803 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25804 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25805 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25806 cdb[10] = cdda->cdda_subcode; 25807 } 25808 25809 com->uscsi_cdb = cdb; 25810 com->uscsi_cdblen = CDB_GROUP5; 25811 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25812 com->uscsi_buflen = buflen; 25813 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25814 25815 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25816 SD_PATH_STANDARD); 25817 25818 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25819 kmem_free(com, sizeof (*com)); 25820 return (rval); 25821 } 25822 25823 25824 /* 25825 * Function: sr_read_cdxa() 25826 * 25827 * Description: This routine is the driver entry point for handling CD-ROM 25828 * ioctl requests to return CD-XA (Extended Architecture) data. 25829 * (CDROMCDXA). 25830 * 25831 * Arguments: dev - the device 'dev_t' 25832 * data - pointer to user provided CD-XA structure specifying 25833 * the data starting address, transfer length, and format 25834 * flag - this argument is a pass through to ddi_copyxxx() 25835 * directly from the mode argument of ioctl(). 25836 * 25837 * Return Code: the code returned by sd_send_scsi_cmd() 25838 * EFAULT if ddi_copyxxx() fails 25839 * ENXIO if fail ddi_get_soft_state 25840 * EINVAL if data pointer is NULL 25841 */ 25842 25843 static int 25844 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25845 { 25846 struct sd_lun *un; 25847 struct uscsi_cmd *com; 25848 struct cdrom_cdxa *cdxa; 25849 int rval; 25850 size_t buflen; 25851 char cdb[CDB_GROUP5]; 25852 uchar_t read_flags; 25853 25854 #ifdef _MULTI_DATAMODEL 25855 /* To support ILP32 applications in an LP64 world */ 25856 struct cdrom_cdxa32 cdrom_cdxa32; 25857 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25858 #endif /* _MULTI_DATAMODEL */ 25859 25860 if (data == NULL) { 25861 return (EINVAL); 25862 } 25863 25864 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25865 return (ENXIO); 25866 } 25867 25868 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25869 25870 #ifdef _MULTI_DATAMODEL 25871 switch (ddi_model_convert_from(flag & FMODELS)) { 25872 case DDI_MODEL_ILP32: 25873 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25874 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25875 return (EFAULT); 25876 } 25877 /* 25878 * Convert the ILP32 uscsi data from the 25879 * application to LP64 for internal use. 
25880 */ 25881 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 25882 break; 25883 case DDI_MODEL_NONE: 25884 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 25885 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25886 return (EFAULT); 25887 } 25888 break; 25889 } 25890 #else /* ! _MULTI_DATAMODEL */ 25891 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 25892 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25893 return (EFAULT); 25894 } 25895 #endif /* _MULTI_DATAMODEL */ 25896 25897 /* 25898 * Since MMC-2 expects max 3 bytes for length, check if the 25899 * length input is greater than 3 bytes 25900 */ 25901 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 25902 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 25903 "cdrom transfer length too large: %d (limit %d)\n", 25904 cdxa->cdxa_length, 0xFFFFFF); 25905 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25906 return (EINVAL); 25907 } 25908 25909 switch (cdxa->cdxa_format) { 25910 case CDROM_XA_DATA: 25911 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 25912 read_flags = 0x10; 25913 break; 25914 case CDROM_XA_SECTOR_DATA: 25915 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 25916 read_flags = 0xf8; 25917 break; 25918 case CDROM_XA_DATA_W_ERROR: 25919 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 25920 read_flags = 0xfc; 25921 break; 25922 default: 25923 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25924 "sr_read_cdxa: Format '0x%x' Not Supported\n", 25925 cdxa->cdxa_format); 25926 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25927 return (EINVAL); 25928 } 25929 25930 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25931 bzero(cdb, CDB_GROUP5); 25932 if (un->un_f_mmc_cap == TRUE) { 25933 cdb[0] = (char)SCMD_READ_CD; 25934 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25935 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25936 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25937 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25938 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25939 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25940 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 25941 cdb[9] = (char)read_flags; 25942 } else { 25943 /* 25944 * Note: A vendor specific command (0xDB) is being used her to 25945 * request a read of all subcodes. 
25946 */ 25947 cdb[0] = (char)SCMD_READ_CDXA; 25948 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25949 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25950 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25951 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25952 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25953 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25954 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25955 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25956 cdb[10] = cdxa->cdxa_format; 25957 } 25958 com->uscsi_cdb = cdb; 25959 com->uscsi_cdblen = CDB_GROUP5; 25960 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25961 com->uscsi_buflen = buflen; 25962 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25963 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25964 SD_PATH_STANDARD); 25965 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25966 kmem_free(com, sizeof (*com)); 25967 return (rval); 25968 } 25969 25970 25971 /* 25972 * Function: sr_eject() 25973 * 25974 * Description: This routine is the driver entry point for handling CD-ROM 25975 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25976 * 25977 * Arguments: dev - the device 'dev_t' 25978 * 25979 * Return Code: the code returned by sd_send_scsi_cmd() 25980 */ 25981 25982 static int 25983 sr_eject(dev_t dev) 25984 { 25985 struct sd_lun *un; 25986 int rval; 25987 25988 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25989 (un->un_state == SD_STATE_OFFLINE)) { 25990 return (ENXIO); 25991 } 25992 25993 /* 25994 * To prevent race conditions with the eject 25995 * command, keep track of an eject command as 25996 * it progresses. If we are already handling 25997 * an eject command in the driver for the given 25998 * unit and another request to eject is received 25999 * immediately return EAGAIN so we don't lose 26000 * the command if the current eject command fails. 26001 */ 26002 mutex_enter(SD_MUTEX(un)); 26003 if (un->un_f_ejecting == TRUE) { 26004 mutex_exit(SD_MUTEX(un)); 26005 return (EAGAIN); 26006 } 26007 un->un_f_ejecting = TRUE; 26008 mutex_exit(SD_MUTEX(un)); 26009 26010 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 26011 SD_PATH_STANDARD)) != 0) { 26012 mutex_enter(SD_MUTEX(un)); 26013 un->un_f_ejecting = FALSE; 26014 mutex_exit(SD_MUTEX(un)); 26015 return (rval); 26016 } 26017 26018 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 26019 SD_PATH_STANDARD); 26020 26021 if (rval == 0) { 26022 mutex_enter(SD_MUTEX(un)); 26023 sr_ejected(un); 26024 un->un_mediastate = DKIO_EJECTED; 26025 un->un_f_ejecting = FALSE; 26026 cv_broadcast(&un->un_state_cv); 26027 mutex_exit(SD_MUTEX(un)); 26028 } else { 26029 mutex_enter(SD_MUTEX(un)); 26030 un->un_f_ejecting = FALSE; 26031 mutex_exit(SD_MUTEX(un)); 26032 } 26033 return (rval); 26034 } 26035 26036 26037 /* 26038 * Function: sr_ejected() 26039 * 26040 * Description: This routine updates the soft state structure to invalidate the 26041 * geometry information after the media has been ejected or a 26042 * media eject has been detected. 
26043 * 26044 * Arguments: un - driver soft state (unit) structure 26045 */ 26046 26047 static void 26048 sr_ejected(struct sd_lun *un) 26049 { 26050 struct sd_errstats *stp; 26051 26052 ASSERT(un != NULL); 26053 ASSERT(mutex_owned(SD_MUTEX(un))); 26054 26055 un->un_f_blockcount_is_valid = FALSE; 26056 un->un_f_tgt_blocksize_is_valid = FALSE; 26057 mutex_exit(SD_MUTEX(un)); 26058 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 26059 mutex_enter(SD_MUTEX(un)); 26060 26061 if (un->un_errstats != NULL) { 26062 stp = (struct sd_errstats *)un->un_errstats->ks_data; 26063 stp->sd_capacity.value.ui64 = 0; 26064 } 26065 26066 /* remove "capacity-of-device" properties */ 26067 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 26068 "device-nblocks"); 26069 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 26070 "device-blksize"); 26071 } 26072 26073 26074 /* 26075 * Function: sr_check_wp() 26076 * 26077 * Description: This routine checks the write protection of a removable 26078 * media disk and hotpluggable devices via the write protect bit of 26079 * the Mode Page Header device specific field. Some devices choke 26080 * on unsupported mode page. In order to workaround this issue, 26081 * this routine has been implemented to use 0x3f mode page(request 26082 * for all pages) for all device types. 26083 * 26084 * Arguments: dev - the device 'dev_t' 26085 * 26086 * Return Code: int indicating if the device is write protected (1) or not (0) 26087 * 26088 * Context: Kernel thread. 26089 * 26090 */ 26091 26092 static int 26093 sr_check_wp(dev_t dev) 26094 { 26095 struct sd_lun *un; 26096 uchar_t device_specific; 26097 uchar_t *sense; 26098 int hdrlen; 26099 int rval = FALSE; 26100 26101 /* 26102 * Note: The return codes for this routine should be reworked to 26103 * properly handle the case of a NULL softstate. 26104 */ 26105 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26106 return (FALSE); 26107 } 26108 26109 if (un->un_f_cfg_is_atapi == TRUE) { 26110 /* 26111 * The mode page contents are not required; set the allocation 26112 * length for the mode page header only 26113 */ 26114 hdrlen = MODE_HEADER_LENGTH_GRP2; 26115 sense = kmem_zalloc(hdrlen, KM_SLEEP); 26116 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 26117 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 26118 goto err_exit; 26119 device_specific = 26120 ((struct mode_header_grp2 *)sense)->device_specific; 26121 } else { 26122 hdrlen = MODE_HEADER_LENGTH; 26123 sense = kmem_zalloc(hdrlen, KM_SLEEP); 26124 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 26125 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 26126 goto err_exit; 26127 device_specific = 26128 ((struct mode_header *)sense)->device_specific; 26129 } 26130 26131 /* 26132 * Write protect mode sense failed; not all disks 26133 * understand this query. Return FALSE assuming that 26134 * these devices are not writable. 26135 */ 26136 if (device_specific & WRITE_PROTECT) { 26137 rval = TRUE; 26138 } 26139 26140 err_exit: 26141 kmem_free(sense, hdrlen); 26142 return (rval); 26143 } 26144 26145 /* 26146 * Function: sr_volume_ctrl() 26147 * 26148 * Description: This routine is the driver entry point for handling CD-ROM 26149 * audio output volume ioctl requests. (CDROMVOLCTRL) 26150 * 26151 * Arguments: dev - the device 'dev_t' 26152 * data - pointer to user audio volume control structure 26153 * flag - this argument is a pass through to ddi_copyxxx() 26154 * directly from the mode argument of ioctl(). 
26155 * 26156 * Return Code: the code returned by sd_send_scsi_cmd() 26157 * EFAULT if ddi_copyxxx() fails 26158 * ENXIO if fail ddi_get_soft_state 26159 * EINVAL if data pointer is NULL 26160 * 26161 */ 26162 26163 static int 26164 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 26165 { 26166 struct sd_lun *un; 26167 struct cdrom_volctrl volume; 26168 struct cdrom_volctrl *vol = &volume; 26169 uchar_t *sense_page; 26170 uchar_t *select_page; 26171 uchar_t *sense; 26172 uchar_t *select; 26173 int sense_buflen; 26174 int select_buflen; 26175 int rval; 26176 26177 if (data == NULL) { 26178 return (EINVAL); 26179 } 26180 26181 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26182 (un->un_state == SD_STATE_OFFLINE)) { 26183 return (ENXIO); 26184 } 26185 26186 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 26187 return (EFAULT); 26188 } 26189 26190 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26191 struct mode_header_grp2 *sense_mhp; 26192 struct mode_header_grp2 *select_mhp; 26193 int bd_len; 26194 26195 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 26196 select_buflen = MODE_HEADER_LENGTH_GRP2 + 26197 MODEPAGE_AUDIO_CTRL_LEN; 26198 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26199 select = kmem_zalloc(select_buflen, KM_SLEEP); 26200 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26201 sense_buflen, MODEPAGE_AUDIO_CTRL, 26202 SD_PATH_STANDARD)) != 0) { 26203 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 26204 "sr_volume_ctrl: Mode Sense Failed\n"); 26205 kmem_free(sense, sense_buflen); 26206 kmem_free(select, select_buflen); 26207 return (rval); 26208 } 26209 sense_mhp = (struct mode_header_grp2 *)sense; 26210 select_mhp = (struct mode_header_grp2 *)select; 26211 bd_len = (sense_mhp->bdesc_length_hi << 8) | 26212 sense_mhp->bdesc_length_lo; 26213 if (bd_len > MODE_BLK_DESC_LENGTH) { 26214 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26215 "sr_volume_ctrl: Mode Sense returned invalid " 26216 "block descriptor length\n"); 26217 kmem_free(sense, sense_buflen); 26218 kmem_free(select, select_buflen); 26219 return (EIO); 26220 } 26221 sense_page = (uchar_t *) 26222 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26223 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 26224 select_mhp->length_msb = 0; 26225 select_mhp->length_lsb = 0; 26226 select_mhp->bdesc_length_hi = 0; 26227 select_mhp->bdesc_length_lo = 0; 26228 } else { 26229 struct mode_header *sense_mhp, *select_mhp; 26230 26231 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26232 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26233 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26234 select = kmem_zalloc(select_buflen, KM_SLEEP); 26235 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26236 sense_buflen, MODEPAGE_AUDIO_CTRL, 26237 SD_PATH_STANDARD)) != 0) { 26238 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26239 "sr_volume_ctrl: Mode Sense Failed\n"); 26240 kmem_free(sense, sense_buflen); 26241 kmem_free(select, select_buflen); 26242 return (rval); 26243 } 26244 sense_mhp = (struct mode_header *)sense; 26245 select_mhp = (struct mode_header *)select; 26246 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26247 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26248 "sr_volume_ctrl: Mode Sense returned invalid " 26249 "block descriptor length\n"); 26250 kmem_free(sense, sense_buflen); 26251 kmem_free(select, select_buflen); 26252 return (EIO); 26253 } 26254 sense_page = (uchar_t *) 26255 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 26256 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 26257 select_mhp->length = 0; 26258 select_mhp->bdesc_length = 0; 26259 } 26260 /* 26261 * Note: An audio control data structure could be created and overlayed 26262 * on the following in place of the array indexing method implemented. 26263 */ 26264 26265 /* Build the select data for the user volume data */ 26266 select_page[0] = MODEPAGE_AUDIO_CTRL; 26267 select_page[1] = 0xE; 26268 /* Set the immediate bit */ 26269 select_page[2] = 0x04; 26270 /* Zero out reserved fields */ 26271 select_page[3] = 0x00; 26272 select_page[4] = 0x00; 26273 /* Return sense data for fields not to be modified */ 26274 select_page[5] = sense_page[5]; 26275 select_page[6] = sense_page[6]; 26276 select_page[7] = sense_page[7]; 26277 /* Set the user specified volume levels for channel 0 and 1 */ 26278 select_page[8] = 0x01; 26279 select_page[9] = vol->channel0; 26280 select_page[10] = 0x02; 26281 select_page[11] = vol->channel1; 26282 /* Channel 2 and 3 are currently unsupported so return the sense data */ 26283 select_page[12] = sense_page[12]; 26284 select_page[13] = sense_page[13]; 26285 select_page[14] = sense_page[14]; 26286 select_page[15] = sense_page[15]; 26287 26288 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26289 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 26290 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26291 } else { 26292 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26293 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26294 } 26295 26296 kmem_free(sense, sense_buflen); 26297 kmem_free(select, select_buflen); 26298 return (rval); 26299 } 26300 26301 26302 /* 26303 * Function: sr_read_sony_session_offset() 26304 * 26305 * Description: This routine is the driver entry point for handling CD-ROM 26306 * ioctl requests for session offset information. (CDROMREADOFFSET) 26307 * The address of the first track in the last session of a 26308 * multi-session CD-ROM is returned 26309 * 26310 * Note: This routine uses a vendor specific key value in the 26311 * command control field without implementing any vendor check here 26312 * or in the ioctl routine. 26313 * 26314 * Arguments: dev - the device 'dev_t' 26315 * data - pointer to an int to hold the requested address 26316 * flag - this argument is a pass through to ddi_copyxxx() 26317 * directly from the mode argument of ioctl(). 26318 * 26319 * Return Code: the code returned by sd_send_scsi_cmd() 26320 * EFAULT if ddi_copyxxx() fails 26321 * ENXIO if fail ddi_get_soft_state 26322 * EINVAL if data pointer is NULL 26323 */ 26324 26325 static int 26326 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 26327 { 26328 struct sd_lun *un; 26329 struct uscsi_cmd *com; 26330 caddr_t buffer; 26331 char cdb[CDB_GROUP1]; 26332 int session_offset = 0; 26333 int rval; 26334 26335 if (data == NULL) { 26336 return (EINVAL); 26337 } 26338 26339 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26340 (un->un_state == SD_STATE_OFFLINE)) { 26341 return (ENXIO); 26342 } 26343 26344 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26345 bzero(cdb, CDB_GROUP1); 26346 cdb[0] = SCMD_READ_TOC; 26347 /* 26348 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26349 * (4 byte TOC response header + 8 byte response data) 26350 */ 26351 cdb[8] = SONY_SESSION_OFFSET_LEN; 26352 /* Byte 9 is the control byte. 
A vendor specific value is used. */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The returned offset is in units of the current target
		 * block size. Convert it to 2K blocks before returning
		 * it to the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache constructor for the wmap cache for the
 *		read/modify/write devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be initialized.
 *	      un - sd_lun structure for the device.
 *	      flags - the km flags passed to the constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the
 *		read/modify/write devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
 *	      un - sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the specified range of blocks to ensure that a
 *		read-modify-write is atomic and that no other I/O writes
 *		to the same location. The range is specified in terms of
 *		start and end blocks. Block numbers are the actual media
 *		block numbers, not system block numbers.
 *
 * Arguments: un - sd_lun structure for the device.
 *	      startb - The starting block number
 *	      endb - The end block number
 *	      typ - type of I/O: simple or read-modify-write
 *
 * Return Code: wm - pointer to the wmap structure.
 *
 * Context: This routine can sleep.
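 *
 * Note: the body below is a small state machine; as a rough sketch (the
 * states and transitions here are read directly from the code itself):
 *
 *	SD_WM_CHK_LIST   -- no conflicting wmap -->  SD_WM_LOCK_RANGE
 *	SD_WM_CHK_LIST   -- range in use -------->  SD_WM_WAIT_MAP
 *	SD_WM_LOCK_RANGE -- wmap obtained ------->  SD_WM_DONE
 *	SD_WM_LOCK_RANGE -- slept for memory ---->  SD_WM_CHK_LIST
 *	SD_WM_WAIT_MAP   -- woken up ------------>  SD_WM_CHK_LIST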
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending, then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep an onlist
						 * wmp while waiting; this
						 * macro also sets
						 * wmp = NULL.
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which the
					 * wait is done; since tmp_wmp
					 * points to the in-use wmap, set
					 * sl_wmp to tmp_wmp and change the
					 * state to wait.
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked; try to get a wmap.
			 * First attempt with KM_NOSLEEP: we want to avoid
			 * sleeping if possible, as we would have to release
			 * the sd mutex in order to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * We released the mutex, so recheck by
				 * going back to the check-list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We can exit the state machine since we
				 * have the wmap. Do the housekeeping first:
				 * place the wmap on the wmap list if it is
				 * not already there, and then set the state
				 * to done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list, link it in.
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one
			 * is waiting for it.
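			 * The wm_wanted_count check below implements
			 * this reuse.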
26571 */ 26572 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 26573 if (sl_wmp->wm_wanted_count == 0) { 26574 if (wmp != NULL) 26575 CHK_N_FREEWMP(un, wmp); 26576 wmp = sl_wmp; 26577 } 26578 sl_wmp = NULL; 26579 /* 26580 * After waking up, need to recheck for availability of 26581 * range. 26582 */ 26583 state = SD_WM_CHK_LIST; 26584 break; 26585 26586 default: 26587 panic("sd_range_lock: " 26588 "Unknown state %d in sd_range_lock", state); 26589 /*NOTREACHED*/ 26590 } /* switch(state) */ 26591 26592 } /* while(state != SD_WM_DONE) */ 26593 26594 mutex_exit(SD_MUTEX(un)); 26595 26596 ASSERT(wmp != NULL); 26597 26598 return (wmp); 26599 } 26600 26601 26602 /* 26603 * Function: sd_get_range() 26604 * 26605 * Description: Find if there any overlapping I/O to this one 26606 * Returns the write-map of 1st such I/O, NULL otherwise. 26607 * 26608 * Arguments: un - sd_lun structure for the device. 26609 * startb - The starting block number 26610 * endb - The end block number 26611 * 26612 * Return Code: wm - pointer to the wmap structure. 26613 */ 26614 26615 static struct sd_w_map * 26616 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 26617 { 26618 struct sd_w_map *wmp; 26619 26620 ASSERT(un != NULL); 26621 26622 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 26623 if (!(wmp->wm_flags & SD_WM_BUSY)) { 26624 continue; 26625 } 26626 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 26627 break; 26628 } 26629 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 26630 break; 26631 } 26632 } 26633 26634 return (wmp); 26635 } 26636 26637 26638 /* 26639 * Function: sd_free_inlist_wmap() 26640 * 26641 * Description: Unlink and free a write map struct. 26642 * 26643 * Arguments: un - sd_lun structure for the device. 26644 * wmp - sd_w_map which needs to be unlinked. 26645 */ 26646 26647 static void 26648 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 26649 { 26650 ASSERT(un != NULL); 26651 26652 if (un->un_wm == wmp) { 26653 un->un_wm = wmp->wm_next; 26654 } else { 26655 wmp->wm_prev->wm_next = wmp->wm_next; 26656 } 26657 26658 if (wmp->wm_next) { 26659 wmp->wm_next->wm_prev = wmp->wm_prev; 26660 } 26661 26662 wmp->wm_next = wmp->wm_prev = NULL; 26663 26664 kmem_cache_free(un->un_wm_cache, wmp); 26665 } 26666 26667 26668 /* 26669 * Function: sd_range_unlock() 26670 * 26671 * Description: Unlock the range locked by wm. 26672 * Free write map if nobody else is waiting on it. 26673 * 26674 * Arguments: un - sd_lun structure for the device. 26675 * wmp - sd_w_map which needs to be unlinked. 26676 */ 26677 26678 static void 26679 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 26680 { 26681 ASSERT(un != NULL); 26682 ASSERT(wm != NULL); 26683 ASSERT(!mutex_owned(SD_MUTEX(un))); 26684 26685 mutex_enter(SD_MUTEX(un)); 26686 26687 if (wm->wm_flags & SD_WTYPE_RMW) { 26688 un->un_rmw_count--; 26689 } 26690 26691 if (wm->wm_wanted_count) { 26692 wm->wm_flags = 0; 26693 /* 26694 * Broadcast that the wmap is available now. 26695 */ 26696 cv_broadcast(&wm->wm_avail); 26697 } else { 26698 /* 26699 * If no one is waiting on the map, it should be free'ed. 26700 */ 26701 sd_free_inlist_wmap(un, wm); 26702 } 26703 26704 mutex_exit(SD_MUTEX(un)); 26705 } 26706 26707 26708 /* 26709 * Function: sd_read_modify_write_task 26710 * 26711 * Description: Called from a taskq thread to initiate the write phase of 26712 * a read-modify-write request. This is used for targets where 26713 * un->un_sys_blocksize != un->un_tgt_blocksize. 
26714 * 26715 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26716 * 26717 * Context: Called under taskq thread context. 26718 */ 26719 26720 static void 26721 sd_read_modify_write_task(void *arg) 26722 { 26723 struct sd_mapblocksize_info *bsp; 26724 struct buf *bp; 26725 struct sd_xbuf *xp; 26726 struct sd_lun *un; 26727 26728 bp = arg; /* The bp is given in arg */ 26729 ASSERT(bp != NULL); 26730 26731 /* Get the pointer to the layer-private data struct */ 26732 xp = SD_GET_XBUF(bp); 26733 ASSERT(xp != NULL); 26734 bsp = xp->xb_private; 26735 ASSERT(bsp != NULL); 26736 26737 un = SD_GET_UN(bp); 26738 ASSERT(un != NULL); 26739 ASSERT(!mutex_owned(SD_MUTEX(un))); 26740 26741 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26742 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26743 26744 /* 26745 * This is the write phase of a read-modify-write request, called 26746 * under the context of a taskq thread in response to the completion 26747 * of the read portion of the rmw request completing under interrupt 26748 * context. The write request must be sent from here down the iostart 26749 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26750 * we use the layer index saved in the layer-private data area. 26751 */ 26752 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26753 26754 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26755 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26756 } 26757 26758 26759 /* 26760 * Function: sddump_do_read_of_rmw() 26761 * 26762 * Description: This routine will be called from sddump, If sddump is called 26763 * with an I/O which not aligned on device blocksize boundary 26764 * then the write has to be converted to read-modify-write. 26765 * Do the read part here in order to keep sddump simple. 26766 * Note - That the sd_mutex is held across the call to this 26767 * routine. 26768 * 26769 * Arguments: un - sd_lun 26770 * blkno - block number in terms of media block size. 26771 * nblk - number of blocks. 26772 * bpp - pointer to pointer to the buf structure. On return 26773 * from this function, *bpp points to the valid buffer 26774 * to which the write has to be done. 26775 * 26776 * Return Code: 0 for success or errno-type return code 26777 */ 26778 26779 static int 26780 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 26781 struct buf **bpp) 26782 { 26783 int err; 26784 int i; 26785 int rval; 26786 struct buf *bp; 26787 struct scsi_pkt *pkt = NULL; 26788 uint32_t target_blocksize; 26789 26790 ASSERT(un != NULL); 26791 ASSERT(mutex_owned(SD_MUTEX(un))); 26792 26793 target_blocksize = un->un_tgt_blocksize; 26794 26795 mutex_exit(SD_MUTEX(un)); 26796 26797 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 26798 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 26799 if (bp == NULL) { 26800 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26801 "no resources for dumping; giving up"); 26802 err = ENOMEM; 26803 goto done; 26804 } 26805 26806 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 26807 blkno, nblk); 26808 if (rval != 0) { 26809 scsi_free_consistent_buf(bp); 26810 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26811 "no resources for dumping; giving up"); 26812 err = ENOMEM; 26813 goto done; 26814 } 26815 26816 pkt->pkt_flags |= FLAG_NOINTR; 26817 26818 err = EIO; 26819 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26820 26821 /* 26822 * Scsi_poll returns 0 (success) if the command completes and 26823 * the status block is STATUS_GOOD. 
We should only check 26824 * errors if this condition is not true. Even then we should 26825 * send our own request sense packet only if we have a check 26826 * condition and auto request sense has not been performed by 26827 * the hba. 26828 */ 26829 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26830 26831 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26832 err = 0; 26833 break; 26834 } 26835 26836 /* 26837 * Check CMD_DEV_GONE 1st, give up if device is gone, 26838 * no need to read RQS data. 26839 */ 26840 if (pkt->pkt_reason == CMD_DEV_GONE) { 26841 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26842 "Error while dumping state with rmw..." 26843 "Device is gone\n"); 26844 break; 26845 } 26846 26847 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26848 SD_INFO(SD_LOG_DUMP, un, 26849 "sddump: read failed with CHECK, try # %d\n", i); 26850 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26851 (void) sd_send_polled_RQS(un); 26852 } 26853 26854 continue; 26855 } 26856 26857 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26858 int reset_retval = 0; 26859 26860 SD_INFO(SD_LOG_DUMP, un, 26861 "sddump: read failed with BUSY, try # %d\n", i); 26862 26863 if (un->un_f_lun_reset_enabled == TRUE) { 26864 reset_retval = scsi_reset(SD_ADDRESS(un), 26865 RESET_LUN); 26866 } 26867 if (reset_retval == 0) { 26868 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26869 } 26870 (void) sd_send_polled_RQS(un); 26871 26872 } else { 26873 SD_INFO(SD_LOG_DUMP, un, 26874 "sddump: read failed with 0x%x, try # %d\n", 26875 SD_GET_PKT_STATUS(pkt), i); 26876 mutex_enter(SD_MUTEX(un)); 26877 sd_reset_target(un, pkt); 26878 mutex_exit(SD_MUTEX(un)); 26879 } 26880 26881 /* 26882 * If we are not getting anywhere with lun/target resets, 26883 * let's reset the bus. 26884 */ 26885 if (i > SD_NDUMP_RETRIES/2) { 26886 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26887 (void) sd_send_polled_RQS(un); 26888 } 26889 26890 } 26891 scsi_destroy_pkt(pkt); 26892 26893 if (err != 0) { 26894 scsi_free_consistent_buf(bp); 26895 *bpp = NULL; 26896 } else { 26897 *bpp = bp; 26898 } 26899 26900 done: 26901 mutex_enter(SD_MUTEX(un)); 26902 return (err); 26903 } 26904 26905 26906 /* 26907 * Function: sd_failfast_flushq 26908 * 26909 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26910 * in b_flags and move them onto the failfast queue, then kick 26911 * off a thread to return all bp's on the failfast queue to 26912 * their owners with an error set. 26913 * 26914 * Arguments: un - pointer to the soft state struct for the instance. 26915 * 26916 * Context: may execute in interrupt context. 26917 */ 26918 26919 static void 26920 sd_failfast_flushq(struct sd_lun *un) 26921 { 26922 struct buf *bp; 26923 struct buf *next_waitq_bp; 26924 struct buf *prev_waitq_bp = NULL; 26925 26926 ASSERT(un != NULL); 26927 ASSERT(mutex_owned(SD_MUTEX(un))); 26928 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26929 ASSERT(un->un_failfast_bp == NULL); 26930 26931 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26932 "sd_failfast_flushq: entry: un:0x%p\n", un); 26933 26934 /* 26935 * Check if we should flush all bufs when entering failfast state, or 26936 * just those with B_FAILFAST set. 26937 */ 26938 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26939 /* 26940 * Move *all* bp's on the wait queue to the failfast flush 26941 * queue, including those that do NOT have B_FAILFAST set. 
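	 * (This wholesale flush is selected by the
	 * SD_FAILFAST_FLUSH_ALL_BUFS bit of the sd_failfast_flushctl
	 * tunable, tested above.)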
26942 */ 26943 if (un->un_failfast_headp == NULL) { 26944 ASSERT(un->un_failfast_tailp == NULL); 26945 un->un_failfast_headp = un->un_waitq_headp; 26946 } else { 26947 ASSERT(un->un_failfast_tailp != NULL); 26948 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26949 } 26950 26951 un->un_failfast_tailp = un->un_waitq_tailp; 26952 26953 /* update kstat for each bp moved out of the waitq */ 26954 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26955 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26956 } 26957 26958 /* empty the waitq */ 26959 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26960 26961 } else { 26962 /* 26963 * Go thru the wait queue, pick off all entries with 26964 * B_FAILFAST set, and move these onto the failfast queue. 26965 */ 26966 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26967 /* 26968 * Save the pointer to the next bp on the wait queue, 26969 * so we get to it on the next iteration of this loop. 26970 */ 26971 next_waitq_bp = bp->av_forw; 26972 26973 /* 26974 * If this bp from the wait queue does NOT have 26975 * B_FAILFAST set, just move on to the next element 26976 * in the wait queue. Note, this is the only place 26977 * where it is correct to set prev_waitq_bp. 26978 */ 26979 if ((bp->b_flags & B_FAILFAST) == 0) { 26980 prev_waitq_bp = bp; 26981 continue; 26982 } 26983 26984 /* 26985 * Remove the bp from the wait queue. 26986 */ 26987 if (bp == un->un_waitq_headp) { 26988 /* The bp is the first element of the waitq. */ 26989 un->un_waitq_headp = next_waitq_bp; 26990 if (un->un_waitq_headp == NULL) { 26991 /* The wait queue is now empty */ 26992 un->un_waitq_tailp = NULL; 26993 } 26994 } else { 26995 /* 26996 * The bp is either somewhere in the middle 26997 * or at the end of the wait queue. 26998 */ 26999 ASSERT(un->un_waitq_headp != NULL); 27000 ASSERT(prev_waitq_bp != NULL); 27001 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 27002 == 0); 27003 if (bp == un->un_waitq_tailp) { 27004 /* bp is the last entry on the waitq. */ 27005 ASSERT(next_waitq_bp == NULL); 27006 un->un_waitq_tailp = prev_waitq_bp; 27007 } 27008 prev_waitq_bp->av_forw = next_waitq_bp; 27009 } 27010 bp->av_forw = NULL; 27011 27012 /* 27013 * update kstat since the bp is moved out of 27014 * the waitq 27015 */ 27016 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 27017 27018 /* 27019 * Now put the bp onto the failfast queue. 27020 */ 27021 if (un->un_failfast_headp == NULL) { 27022 /* failfast queue is currently empty */ 27023 ASSERT(un->un_failfast_tailp == NULL); 27024 un->un_failfast_headp = 27025 un->un_failfast_tailp = bp; 27026 } else { 27027 /* Add the bp to the end of the failfast q */ 27028 ASSERT(un->un_failfast_tailp != NULL); 27029 ASSERT(un->un_failfast_tailp->b_flags & 27030 B_FAILFAST); 27031 un->un_failfast_tailp->av_forw = bp; 27032 un->un_failfast_tailp = bp; 27033 } 27034 } 27035 } 27036 27037 /* 27038 * Now return all bp's on the failfast queue to their owners. 27039 */ 27040 while ((bp = un->un_failfast_headp) != NULL) { 27041 27042 un->un_failfast_headp = bp->av_forw; 27043 if (un->un_failfast_headp == NULL) { 27044 un->un_failfast_tailp = NULL; 27045 } 27046 27047 /* 27048 * We want to return the bp with a failure error code, but 27049 * we do not want a call to sd_start_cmds() to occur here, 27050 * so use sd_return_failed_command_no_restart() instead of 27051 * sd_return_failed_command(). 27052 */ 27053 sd_return_failed_command_no_restart(un, bp, EIO); 27054 } 27055 27056 /* Flush the xbuf queues if required. 
*/ 27057 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 27058 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 27059 } 27060 27061 SD_TRACE(SD_LOG_IO_FAILFAST, un, 27062 "sd_failfast_flushq: exit: un:0x%p\n", un); 27063 } 27064 27065 27066 /* 27067 * Function: sd_failfast_flushq_callback 27068 * 27069 * Description: Return TRUE if the given bp meets the criteria for failfast 27070 * flushing. Used with ddi_xbuf_flushq(9F). 27071 * 27072 * Arguments: bp - ptr to buf struct to be examined. 27073 * 27074 * Context: Any 27075 */ 27076 27077 static int 27078 sd_failfast_flushq_callback(struct buf *bp) 27079 { 27080 /* 27081 * Return TRUE if (1) we want to flush ALL bufs when the failfast 27082 * state is entered; OR (2) the given bp has B_FAILFAST set. 27083 */ 27084 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 27085 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 27086 } 27087 27088 27089 27090 /* 27091 * Function: sd_setup_next_xfer 27092 * 27093 * Description: Prepare next I/O operation using DMA_PARTIAL 27094 * 27095 */ 27096 27097 static int 27098 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 27099 struct scsi_pkt *pkt, struct sd_xbuf *xp) 27100 { 27101 ssize_t num_blks_not_xfered; 27102 daddr_t strt_blk_num; 27103 ssize_t bytes_not_xfered; 27104 int rval; 27105 27106 ASSERT(pkt->pkt_resid == 0); 27107 27108 /* 27109 * Calculate next block number and amount to be transferred. 27110 * 27111 * How much data NOT transfered to the HBA yet. 27112 */ 27113 bytes_not_xfered = xp->xb_dma_resid; 27114 27115 /* 27116 * figure how many blocks NOT transfered to the HBA yet. 27117 */ 27118 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 27119 27120 /* 27121 * set starting block number to the end of what WAS transfered. 27122 */ 27123 strt_blk_num = xp->xb_blkno + 27124 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 27125 27126 /* 27127 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 27128 * will call scsi_initpkt with NULL_FUNC so we do not have to release 27129 * the disk mutex here. 27130 */ 27131 rval = sd_setup_next_rw_pkt(un, pkt, bp, 27132 strt_blk_num, num_blks_not_xfered); 27133 27134 if (rval == 0) { 27135 27136 /* 27137 * Success. 27138 * 27139 * Adjust things if there are still more blocks to be 27140 * transfered. 27141 */ 27142 xp->xb_dma_resid = pkt->pkt_resid; 27143 pkt->pkt_resid = 0; 27144 27145 return (1); 27146 } 27147 27148 /* 27149 * There's really only one possible return value from 27150 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 27151 * returns NULL. 27152 */ 27153 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 27154 27155 bp->b_resid = bp->b_bcount; 27156 bp->b_flags |= B_ERROR; 27157 27158 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27159 "Error setting up next portion of DMA transfer\n"); 27160 27161 return (0); 27162 } 27163 27164 /* 27165 * Function: sd_panic_for_res_conflict 27166 * 27167 * Description: Call panic with a string formatted with "Reservation Conflict" 27168 * and a human readable identifier indicating the SD instance 27169 * that experienced the reservation conflict. 27170 * 27171 * Arguments: un - pointer to the soft state struct for the instance. 27172 * 27173 * Context: may execute in interrupt context. 
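 *
 * Note: panic_str below is sized SD_RESV_CONFLICT_FMT_LEN + MAXPATHLEN
 * so that both the fixed format text and the full device path returned
 * by ddi_pathname() are guaranteed to fit.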
27174 */ 27175 27176 #define SD_RESV_CONFLICT_FMT_LEN 40 27177 void 27178 sd_panic_for_res_conflict(struct sd_lun *un) 27179 { 27180 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 27181 char path_str[MAXPATHLEN]; 27182 27183 (void) snprintf(panic_str, sizeof (panic_str), 27184 "Reservation Conflict\nDisk: %s", 27185 ddi_pathname(SD_DEVINFO(un), path_str)); 27186 27187 panic(panic_str); 27188 } 27189 27190 /* 27191 * Note: The following sd_faultinjection_ioctl( ) routines implement 27192 * driver support for handling fault injection for error analysis 27193 * causing faults in multiple layers of the driver. 27194 * 27195 */ 27196 27197 #ifdef SD_FAULT_INJECTION 27198 static uint_t sd_fault_injection_on = 0; 27199 27200 /* 27201 * Function: sd_faultinjection_ioctl() 27202 * 27203 * Description: This routine is the driver entry point for handling 27204 * faultinjection ioctls to inject errors into the 27205 * layer model 27206 * 27207 * Arguments: cmd - the ioctl cmd received 27208 * arg - the arguments from user and returns 27209 */ 27210 27211 static void 27212 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 27213 27214 uint_t i; 27215 uint_t rval; 27216 27217 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 27218 27219 mutex_enter(SD_MUTEX(un)); 27220 27221 switch (cmd) { 27222 case SDIOCRUN: 27223 /* Allow pushed faults to be injected */ 27224 SD_INFO(SD_LOG_SDTEST, un, 27225 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 27226 27227 sd_fault_injection_on = 1; 27228 27229 SD_INFO(SD_LOG_IOERR, un, 27230 "sd_faultinjection_ioctl: run finished\n"); 27231 break; 27232 27233 case SDIOCSTART: 27234 /* Start Injection Session */ 27235 SD_INFO(SD_LOG_SDTEST, un, 27236 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 27237 27238 sd_fault_injection_on = 0; 27239 un->sd_injection_mask = 0xFFFFFFFF; 27240 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27241 un->sd_fi_fifo_pkt[i] = NULL; 27242 un->sd_fi_fifo_xb[i] = NULL; 27243 un->sd_fi_fifo_un[i] = NULL; 27244 un->sd_fi_fifo_arq[i] = NULL; 27245 } 27246 un->sd_fi_fifo_start = 0; 27247 un->sd_fi_fifo_end = 0; 27248 27249 mutex_enter(&(un->un_fi_mutex)); 27250 un->sd_fi_log[0] = '\0'; 27251 un->sd_fi_buf_len = 0; 27252 mutex_exit(&(un->un_fi_mutex)); 27253 27254 SD_INFO(SD_LOG_IOERR, un, 27255 "sd_faultinjection_ioctl: start finished\n"); 27256 break; 27257 27258 case SDIOCSTOP: 27259 /* Stop Injection Session */ 27260 SD_INFO(SD_LOG_SDTEST, un, 27261 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 27262 sd_fault_injection_on = 0; 27263 un->sd_injection_mask = 0x0; 27264 27265 /* Empty stray or unuseds structs from fifo */ 27266 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27267 if (un->sd_fi_fifo_pkt[i] != NULL) { 27268 kmem_free(un->sd_fi_fifo_pkt[i], 27269 sizeof (struct sd_fi_pkt)); 27270 } 27271 if (un->sd_fi_fifo_xb[i] != NULL) { 27272 kmem_free(un->sd_fi_fifo_xb[i], 27273 sizeof (struct sd_fi_xb)); 27274 } 27275 if (un->sd_fi_fifo_un[i] != NULL) { 27276 kmem_free(un->sd_fi_fifo_un[i], 27277 sizeof (struct sd_fi_un)); 27278 } 27279 if (un->sd_fi_fifo_arq[i] != NULL) { 27280 kmem_free(un->sd_fi_fifo_arq[i], 27281 sizeof (struct sd_fi_arq)); 27282 } 27283 un->sd_fi_fifo_pkt[i] = NULL; 27284 un->sd_fi_fifo_un[i] = NULL; 27285 un->sd_fi_fifo_xb[i] = NULL; 27286 un->sd_fi_fifo_arq[i] = NULL; 27287 } 27288 un->sd_fi_fifo_start = 0; 27289 un->sd_fi_fifo_end = 0; 27290 27291 SD_INFO(SD_LOG_IOERR, un, 27292 "sd_faultinjection_ioctl: stop finished\n"); 27293 break; 27294 27295 case SDIOCINSERTPKT: 27296 /* Store a 
packet struct to be pushed onto fifo */ 27297 SD_INFO(SD_LOG_SDTEST, un, 27298 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 27299 27300 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27301 27302 sd_fault_injection_on = 0; 27303 27304 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 27305 if (un->sd_fi_fifo_pkt[i] != NULL) { 27306 kmem_free(un->sd_fi_fifo_pkt[i], 27307 sizeof (struct sd_fi_pkt)); 27308 } 27309 if (arg != NULL) { 27310 un->sd_fi_fifo_pkt[i] = 27311 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 27312 if (un->sd_fi_fifo_pkt[i] == NULL) { 27313 /* Alloc failed don't store anything */ 27314 break; 27315 } 27316 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 27317 sizeof (struct sd_fi_pkt), 0); 27318 if (rval == -1) { 27319 kmem_free(un->sd_fi_fifo_pkt[i], 27320 sizeof (struct sd_fi_pkt)); 27321 un->sd_fi_fifo_pkt[i] = NULL; 27322 } 27323 } else { 27324 SD_INFO(SD_LOG_IOERR, un, 27325 "sd_faultinjection_ioctl: pkt null\n"); 27326 } 27327 break; 27328 27329 case SDIOCINSERTXB: 27330 /* Store a xb struct to be pushed onto fifo */ 27331 SD_INFO(SD_LOG_SDTEST, un, 27332 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 27333 27334 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27335 27336 sd_fault_injection_on = 0; 27337 27338 if (un->sd_fi_fifo_xb[i] != NULL) { 27339 kmem_free(un->sd_fi_fifo_xb[i], 27340 sizeof (struct sd_fi_xb)); 27341 un->sd_fi_fifo_xb[i] = NULL; 27342 } 27343 if (arg != NULL) { 27344 un->sd_fi_fifo_xb[i] = 27345 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 27346 if (un->sd_fi_fifo_xb[i] == NULL) { 27347 /* Alloc failed don't store anything */ 27348 break; 27349 } 27350 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 27351 sizeof (struct sd_fi_xb), 0); 27352 27353 if (rval == -1) { 27354 kmem_free(un->sd_fi_fifo_xb[i], 27355 sizeof (struct sd_fi_xb)); 27356 un->sd_fi_fifo_xb[i] = NULL; 27357 } 27358 } else { 27359 SD_INFO(SD_LOG_IOERR, un, 27360 "sd_faultinjection_ioctl: xb null\n"); 27361 } 27362 break; 27363 27364 case SDIOCINSERTUN: 27365 /* Store a un struct to be pushed onto fifo */ 27366 SD_INFO(SD_LOG_SDTEST, un, 27367 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 27368 27369 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27370 27371 sd_fault_injection_on = 0; 27372 27373 if (un->sd_fi_fifo_un[i] != NULL) { 27374 kmem_free(un->sd_fi_fifo_un[i], 27375 sizeof (struct sd_fi_un)); 27376 un->sd_fi_fifo_un[i] = NULL; 27377 } 27378 if (arg != NULL) { 27379 un->sd_fi_fifo_un[i] = 27380 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 27381 if (un->sd_fi_fifo_un[i] == NULL) { 27382 /* Alloc failed don't store anything */ 27383 break; 27384 } 27385 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 27386 sizeof (struct sd_fi_un), 0); 27387 if (rval == -1) { 27388 kmem_free(un->sd_fi_fifo_un[i], 27389 sizeof (struct sd_fi_un)); 27390 un->sd_fi_fifo_un[i] = NULL; 27391 } 27392 27393 } else { 27394 SD_INFO(SD_LOG_IOERR, un, 27395 "sd_faultinjection_ioctl: un null\n"); 27396 } 27397 27398 break; 27399 27400 case SDIOCINSERTARQ: 27401 /* Store a arq struct to be pushed onto fifo */ 27402 SD_INFO(SD_LOG_SDTEST, un, 27403 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 27404 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27405 27406 sd_fault_injection_on = 0; 27407 27408 if (un->sd_fi_fifo_arq[i] != NULL) { 27409 kmem_free(un->sd_fi_fifo_arq[i], 27410 sizeof (struct sd_fi_arq)); 27411 un->sd_fi_fifo_arq[i] = NULL; 27412 } 27413 if (arg != NULL) { 27414 un->sd_fi_fifo_arq[i] = 27415 kmem_alloc(sizeof (struct sd_fi_arq), 
			    KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed, don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			/* i is uninitialized here; push a single slot */
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log, for
 *		later retrieval via sd_faultinjection_ioctl() for use in
 *		fault detection and recovery.
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
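 *
 *		Note: a typical injection session, as driven through the
 *		ioctls implemented above, looks roughly like this:
 *
 *		SDIOCSTART	initialize the fifo and the log
 *		SDIOCINSERTPKT/XB/UN/ARQ
 *				stage fault payloads for the current
 *				fifo slot
 *		SDIOCPUSH	commit the staged slot(s)
 *		SDIOCRUN	arm injection; this routine then applies
 *				the next slot when invoked from sdintr
 *		SDIOCRETRIEVE	copy the accumulated injection log out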

/*
 *    Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 *   Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb, and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off, return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	/* take next set off fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];

	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether values were
	 * injected or not, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */
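
/*
 * Illustrative sketch (under #if 0, never compiled): the sentinel
 * convention that SD_CONDSET() above appears to rely on.  The real macro
 * is defined elsewhere in this file and also takes a name string,
 * presumably so the change can be recorded via sd_injection_log(); this
 * stripped-down analog only shows the idea -- a staged field still at -1
 * is treated as "not injected" and the live field is left untouched,
 * which is why sd_faultinjection() can apply a whole struct of overrides
 * unconditionally.
 */
#if 0
#define	FI_CONDSET(dst, src, field)					\
	if ((src)->field != -1) {					\
		(dst)->field = (src)->field;				\
	}
#endif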

/*
 * This routine is invoked in sd_unit_attach(). Before it is called, the
 * properties in the conf file, including the "hotpluggable" property,
 * must already have been processed.
 *
 * The sd driver distinguishes three different types of devices: removable
 * media, non-removable media, and hotpluggable. The differences are
 * defined below:
 *
 * 1. Device ID
 *
 *     The device ID of a device is used to identify this device. Refer to
 *     ddi_devid_register(9F).
 *
 *     For a non-removable media disk device which can provide the 0x80 or
 *     0x83 VPD page (refer to the INQUIRY command of the SCSI SPC
 *     specification), a unique device ID is created to identify this
 *     device. For other non-removable media devices, a default device ID
 *     is created only if this device has at least two alternate
 *     cylinders. Otherwise, this device has no devid.
 *
 *     -------------------------------------------------------
 *     removable media   hotpluggable  |  Can Have Device ID
 *     -------------------------------------------------------
 *         false             false     |  Yes
 *         false             true      |  Yes
 *         true                x       |  No
 *     -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *     In the SCSI specification, only some commands in the group 4
 *     command set can use 8-byte addresses, which are needed to access
 *     storage spaces larger than 2TB. Other commands have no such
 *     capability. Without group 4 support, it is impossible to make full
 *     use of the storage space of a disk with a capacity larger than 2TB.
 *
 *     -----------------------------------------------
 *     removable media   hotpluggable   LP64  |  Group
 *     -----------------------------------------------
 *         false             false     false  |   1
 *         false             false     true   |   4
 *         false             true      false  |   1
 *         false             true      true   |   4
 *         true                x         x    |   5
 *     -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 *     If a direct-access disk has no EFI label, sd will check whether it
 *     has a valid VTOC label. Now, sd also does that check for removable
 *     media and hotpluggable devices.
 *
 *     --------------------------------------------------------------
 *     Direct-Access   removable media   hotpluggable  |  Check Label
 *     --------------------------------------------------------------
 *         false           false             false     |  No
 *         false           false             true      |  No
 *         false           true              false     |  Yes
 *         false           true              true      |  Yes
 *         true              x                 x       |  Yes
 *     --------------------------------------------------------------
 *
 *
 * 4. Building a default VTOC label
 *
 *     As section 3 says, sd checks whether certain kinds of devices have
 *     a VTOC label. If those devices have no valid VTOC label, sd(7d)
 *     will attempt to create a default VTOC for them. Currently sd
 *     creates a default VTOC label for all devices on the x86 platform
 *     (VTOC_16), but only for removable media devices on SPARC (VTOC_8).
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable   platform  |  Default Label
 *     -----------------------------------------------------------
 *         false             false       sparc    |  No
 *         false             true         x86     |  Yes
 *         false             true        sparc    |  Yes
 *         true                x           x      |  Yes
 *     -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *     Sd supports non-512-byte blocksizes for removable media devices
 *     only. For other devices, only a 512-byte blocksize is supported.
 *     This may change in the near future because some RAID devices
 *     require a non-512-byte blocksize.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable  |  non-512-byte blocksize
 *     -----------------------------------------------------------
 *         false             false     |  No
 *         false             true      |  No
 *         true                x       |  Yes
 *     -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *     The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used
 *     to query whether a device is a removable media device. It returns 1
 *     for removable media devices, and 0 for others (see the illustrative
 *     sketch after sd_set_unit_attributes() below).
 *
 *     The automatic mounting subsystem should distinguish between the
 *     types of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *     Fdisk is the traditional partitioning method on the x86 platform.
 *     The sd(7d) driver supports fdisk partitions only on x86; on SPARC,
 *     sd doesn't support fdisk partitions at all. Note: pcfs(7fs) can
 *     recognize fdisk partitions on both x86 and SPARC platforms.
 *
 *     -----------------------------------------------------------
 *     platform   removable media   USB/1394  |  fdisk supported
 *     -----------------------------------------------------------
 *       x86            X               X     |  true
 *     -----------------------------------------------------------
 *      sparc           X               X     |  false
 *     -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *     Although sd(7d) doesn't support fdisk on the SPARC platform, it
 *     does support reading and writing the mboot of removable media
 *     devices on SPARC.
 *
 *     -----------------------------------------------------------
 *     platform   removable media   USB/1394  |  mboot supported
 *     -----------------------------------------------------------
 *       x86            X               X     |  true
 *     -----------------------------------------------------------
 *      sparc         false           false   |  false
 *      sparc         false           true    |  true
 *      sparc         true            false   |  true
 *      sparc         true            true    |  true
 *     -----------------------------------------------------------
 *
 *
 * 9. Error handling during device open
 *
 *     If opening a disk device fails, an errno is returned. For some
 *     kinds of errors, a different errno is returned depending on whether
 *     the device is a removable media device. This brings USB/1394 hard
 *     disks in line with expected hard disk behavior; it is not expected
 *     to break any application.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable  |  errno
 *     ------------------------------------------------------
 *         false             false     |  EIO
 *         false             true      |  EIO
 *         true                x       |  ENXIO
 *     ------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *     These ioctls are applicable only to removable media devices.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable  |  DKIOCEJECT, CDROMEJECT
 *     -----------------------------------------------------------
 *         false             false     |  No
 *         false             true      |  No
 *         true                x       |  Yes
 *     -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *     sd creates partition kstats for non-removable media devices, so
 *     USB and FireWire hard disks now have partition kstats.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable  |  kstat
 *     ------------------------------------------------------
 *         false             false     |  Yes
 *         false             true      |  Yes
 *         true                x       |  No
 *     ------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 *     The sd driver creates a "removable-media" property for removable
 *     media devices. A parent nexus driver creates a "hotpluggable"
 *     property if it supports hotplugging.
 *
 *     --------------------------------------------------------------------
 *     removable media   hotpluggable  |  "removable-media"  "hotpluggable"
 *     --------------------------------------------------------------------
 *         false             false     |      No                 No
 *         false             true      |      No                 Yes
 *         true              false     |      Yes                No
 *         true              true      |      Yes                Yes
 *     --------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 *     sd only power manages removable media devices or devices that
 *     support LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *     A parent nexus that supports hotplugging can also set "pm-capable"
 *     if the disk can be power managed.
 *
 *     ------------------------------------------------------------
 *     removable media   hotpluggable   pm-capable  |  power manage
 *     ------------------------------------------------------------
 *         false             false        false     |  No
 *         false             false        true      |  Yes
 *         false             true         false     |  No
 *         false             true         true      |  Yes
 *         true                x            x       |  Yes
 *     ------------------------------------------------------------
 *
 *     USB and FireWire hard disks can now be power managed independently
 *     of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *     Currently, sd doesn't permit a fixed disk device with a capacity
 *     larger than 1TB to be used in a 32-bit operating system
 *     environment. However, sd doesn't enforce this for removable media
 *     devices; instead, it assumes that removable media devices cannot
 *     have a capacity larger than 1TB. Using such devices on a 32-bit
 *     system is therefore only partially supported, which can cause
 *     unexpected results.
 *
 *     --------------------------------------------------------------------
 *     removable media   USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
 *     --------------------------------------------------------------------
 *         false           false   |      true        |  No
 *         false           true    |      true        |  No
 *         true            false   |      true        |  Yes
 *         true            true    |      true        |  Yes
 *     --------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *     When a removable media device is opened for writing without the
 *     NDELAY flag, sd checks whether the device is writable. If the
 *     device is write-protected, the open fails.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394  |  WP Check
 *     ------------------------------------------------------------
 *         false           false   |  No
 *         false           true    |  No
 *         true            false   |  Yes
 *         true            true    |  Yes
 *     ------------------------------------------------------------
 *
 *
 * 16. Syslog when a corrupted VTOC is encountered
 *
 *     Currently, if an invalid VTOC is encountered, sd prints a syslog
 *     message only for fixed SCSI disks.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394  |  print syslog
 *     ------------------------------------------------------------
 *         false           false   |  Yes
 *         false           true    |  No
 *         true            false   |  No
 *         true            true    |  No
 *     ------------------------------------------------------------
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after the
		 * device has been opened. Thus we should support this
		 * operation.
27957 */ 27958 un->un_f_has_removable_media = TRUE; 27959 27960 /* 27961 * support non-512-byte blocksize of removable media devices 27962 */ 27963 un->un_f_non_devbsize_supported = TRUE; 27964 27965 /* 27966 * Assume that all removable media devices support DOOR_LOCK 27967 */ 27968 un->un_f_doorlock_supported = TRUE; 27969 27970 /* 27971 * For a removable media device, it is possible to be opened 27972 * with NDELAY flag when there is no media in drive, in this 27973 * case we don't care if device is writable. But if without 27974 * NDELAY flag, we need to check if media is write-protected. 27975 */ 27976 un->un_f_chk_wp_open = TRUE; 27977 27978 /* 27979 * need to start a SCSI watch thread to monitor media state, 27980 * when media is being inserted or ejected, notify syseventd. 27981 */ 27982 un->un_f_monitor_media_state = TRUE; 27983 27984 /* 27985 * Some devices don't support START_STOP_UNIT command. 27986 * Therefore, we'd better check if a device supports it 27987 * before sending it. 27988 */ 27989 un->un_f_check_start_stop = TRUE; 27990 27991 /* 27992 * support eject media ioctl: 27993 * FDEJECT, DKIOCEJECT, CDROMEJECT 27994 */ 27995 un->un_f_eject_media_supported = TRUE; 27996 27997 /* 27998 * Because many removable-media devices don't support 27999 * LOG_SENSE, we couldn't use this command to check if 28000 * a removable media device support power-management. 28001 * We assume that they support power-management via 28002 * START_STOP_UNIT command and can be spun up and down 28003 * without limitations. 28004 */ 28005 un->un_f_pm_supported = TRUE; 28006 28007 /* 28008 * Need to create a zero length (Boolean) property 28009 * removable-media for the removable media devices. 28010 * Note that the return value of the property is not being 28011 * checked, since if unable to create the property 28012 * then do not want the attach to fail altogether. Consistent 28013 * with other property creation in attach. 28014 */ 28015 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 28016 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 28017 28018 } else { 28019 /* 28020 * create device ID for device 28021 */ 28022 un->un_f_devid_supported = TRUE; 28023 28024 /* 28025 * Spin up non-removable-media devices once it is attached 28026 */ 28027 un->un_f_attach_spinup = TRUE; 28028 28029 /* 28030 * According to SCSI specification, Sense data has two kinds of 28031 * format: fixed format, and descriptor format. At present, we 28032 * don't support descriptor format sense data for removable 28033 * media. 28034 */ 28035 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 28036 un->un_f_descr_format_supported = TRUE; 28037 } 28038 28039 /* 28040 * kstats are created only for non-removable media devices. 28041 * 28042 * Set this in sd.conf to 0 in order to disable kstats. The 28043 * default is 1, so they are enabled by default. 28044 */ 28045 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 28046 SD_DEVINFO(un), DDI_PROP_DONTPASS, 28047 "enable-partition-kstats", 1)); 28048 28049 /* 28050 * Check if HBA has set the "pm-capable" property. 28051 * If "pm-capable" exists and is non-zero then we can 28052 * power manage the device without checking the start/stop 28053 * cycle count log sense page. 28054 * 28055 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 28056 * then we should not power manage the device. 28057 * 28058 * If "pm-capable" doesn't exist then pm_capable_prop will 28059 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. The start_block is
 *	in units of the system block size, reqlength in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t buffer_size = reqlength;
	int rval;
	diskaddr_t cap;
	uint32_t lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize, need to re-adjust
		 * blkno and save the index to beginning of dk_label
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_tg_rdwr: label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}
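
/*
 * Illustrative sketch (under #if 0, never compiled): the block-size
 * conversion sd_tg_rdwr() performs when NOT_DEVBSIZE(un) holds, pulled
 * out as a standalone helper.  The arithmetic mirrors the code above;
 * the helper itself is hypothetical.
 */
#if 0
static size_t
tg_bounce_size(diskaddr_t start_block, size_t reqlength,
    uint32_t sys_bs, uint32_t tgt_bs)
{
	diskaddr_t first_byte = start_block * sys_bs;
	diskaddr_t real_addr = first_byte / tgt_bs;
	diskaddr_t end_block = (first_byte + reqlength + tgt_bs - 1) / tgt_bs;

	/*
	 * Example: sys_bs = 512, tgt_bs = 2048, start_block = 5,
	 * reqlength = 512 gives first_byte = 2560, real_addr = 1,
	 * end_block = 2, so a 2048-byte bounce buffer; and since
	 * 2560 % 2048 != 0, this is the unaligned case that forces
	 * the kmem_zalloc()'d read-modify-write path above.
	 */
	return ((size_t)(end_block - real_addr) * tgt_bs);
}
#endif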

static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t cap;
	uint32_t lbasize;
	int path_flag = (int)(uintptr_t)tg_cookie;
	int ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
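
/*
 * Illustrative sketch (under #if 0, never compiled): the shape of a call
 * into sd_tg_getinfo(), which cmlb reaches through the sd_tgops vector
 * registered elsewhere in this file.  Encoding SD_PATH_DIRECT in the
 * tg_cookie is an assumption about the usual non-priority path flag;
 * both sd_tg_rdwr() and sd_tg_getinfo() decode the cookie this way.
 */
#if 0
static int
query_capacity(dev_info_t *devi, diskaddr_t *capp)
{
	void *tg_cookie = (void *)(uintptr_t)SD_PATH_DIRECT;

	return (sd_tg_getinfo(devi, TG_GETCAPACITY, (void *)capp, tg_cookie));
}
#endif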