/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */

#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>
#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility.  In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property.  The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd).  (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
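/*
 * Illustrative sketch (not part of the driver): with a DDI-conforming HBA,
 * the "interconnect-type" property described above could be fetched roughly
 * as follows, assuming the standard ddi_prop_get_int(9F) interface and a
 * devinfo node "devi":
 *
 *	int itype = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
 *	    "interconnect-type", -1);
 *	if ((itype == INTERCONNECT_FIBRE) || (itype == INTERCONNECT_SSA) ||
 *	    (itype == INTERCONNECT_FABRIC)) {
 *		... assume fibre channel (ssd) behaviors ...
 *	} else {
 *		... assume parallel SCSI (sd) behaviors, per the default ...
 *	}
 */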
/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;


/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
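/*
 * Illustrative sketch (assumption, not driver code): a cached lookup along
 * the lines of sd_scsi_probe_with_cache() would walk the list above under
 * sd_scsi_probe_cache_mutex, keyed by the parent devinfo node, and only
 * fall back to a real scsi_probe() on a miss:
 *
 *	struct sd_scsi_probe_cache *cp;
 *	mutex_enter(&sd_scsi_probe_cache_mutex);
 *	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
 *		if (cp->pdip == pdip)
 *			break;	(one cache entry per parent HBA node)
 *	}
 *	... consult cp->cache[target] before issuing a new scsi_probe() ...
 *	mutex_exit(&sd_scsi_probe_cache_mutex);
 */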
/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
	sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
	sd_scsi_probe_cache_head))


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE, 0, 0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0, 0, 0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE, 0, 0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0, 0, 0, 0, 0, 0, 0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE, 0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE, 0, LSI_NOTREADY_RETRIES, 0, 0, 0, 0, 0, 0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE, 0, SYMBIOS_NOTREADY_RETRIES, 0, 0, 0, 0, 0, 0
};

static sd_tunables lsi_properties = {
	0, 0, LSI_NOTREADY_RETRIES, 0, 0, 0, 0, 0, 0
};

static sd_tunables lsi_oem_properties = {
	0, 0, LSI_OEM_NOTREADY_RETRIES, 0, 0, 0, 0, 0, 0
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0, 0, 0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.)  device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure.  Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure.  Device type is searched as far as the device_id string is
 * defined.  Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of  "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
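/*
 * Illustrative note (assumption, for clarity): per the scsi_inquiry
 * structure, inq_vid is 8 bytes and inq_pid is 16 bytes, so a device
 * reporting vid "SEAGATE " and pid "ST39103FC" produces a device_id that
 * the "SEAGATE ST39103FC" entry below matches: the vid comparison covers
 * bytes 0-7 and the pid comparison begins at byte 8.
 */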
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);


/*
 * Return codes of sd_uselabel().
 */
#define	SD_LABEL_IS_VALID		0
#define	SD_LABEL_IS_INVALID		1

#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */
#define	WD_NODE			7	/* the whole disk minor */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
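/*
 * Descriptive note (assumption, based on standard SCSI CDB formats): each
 * sd_cdbinfo row below gives, in order, the CDB length for the command
 * group, the group's opcode bits, the largest block address expressible in
 * that CDB format, and the largest transfer length.  For example, Group 0
 * (6-byte) CDBs carry a 21-bit LBA (0x1FFFFF) and an 8-bit transfer length
 * (0xFF), while Group 4 (16-byte) CDBs carry a full 64-bit LBA.
 */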
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,		0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF,		0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF,		0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF,	0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties
#define	sd_validate_geometry		ssd_validate_geometry

#if defined(_SUNOS_VTOC_16)
#define	sd_convert_geometry		ssd_convert_geometry
#endif

#define	sd_resync_geom_caches		ssd_resync_geom_caches
#define	sd_read_fdisk			ssd_read_fdisk
#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_swap_efi_gpt			ssd_swap_efi_gpt
#define	sd_swap_efi_gpe			ssd_swap_efi_gpe
#define	sd_validate_efi			ssd_validate_efi
#define	sd_use_efi			ssd_use_efi
#define	sd_uselabel			ssd_uselabel
#define	sd_build_default_label		ssd_build_default_label
#define	sd_has_max_chs_vals		ssd_has_max_chs_vals
#define	sd_inq_fill			ssd_inq_fill
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid_block		ssd_get_devid_block
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_create_minor_nodes		ssd_create_minor_nodes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_disable_caching		ssd_disable_caching
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_extract_sense_info_descr	ssd_extract_sense_info_descr
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_uscsi_ioctl			ssd_uscsi_ioctl
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_dkio_get_geometry		ssd_dkio_get_geometry
#define	sd_dkio_set_geometry		ssd_dkio_set_geometry
#define	sd_dkio_get_partition		ssd_dkio_get_partition
#define	sd_dkio_set_partition		ssd_dkio_set_partition
#define	sd_dkio_partition		ssd_dkio_partition
#define	sd_dkio_get_vtoc		ssd_dkio_get_vtoc
#define	sd_dkio_get_efi			ssd_dkio_get_efi
#define	sd_build_user_vtoc		ssd_build_user_vtoc
#define	sd_dkio_set_vtoc		ssd_dkio_set_vtoc
#define	sd_dkio_set_efi			ssd_dkio_set_efi
#define	sd_build_label_vtoc		ssd_build_label_vtoc
#define	sd_write_label			ssd_write_label
#define	sd_clear_vtoc			ssd_clear_vtoc
#define	sd_clear_efi			ssd_clear_efi
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_dkio_get_mboot		ssd_dkio_get_mboot
#define	sd_dkio_set_mboot		ssd_dkio_set_mboot
#define	sd_setup_default_geometry	ssd_setup_default_geometry
#define	sd_update_fdisk_and_vtoc	ssd_update_fdisk_and_vtoc
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
#endif /* _LP64 */
static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);
static int sd_validate_geometry(struct sd_lun *un, int path_flag);

#if defined(_SUNOS_VTOC_16)
static void sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g);
#endif

static void sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize,
    int path_flag);
static int sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize,
    int path_flag);
static void sd_get_physical_geometry(struct sd_lun *un,
    struct geom_cache *pgeom_p, int capacity, int lbasize, int path_flag);
static void sd_get_virtual_geometry(struct sd_lun *un, int capacity,
    int lbasize);
static int sd_uselabel(struct sd_lun *un, struct dk_label *l, int path_flag);
static void sd_swap_efi_gpt(efi_gpt_t *);
static void sd_swap_efi_gpe(int nparts, efi_gpe_t *);
static int sd_validate_efi(efi_gpt_t *);
static int sd_use_efi(struct sd_lun *, int);
static void sd_build_default_label(struct sd_lun *un);

#if defined(_FIRMWARE_NEEDS_FDISK)
static int  sd_has_max_chs_vals(struct ipart *fdp);
#endif
static void sd_inq_fill(char *p, int l, char *s);


static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static daddr_t sd_get_devid_block(struct sd_lun *un);
static int  sd_get_devid(struct sd_lun *un);
static int  sd_get_serialnum(struct sd_lun *un, uchar_t *wwn, int *len);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int  sd_write_deviceid(struct sd_lun *un);
static int  sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int  sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int  sd_ddi_suspend(dev_info_t *devi);
static int  sd_ddi_pm_suspend(struct sd_lun *un);
static int  sd_ddi_resume(dev_info_t *devi);
static int  sd_ddi_pm_resume(struct sd_lun *un);
static int  sdpower(dev_info_t *devi, int component, int level);

static int  sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int  sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int  sd_unit_attach(dev_info_t *devi);
static int  sd_unit_detach(dev_info_t *devi);

static int  sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int  sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int  sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int  sd_send_polled_RQS(struct sd_lun *un);
static int  sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif


static int sd_disable_caching(struct sd_lun *un);
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
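/*
 * Illustrative note (assumption, for orientation): each layering function
 * below takes an index into a chain table along with the unit and buf.  An
 * iostart function performs its layer's work and then dispatches the buf to
 * the function at the next index down the chain; the matching iodone
 * functions unwind back through the chain in reverse order as the command
 * completes.
 */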
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int  sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd,
    enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace,
    int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static diskaddr_t sd_extract_sense_info_descr(
    struct scsi_descr_sense_hdr *sdsp);
static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t asc,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t asc, uint8_t ascq,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    int sense_key, uint8_t asc,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t asc,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    int sense_key,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int  sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag);
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag,
    int geom_validated);
static int sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag,
    int geom_validated);
static int sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag,
    int geom_validated);
static int sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_partition(dev_t dev, caddr_t arg, int flag);
static void sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc);
static int sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag);
static int sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc);
static int sd_write_label(dev_t dev);
static int sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl);
static void sd_clear_vtoc(struct sd_lun *un);
static void sd_clear_efi(struct sd_lun *un);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag);
caddr_t arg, int flag); 1465 static void sd_setup_default_geometry(struct sd_lun *un); 1466 #if defined(__i386) || defined(__amd64) 1467 static int sd_update_fdisk_and_vtoc(struct sd_lun *un); 1468 #endif 1469 1470 /* 1471 * Multi-host Ioctl Prototypes 1472 */ 1473 static int sd_check_mhd(dev_t dev, int interval); 1474 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1475 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1476 static char *sd_sname(uchar_t status); 1477 static void sd_mhd_resvd_recover(void *arg); 1478 static void sd_resv_reclaim_thread(); 1479 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1480 static int sd_reserve_release(dev_t dev, int cmd); 1481 static void sd_rmv_resv_reclaim_req(dev_t dev); 1482 static void sd_mhd_reset_notify_cb(caddr_t arg); 1483 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1484 mhioc_inkeys_t *usrp, int flag); 1485 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1486 mhioc_inresvs_t *usrp, int flag); 1487 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1488 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1489 static int sd_mhdioc_release(dev_t dev); 1490 static int sd_mhdioc_register_devid(dev_t dev); 1491 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1492 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1493 1494 /* 1495 * SCSI removable prototypes 1496 */ 1497 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1498 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1499 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1500 static int sr_pause_resume(dev_t dev, int mode); 1501 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1502 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1503 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1504 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1505 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1506 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1507 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1508 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1509 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1510 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1511 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1512 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1513 static int sr_eject(dev_t dev); 1514 static void sr_ejected(register struct sd_lun *un); 1515 static int sr_check_wp(dev_t dev); 1516 static int sd_check_media(dev_t dev, enum dkio_state state); 1517 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1518 static void sd_delayed_cv_broadcast(void *arg); 1519 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1520 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1521 1522 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1523 1524 /* 1525 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 
1526 */ 1527 static void sd_check_for_writable_cd(struct sd_lun *un); 1528 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1529 static void sd_wm_cache_destructor(void *wm, void *un); 1530 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1531 daddr_t endb, ushort_t typ); 1532 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1533 daddr_t endb); 1534 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1535 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1536 static void sd_read_modify_write_task(void * arg); 1537 static int 1538 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1539 struct buf **bpp); 1540 1541 1542 /* 1543 * Function prototypes for failfast support. 1544 */ 1545 static void sd_failfast_flushq(struct sd_lun *un); 1546 static int sd_failfast_flushq_callback(struct buf *bp); 1547 1548 /* 1549 * Function prototypes to check for lsi devices 1550 */ 1551 static void sd_is_lsi(struct sd_lun *un); 1552 1553 /* 1554 * Function prototypes for x86 support 1555 */ 1556 #if defined(__i386) || defined(__amd64) 1557 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1558 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1559 #endif 1560 1561 /* 1562 * Constants for failfast support: 1563 * 1564 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1565 * failfast processing being performed. 1566 * 1567 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1568 * failfast processing on all bufs with B_FAILFAST set. 1569 */ 1570 1571 #define SD_FAILFAST_INACTIVE 0 1572 #define SD_FAILFAST_ACTIVE 1 1573 1574 /* 1575 * Bitmask to control behavior of buf(9S) flushes when a transition to 1576 * the failfast state occurs. Optional bits include: 1577 * 1578 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1579 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1580 * be flushed. 1581 * 1582 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1583 * driver, in addition to the regular wait queue. This includes the xbuf 1584 * queues. When clear, only the driver's wait queue will be flushed. 1585 */ 1586 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1587 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1588 1589 /* 1590 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1591 * to flush all queues within the driver. 
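 *
 * Illustrative note (an assumption, not original driver text): because
 * sd_failfast_flushctl is an ordinary driver global, a debug or test
 * configuration could widen the flush behavior from /etc/system with a
 * hypothetical entry such as
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * which selects both SD_FAILFAST_FLUSH_ALL_BUFS and
 * SD_FAILFAST_FLUSH_ALL_QUEUES at the next module load.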
1592 */
1593 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
1594
1595
1596 /*
1597 * SD Testing Fault Injection
1598 */
1599 #ifdef SD_FAULT_INJECTION
1600 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
1601 static void sd_faultinjection(struct scsi_pkt *pktp);
1602 static void sd_injection_log(char *buf, struct sd_lun *un);
1603 #endif
1604
1605 /*
1606 * Device driver ops vector
1607 */
1608 static struct cb_ops sd_cb_ops = {
1609 sdopen, /* open */
1610 sdclose, /* close */
1611 sdstrategy, /* strategy */
1612 nodev, /* print */
1613 sddump, /* dump */
1614 sdread, /* read */
1615 sdwrite, /* write */
1616 sdioctl, /* ioctl */
1617 nodev, /* devmap */
1618 nodev, /* mmap */
1619 nodev, /* segmap */
1620 nochpoll, /* poll */
1621 sd_prop_op, /* cb_prop_op */
1622 0, /* streamtab */
1623 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */
1624 CB_REV, /* cb_rev */
1625 sdaread, /* async I/O read entry point */
1626 sdawrite /* async I/O write entry point */
1627 };
1628
1629 static struct dev_ops sd_ops = {
1630 DEVO_REV, /* devo_rev, */
1631 0, /* refcnt */
1632 sdinfo, /* info */
1633 nulldev, /* identify */
1634 sdprobe, /* probe */
1635 sdattach, /* attach */
1636 sddetach, /* detach */
1637 nodev, /* reset */
1638 &sd_cb_ops, /* driver operations */
1639 NULL, /* bus operations */
1640 sdpower /* power */
1641 };
1642
1643
1644 /*
1645 * This is the loadable module wrapper.
1646 */
1647 #include <sys/modctl.h>
1648
1649 static struct modldrv modldrv = {
1650 &mod_driverops, /* Type of module. This one is a driver */
1651 SD_MODULE_NAME, /* Module name. */
1652 &sd_ops /* driver ops */
1653 };
1654
1655
1656 static struct modlinkage modlinkage = {
1657 MODREV_1,
1658 &modldrv,
1659 NULL
1660 };
1661
1662
1663 static struct scsi_asq_key_strings sd_additional_codes[] = {
1664 0x81, 0, "Logical Unit is Reserved",
1665 0x85, 0, "Audio Address Not Valid",
1666 0xb6, 0, "Media Load Mechanism Failed",
1667 0xB9, 0, "Audio Play Operation Aborted",
1668 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1669 0x53, 2, "Medium removal prevented",
1670 0x6f, 0, "Authentication failed during key exchange",
1671 0x6f, 1, "Key not present",
1672 0x6f, 2, "Key not established",
1673 0x6f, 3, "Read without proper authentication",
1674 0x6f, 4, "Mismatched region to this logical unit",
1675 0x6f, 5, "Region reset count error",
1676 0xffff, 0x0, NULL
1677 };
1678
1679
1680 /*
1681 * Struct for passing printing information for sense data messages
1682 */
1683 struct sd_sense_info {
1684 int ssi_severity;
1685 int ssi_pfa_flag;
1686 };
1687
1688 /*
1689 * Table of function pointers for iostart-side routines. Separate "chains"
1690 * of layered function calls are formed by placing the function pointers
1691 * sequentially in the desired order. Functions are called according to an
1692 * incrementing table index ordering. The last function in each chain must
1693 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1694 * in the sd_iodone_chain[] array.
1695 *
1696 * Note: It may seem more natural to organize both the iostart and iodone
1697 * functions together, into an array of structures (or some similar
1698 * organization) with a common index, rather than two separate arrays which
1699 * must be maintained in synchronization. The purpose of this division is
1700 * to achieve improved performance: individual arrays allow for more
1701 * effective cache line utilization on certain platforms.
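 *
 * Illustrative sketch (hypothetical layer, not actual driver code): an
 * iostart-side routine applies its transform to the buf and then hands
 * it to the next function in its chain via the SD_NEXT_IOSTART() macro
 * defined further below, which increments the table index:
 *
 *	static void
 *	sd_xxx_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... apply this layer's transform to bp ...
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}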
1702 */ 1703 1704 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1705 1706 1707 static sd_chain_t sd_iostart_chain[] = { 1708 1709 /* Chain for buf IO for disk drive targets (PM enabled) */ 1710 sd_mapblockaddr_iostart, /* Index: 0 */ 1711 sd_pm_iostart, /* Index: 1 */ 1712 sd_core_iostart, /* Index: 2 */ 1713 1714 /* Chain for buf IO for disk drive targets (PM disabled) */ 1715 sd_mapblockaddr_iostart, /* Index: 3 */ 1716 sd_core_iostart, /* Index: 4 */ 1717 1718 /* Chain for buf IO for removable-media targets (PM enabled) */ 1719 sd_mapblockaddr_iostart, /* Index: 5 */ 1720 sd_mapblocksize_iostart, /* Index: 6 */ 1721 sd_pm_iostart, /* Index: 7 */ 1722 sd_core_iostart, /* Index: 8 */ 1723 1724 /* Chain for buf IO for removable-media targets (PM disabled) */ 1725 sd_mapblockaddr_iostart, /* Index: 9 */ 1726 sd_mapblocksize_iostart, /* Index: 10 */ 1727 sd_core_iostart, /* Index: 11 */ 1728 1729 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1730 sd_mapblockaddr_iostart, /* Index: 12 */ 1731 sd_checksum_iostart, /* Index: 13 */ 1732 sd_pm_iostart, /* Index: 14 */ 1733 sd_core_iostart, /* Index: 15 */ 1734 1735 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1736 sd_mapblockaddr_iostart, /* Index: 16 */ 1737 sd_checksum_iostart, /* Index: 17 */ 1738 sd_core_iostart, /* Index: 18 */ 1739 1740 /* Chain for USCSI commands (all targets) */ 1741 sd_pm_iostart, /* Index: 19 */ 1742 sd_core_iostart, /* Index: 20 */ 1743 1744 /* Chain for checksumming USCSI commands (all targets) */ 1745 sd_checksum_uscsi_iostart, /* Index: 21 */ 1746 sd_pm_iostart, /* Index: 22 */ 1747 sd_core_iostart, /* Index: 23 */ 1748 1749 /* Chain for "direct" USCSI commands (all targets) */ 1750 sd_core_iostart, /* Index: 24 */ 1751 1752 /* Chain for "direct priority" USCSI commands (all targets) */ 1753 sd_core_iostart, /* Index: 25 */ 1754 }; 1755 1756 /* 1757 * Macros to locate the first function of each iostart chain in the 1758 * sd_iostart_chain[] array. These are located by the index in the array. 1759 */ 1760 #define SD_CHAIN_DISK_IOSTART 0 1761 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1762 #define SD_CHAIN_RMMEDIA_IOSTART 5 1763 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1764 #define SD_CHAIN_CHKSUM_IOSTART 12 1765 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1766 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1767 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1768 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1769 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1770 1771 1772 /* 1773 * Table of function pointers for the iodone-side routines for the driver- 1774 * internal layering mechanism. The calling sequence for iodone routines 1775 * uses a decrementing table index, so the last routine called in a chain 1776 * must be at the lowest array index location for that chain. The last 1777 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1778 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1779 * of the functions in an iodone side chain must correspond to the ordering 1780 * of the iostart routines for that chain. Note that there is no iodone 1781 * side routine that corresponds to sd_core_iostart(), so there is no 1782 * entry in the table for this. 
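 *
 * Illustrative sketch (hypothetical layer, not actual driver code): an
 * iodone-side routine undoes the work of its iostart counterpart and
 * then passes the buf back up the chain via the SD_NEXT_IODONE() macro
 * defined further below, which decrements the table index:
 *
 *	static void
 *	sd_xxx_iodone(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... undo this layer's iostart-side work on bp ...
 *		SD_NEXT_IODONE(index, un, bp);
 *	}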
1783 */
1784
1785 static sd_chain_t sd_iodone_chain[] = {
1786
1787 /* Chain for buf IO for disk drive targets (PM enabled) */
1788 sd_buf_iodone, /* Index: 0 */
1789 sd_mapblockaddr_iodone, /* Index: 1 */
1790 sd_pm_iodone, /* Index: 2 */
1791
1792 /* Chain for buf IO for disk drive targets (PM disabled) */
1793 sd_buf_iodone, /* Index: 3 */
1794 sd_mapblockaddr_iodone, /* Index: 4 */
1795
1796 /* Chain for buf IO for removable-media targets (PM enabled) */
1797 sd_buf_iodone, /* Index: 5 */
1798 sd_mapblockaddr_iodone, /* Index: 6 */
1799 sd_mapblocksize_iodone, /* Index: 7 */
1800 sd_pm_iodone, /* Index: 8 */
1801
1802 /* Chain for buf IO for removable-media targets (PM disabled) */
1803 sd_buf_iodone, /* Index: 9 */
1804 sd_mapblockaddr_iodone, /* Index: 10 */
1805 sd_mapblocksize_iodone, /* Index: 11 */
1806
1807 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1808 sd_buf_iodone, /* Index: 12 */
1809 sd_mapblockaddr_iodone, /* Index: 13 */
1810 sd_checksum_iodone, /* Index: 14 */
1811 sd_pm_iodone, /* Index: 15 */
1812
1813 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1814 sd_buf_iodone, /* Index: 16 */
1815 sd_mapblockaddr_iodone, /* Index: 17 */
1816 sd_checksum_iodone, /* Index: 18 */
1817
1818 /* Chain for USCSI commands (non-checksum targets) */
1819 sd_uscsi_iodone, /* Index: 19 */
1820 sd_pm_iodone, /* Index: 20 */
1821
1822 /* Chain for USCSI commands (checksum targets) */
1823 sd_uscsi_iodone, /* Index: 21 */
1824 sd_checksum_uscsi_iodone, /* Index: 22 */
1825 sd_pm_iodone, /* Index: 23 */
1826
1827 /* Chain for "direct" USCSI commands (all targets) */
1828 sd_uscsi_iodone, /* Index: 24 */
1829
1830 /* Chain for "direct priority" USCSI commands (all targets) */
1831 sd_uscsi_iodone, /* Index: 25 */
1832 };
1833
1834
1835 /*
1836 * Macros to locate the "first" function in the sd_iodone_chain[] array for
1837 * each iodone-side chain. These are located by the array index, but as the
1838 * iodone side functions are called in a decrementing-index order, the
1839 * highest index number in each chain must be specified (as these correspond
1840 * to the first function in the iodone chain that will be called by the core
1841 * at IO completion time).
1842 */
1843
1844 #define SD_CHAIN_DISK_IODONE 2
1845 #define SD_CHAIN_DISK_IODONE_NO_PM 4
1846 #define SD_CHAIN_RMMEDIA_IODONE 8
1847 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
1848 #define SD_CHAIN_CHKSUM_IODONE 15
1849 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
1850 #define SD_CHAIN_USCSI_CMD_IODONE 20
1851 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22
1852 #define SD_CHAIN_DIRECT_CMD_IODONE 24
1853 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
1854
1855
1856
1857
1858 /*
1859 * Array to map a layering chain index to the appropriate initpkt routine.
1860 * The redundant entries are present so that the index used for accessing
1861 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1862 * with this table as well.
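 *
 * Conceptually (sketch only; the actual call site lives elsewhere in
 * the driver), packet setup reduces to a direct table lookup:
 *
 *	rval = (*sd_initpkt_map[xp->xb_chain_iostart])(bp, &pktp);
 *
 * so the xb_chain_iostart index selects the initpkt routine with no
 * translation step.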
1863 */
1864 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1865
1866 static sd_initpkt_t sd_initpkt_map[] = {
1867
1868 /* Chain for buf IO for disk drive targets (PM enabled) */
1869 sd_initpkt_for_buf, /* Index: 0 */
1870 sd_initpkt_for_buf, /* Index: 1 */
1871 sd_initpkt_for_buf, /* Index: 2 */
1872
1873 /* Chain for buf IO for disk drive targets (PM disabled) */
1874 sd_initpkt_for_buf, /* Index: 3 */
1875 sd_initpkt_for_buf, /* Index: 4 */
1876
1877 /* Chain for buf IO for removable-media targets (PM enabled) */
1878 sd_initpkt_for_buf, /* Index: 5 */
1879 sd_initpkt_for_buf, /* Index: 6 */
1880 sd_initpkt_for_buf, /* Index: 7 */
1881 sd_initpkt_for_buf, /* Index: 8 */
1882
1883 /* Chain for buf IO for removable-media targets (PM disabled) */
1884 sd_initpkt_for_buf, /* Index: 9 */
1885 sd_initpkt_for_buf, /* Index: 10 */
1886 sd_initpkt_for_buf, /* Index: 11 */
1887
1888 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1889 sd_initpkt_for_buf, /* Index: 12 */
1890 sd_initpkt_for_buf, /* Index: 13 */
1891 sd_initpkt_for_buf, /* Index: 14 */
1892 sd_initpkt_for_buf, /* Index: 15 */
1893
1894 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1895 sd_initpkt_for_buf, /* Index: 16 */
1896 sd_initpkt_for_buf, /* Index: 17 */
1897 sd_initpkt_for_buf, /* Index: 18 */
1898
1899 /* Chain for USCSI commands (non-checksum targets) */
1900 sd_initpkt_for_uscsi, /* Index: 19 */
1901 sd_initpkt_for_uscsi, /* Index: 20 */
1902
1903 /* Chain for USCSI commands (checksum targets) */
1904 sd_initpkt_for_uscsi, /* Index: 21 */
1905 sd_initpkt_for_uscsi, /* Index: 22 */
1906 sd_initpkt_for_uscsi, /* Index: 23 */
1907
1908 /* Chain for "direct" USCSI commands (all targets) */
1909 sd_initpkt_for_uscsi, /* Index: 24 */
1910
1911 /* Chain for "direct priority" USCSI commands (all targets) */
1912 sd_initpkt_for_uscsi, /* Index: 25 */
1913
1914 };
1915
1916
1917 /*
1918 * Array to map a layering chain index to the appropriate destroypkt routine.
1919 * The redundant entries are present so that the index used for accessing
1920 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1921 * with this table as well.
1922 */
1923 typedef void (*sd_destroypkt_t)(struct buf *);
1924
1925 static sd_destroypkt_t sd_destroypkt_map[] = {
1926
1927 /* Chain for buf IO for disk drive targets (PM enabled) */
1928 sd_destroypkt_for_buf, /* Index: 0 */
1929 sd_destroypkt_for_buf, /* Index: 1 */
1930 sd_destroypkt_for_buf, /* Index: 2 */
1931
1932 /* Chain for buf IO for disk drive targets (PM disabled) */
1933 sd_destroypkt_for_buf, /* Index: 3 */
1934 sd_destroypkt_for_buf, /* Index: 4 */
1935
1936 /* Chain for buf IO for removable-media targets (PM enabled) */
1937 sd_destroypkt_for_buf, /* Index: 5 */
1938 sd_destroypkt_for_buf, /* Index: 6 */
1939 sd_destroypkt_for_buf, /* Index: 7 */
1940 sd_destroypkt_for_buf, /* Index: 8 */
1941
1942 /* Chain for buf IO for removable-media targets (PM disabled) */
1943 sd_destroypkt_for_buf, /* Index: 9 */
1944 sd_destroypkt_for_buf, /* Index: 10 */
1945 sd_destroypkt_for_buf, /* Index: 11 */
1946
1947 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1948 sd_destroypkt_for_buf, /* Index: 12 */
1949 sd_destroypkt_for_buf, /* Index: 13 */
1950 sd_destroypkt_for_buf, /* Index: 14 */
1951 sd_destroypkt_for_buf, /* Index: 15 */
1952
1953 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1954 sd_destroypkt_for_buf, /* Index: 16 */
1955 sd_destroypkt_for_buf, /* Index: 17 */
1956 sd_destroypkt_for_buf, /* Index: 18 */
1957
1958 /* Chain for USCSI commands (non-checksum targets) */
1959 sd_destroypkt_for_uscsi, /* Index: 19 */
1960 sd_destroypkt_for_uscsi, /* Index: 20 */
1961
1962 /* Chain for USCSI commands (checksum targets) */
1963 sd_destroypkt_for_uscsi, /* Index: 21 */
1964 sd_destroypkt_for_uscsi, /* Index: 22 */
1965 sd_destroypkt_for_uscsi, /* Index: 23 */
1966
1967 /* Chain for "direct" USCSI commands (all targets) */
1968 sd_destroypkt_for_uscsi, /* Index: 24 */
1969
1970 /* Chain for "direct priority" USCSI commands (all targets) */
1971 sd_destroypkt_for_uscsi, /* Index: 25 */
1972
1973 };
1974
1975
1976
1977 /*
1978 * Array to map a layering chain index to the appropriate chain "type".
1979 * The chain type indicates a specific property/usage of the chain.
1980 * The redundant entries are present so that the index used for accessing
1981 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1982 * with this table as well.
1983 */
1984
1985 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
1986 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
1987 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
1988 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
1989 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
1990 /* (for error recovery) */
1991
1992 static int sd_chain_type_map[] = {
1993
1994 /* Chain for buf IO for disk drive targets (PM enabled) */
1995 SD_CHAIN_BUFIO, /* Index: 0 */
1996 SD_CHAIN_BUFIO, /* Index: 1 */
1997 SD_CHAIN_BUFIO, /* Index: 2 */
1998
1999 /* Chain for buf IO for disk drive targets (PM disabled) */
2000 SD_CHAIN_BUFIO, /* Index: 3 */
2001 SD_CHAIN_BUFIO, /* Index: 4 */
2002
2003 /* Chain for buf IO for removable-media targets (PM enabled) */
2004 SD_CHAIN_BUFIO, /* Index: 5 */
2005 SD_CHAIN_BUFIO, /* Index: 6 */
2006 SD_CHAIN_BUFIO, /* Index: 7 */
2007 SD_CHAIN_BUFIO, /* Index: 8 */
2008
2009 /* Chain for buf IO for removable-media targets (PM disabled) */
2010 SD_CHAIN_BUFIO, /* Index: 9 */
2011 SD_CHAIN_BUFIO, /* Index: 10 */
2012 SD_CHAIN_BUFIO, /* Index: 11 */
2013
2014 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2015 SD_CHAIN_BUFIO, /* Index: 12 */
2016 SD_CHAIN_BUFIO, /* Index: 13 */
2017 SD_CHAIN_BUFIO, /* Index: 14 */
2018 SD_CHAIN_BUFIO, /* Index: 15 */
2019
2020 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2021 SD_CHAIN_BUFIO, /* Index: 16 */
2022 SD_CHAIN_BUFIO, /* Index: 17 */
2023 SD_CHAIN_BUFIO, /* Index: 18 */
2024
2025 /* Chain for USCSI commands (non-checksum targets) */
2026 SD_CHAIN_USCSI, /* Index: 19 */
2027 SD_CHAIN_USCSI, /* Index: 20 */
2028
2029 /* Chain for USCSI commands (checksum targets) */
2030 SD_CHAIN_USCSI, /* Index: 21 */
2031 SD_CHAIN_USCSI, /* Index: 22 */
2032 SD_CHAIN_USCSI, /* Index: 23 */
2033
2034 /* Chain for "direct" USCSI commands (all targets) */
2035 SD_CHAIN_DIRECT, /* Index: 24 */
2036
2037 /* Chain for "direct priority" USCSI commands (all targets) */
2038 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2039 };
2040
2041
2042 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2043 #define SD_IS_BUFIO(xp) \
2044 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2045
2046 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2047 #define SD_IS_DIRECT_PRIORITY(xp) \
2048 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2049
2050
2051
2052 /*
2053 * Struct, array, and macros to map a specific chain to the appropriate
2054 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2055 *
2056 * The sd_chain_index_map[] array is used at attach time to set the various
2057 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2058 * chain to be used with the instance. This allows different instances to use
2059 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2060 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2061 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2062 * dynamically and without the use of locking, and (2) a layer to update the
2063 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2064 * to allow for deferred processing of an IO within the same chain from a
2065 * different execution context.
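 *
 * Illustrative example: a removable-media instance with PM enabled
 * would be set up at attach time roughly as
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
 *
 * so that, per the index map below, its bufs enter the layering at
 * sd_iostart_chain[5] and complete through sd_iodone_chain[8].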
2066 */ 2067 2068 struct sd_chain_index { 2069 int sci_iostart_index; 2070 int sci_iodone_index; 2071 }; 2072 2073 static struct sd_chain_index sd_chain_index_map[] = { 2074 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2075 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2076 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2077 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2078 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2079 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2080 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2081 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2082 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2083 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2084 }; 2085 2086 2087 /* 2088 * The following are indexes into the sd_chain_index_map[] array. 2089 */ 2090 2091 /* un->un_buf_chain_type must be set to one of these */ 2092 #define SD_CHAIN_INFO_DISK 0 2093 #define SD_CHAIN_INFO_DISK_NO_PM 1 2094 #define SD_CHAIN_INFO_RMMEDIA 2 2095 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2096 #define SD_CHAIN_INFO_CHKSUM 4 2097 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2098 2099 /* un->un_uscsi_chain_type must be set to one of these */ 2100 #define SD_CHAIN_INFO_USCSI_CMD 6 2101 /* USCSI with PM disabled is the same as DIRECT */ 2102 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2103 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2104 2105 /* un->un_direct_chain_type must be set to one of these */ 2106 #define SD_CHAIN_INFO_DIRECT_CMD 8 2107 2108 /* un->un_priority_chain_type must be set to one of these */ 2109 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2110 2111 /* size for devid inquiries */ 2112 #define MAX_INQUIRY_SIZE 0xF0 2113 2114 /* 2115 * Macros used by functions to pass a given buf(9S) struct along to the 2116 * next function in the layering chain for further processing. 2117 * 2118 * In the following macros, passing more than three arguments to the called 2119 * routines causes the optimizer for the SPARC compiler to stop doing tail 2120 * call elimination which results in significant performance degradation. 2121 */ 2122 #define SD_BEGIN_IOSTART(index, un, bp) \ 2123 ((*(sd_iostart_chain[index]))(index, un, bp)) 2124 2125 #define SD_BEGIN_IODONE(index, un, bp) \ 2126 ((*(sd_iodone_chain[index]))(index, un, bp)) 2127 2128 #define SD_NEXT_IOSTART(index, un, bp) \ 2129 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2130 2131 #define SD_NEXT_IODONE(index, un, bp) \ 2132 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2133 2134 2135 /* 2136 * Function: _init 2137 * 2138 * Description: This is the driver _init(9E) entry point. 2139 * 2140 * Return Code: Returns the value from mod_install(9F) or 2141 * ddi_soft_state_init(9F) as appropriate. 2142 * 2143 * Context: Called when driver module loaded. 
2144 */
2145
2146 int
2147 _init(void)
2148 {
2149 int err;
2150
2151 /* establish driver name from module name */
2152 sd_label = mod_modname(&modlinkage);
2153
2154 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2155 SD_MAXUNIT);
2156
2157 if (err != 0) {
2158 return (err);
2159 }
2160
2161 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2162 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2163 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2164
2165 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2166 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2167 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2168
2169 /*
2170 * it's ok to init here even for fibre devices
2171 */
2172 sd_scsi_probe_cache_init();
2173
2174 /*
2175 * Creating taskq before mod_install ensures that all callers (threads)
2176 * that enter the module after a successful mod_install encounter
2177 * a valid taskq.
2178 */
2179 sd_taskq_create();
2180
2181 err = mod_install(&modlinkage);
2182 if (err != 0) {
2183 /* delete taskq if install fails */
2184 sd_taskq_delete();
2185
2186 mutex_destroy(&sd_detach_mutex);
2187 mutex_destroy(&sd_log_mutex);
2188 mutex_destroy(&sd_label_mutex);
2189
2190 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2191 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2192 cv_destroy(&sd_tr.srq_inprocess_cv);
2193
2194 sd_scsi_probe_cache_fini();
2195
2196 ddi_soft_state_fini(&sd_state);
2197 return (err);
2198 }
2199
2200 return (err);
2201 }
2202
2203
2204 /*
2205 * Function: _fini
2206 *
2207 * Description: This is the driver _fini(9E) entry point.
2208 *
2209 * Return Code: Returns the value from mod_remove(9F).
2210 *
2211 * Context: Called when driver module is unloaded.
2212 */
2213
2214 int
2215 _fini(void)
2216 {
2217 int err;
2218
2219 if ((err = mod_remove(&modlinkage)) != 0) {
2220 return (err);
2221 }
2222
2223 sd_taskq_delete();
2224
2225 mutex_destroy(&sd_detach_mutex);
2226 mutex_destroy(&sd_log_mutex);
2227 mutex_destroy(&sd_label_mutex);
2228 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2229
2230 sd_scsi_probe_cache_fini();
2231
2232 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2233 cv_destroy(&sd_tr.srq_inprocess_cv);
2234
2235 ddi_soft_state_fini(&sd_state);
2236
2237 return (err);
2238 }
2239
2240
2241 /*
2242 * Function: _info
2243 *
2244 * Description: This is the driver _info(9E) entry point.
2245 *
2246 * Arguments: modinfop - pointer to the driver modinfo structure
2247 *
2248 * Return Code: Returns the value from mod_info(9F).
2249 *
2250 * Context: Kernel thread context
2251 */
2252
2253 int
2254 _info(struct modinfo *modinfop)
2255 {
2256 return (mod_info(&modlinkage, modinfop));
2257 }
2258
2259
2260 /*
2261 * The following routines implement the driver message logging facility.
2262 * They provide component- and level-based debug output filtering.
2263 * Output may also be restricted to messages for a single instance by
2264 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2265 * to NULL, then messages for all instances are printed.
2266 *
2267 * These routines have been cloned from each other due to the language
2268 * constraints of macros and variable argument list processing.
2269 */
2270
2271
2272 /*
2273 * Function: sd_log_err
2274 *
2275 * Description: This routine is called by the SD_ERROR macro for debug
2276 * logging of error conditions.
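 *		For example, as used later in this file:
 *
 *		SD_ERROR(SD_LOG_COMMON, un,
 *		    "sd_enable_descr_sense: mode sense ctrl page failed\n");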
2277 *
2278 * Arguments: comp - driver component being logged
2279 * un - pointer to driver soft state (unit) structure
2280 * fmt - error string and format to be logged
2281 */
2282
2283 static void
2284 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2285 {
2286 va_list ap;
2287 dev_info_t *dev;
2288
2289 ASSERT(un != NULL);
2290 dev = SD_DEVINFO(un);
2291 ASSERT(dev != NULL);
2292
2293 /*
2294 * Filter messages based on the global component and level masks.
2295 * Also print if un matches the value of sd_debug_un, or if
2296 * sd_debug_un is set to NULL.
2297 */
2298 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2299 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2300 mutex_enter(&sd_log_mutex);
2301 va_start(ap, fmt);
2302 (void) vsprintf(sd_log_buf, fmt, ap);
2303 va_end(ap);
2304 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2305 mutex_exit(&sd_log_mutex);
2306 }
2307 #ifdef SD_FAULT_INJECTION
2308 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2309 if (un->sd_injection_mask & comp) {
2310 mutex_enter(&sd_log_mutex);
2311 va_start(ap, fmt);
2312 (void) vsprintf(sd_log_buf, fmt, ap);
2313 va_end(ap);
2314 sd_injection_log(sd_log_buf, un);
2315 mutex_exit(&sd_log_mutex);
2316 }
2317 #endif
2318 }
2319
2320
2321 /*
2322 * Function: sd_log_info
2323 *
2324 * Description: This routine is called by the SD_INFO macro for debug
2325 * logging of general purpose informational conditions.
2326 *
2327 * Arguments: comp - driver component being logged
2328 * un - pointer to driver soft state (unit) structure
2329 * fmt - info string and format to be logged
2330 */
2331
2332 static void
2333 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2334 {
2335 va_list ap;
2336 dev_info_t *dev;
2337
2338 ASSERT(un != NULL);
2339 dev = SD_DEVINFO(un);
2340 ASSERT(dev != NULL);
2341
2342 /*
2343 * Filter messages based on the global component and level masks.
2344 * Also print if un matches the value of sd_debug_un, or if
2345 * sd_debug_un is set to NULL.
2346 */
2347 if ((sd_component_mask & component) &&
2348 (sd_level_mask & SD_LOGMASK_INFO) &&
2349 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2350 mutex_enter(&sd_log_mutex);
2351 va_start(ap, fmt);
2352 (void) vsprintf(sd_log_buf, fmt, ap);
2353 va_end(ap);
2354 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2355 mutex_exit(&sd_log_mutex);
2356 }
2357 #ifdef SD_FAULT_INJECTION
2358 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2359 if (un->sd_injection_mask & component) {
2360 mutex_enter(&sd_log_mutex);
2361 va_start(ap, fmt);
2362 (void) vsprintf(sd_log_buf, fmt, ap);
2363 va_end(ap);
2364 sd_injection_log(sd_log_buf, un);
2365 mutex_exit(&sd_log_mutex);
2366 }
2367 #endif
2368 }
2369
2370
2371 /*
2372 * Function: sd_log_trace
2373 *
2374 * Description: This routine is called by the SD_TRACE macro for debug
2375 * logging of trace conditions (i.e. function entry/exit).
2376 *
2377 * Arguments: comp - driver component being logged
2378 * un - pointer to driver soft state (unit) structure
2379 * fmt - trace string and format to be logged
2380 */
2381
2382 static void
2383 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2384 {
2385 va_list ap;
2386 dev_info_t *dev;
2387
2388 ASSERT(un != NULL);
2389 dev = SD_DEVINFO(un);
2390 ASSERT(dev != NULL);
2391
2392 /*
2393 * Filter messages based on the global component and level masks.
2394 * Also print if un matches the value of sd_debug_un, or if
2395 * sd_debug_un is set to NULL.
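 *
 * (Illustrative note: sd_component_mask, sd_level_mask, and sd_debug_un
 * are driver globals, so on a debug kernel the filtering could be
 * narrowed from /etc/system with hypothetical settings such as
 *
 *	set sd:sd_component_mask = 0x10
 *	set sd:sd_level_mask = 0x3
 *
 * the mask values shown here are placeholders, not defined constants.)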
2396 */ 2397 if ((sd_component_mask & component) && 2398 (sd_level_mask & SD_LOGMASK_TRACE) && 2399 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2400 mutex_enter(&sd_log_mutex); 2401 va_start(ap, fmt); 2402 (void) vsprintf(sd_log_buf, fmt, ap); 2403 va_end(ap); 2404 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2405 mutex_exit(&sd_log_mutex); 2406 } 2407 #ifdef SD_FAULT_INJECTION 2408 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2409 if (un->sd_injection_mask & component) { 2410 mutex_enter(&sd_log_mutex); 2411 va_start(ap, fmt); 2412 (void) vsprintf(sd_log_buf, fmt, ap); 2413 va_end(ap); 2414 sd_injection_log(sd_log_buf, un); 2415 mutex_exit(&sd_log_mutex); 2416 } 2417 #endif 2418 } 2419 2420 2421 /* 2422 * Function: sdprobe 2423 * 2424 * Description: This is the driver probe(9e) entry point function. 2425 * 2426 * Arguments: devi - opaque device info handle 2427 * 2428 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2429 * DDI_PROBE_FAILURE: If the probe failed. 2430 * DDI_PROBE_PARTIAL: If the instance is not present now, 2431 * but may be present in the future. 2432 */ 2433 2434 static int 2435 sdprobe(dev_info_t *devi) 2436 { 2437 struct scsi_device *devp; 2438 int rval; 2439 int instance; 2440 2441 /* 2442 * if it wasn't for pln, sdprobe could actually be nulldev 2443 * in the "__fibre" case. 2444 */ 2445 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2446 return (DDI_PROBE_DONTCARE); 2447 } 2448 2449 devp = ddi_get_driver_private(devi); 2450 2451 if (devp == NULL) { 2452 /* Ooops... nexus driver is mis-configured... */ 2453 return (DDI_PROBE_FAILURE); 2454 } 2455 2456 instance = ddi_get_instance(devi); 2457 2458 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2459 return (DDI_PROBE_PARTIAL); 2460 } 2461 2462 /* 2463 * Call the SCSA utility probe routine to see if we actually 2464 * have a target at this SCSI nexus. 2465 */ 2466 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2467 case SCSIPROBE_EXISTS: 2468 switch (devp->sd_inq->inq_dtype) { 2469 case DTYPE_DIRECT: 2470 rval = DDI_PROBE_SUCCESS; 2471 break; 2472 case DTYPE_RODIRECT: 2473 /* CDs etc. Can be removable media */ 2474 rval = DDI_PROBE_SUCCESS; 2475 break; 2476 case DTYPE_OPTICAL: 2477 /* 2478 * Rewritable optical driver HP115AA 2479 * Can also be removable media 2480 */ 2481 2482 /* 2483 * Do not attempt to bind to DTYPE_OPTICAL if 2484 * pre solaris 9 sparc sd behavior is required 2485 * 2486 * If first time through and sd_dtype_optical_bind 2487 * has not been set in /etc/system check properties 2488 */ 2489 2490 if (sd_dtype_optical_bind < 0) { 2491 sd_dtype_optical_bind = ddi_prop_get_int 2492 (DDI_DEV_T_ANY, devi, 0, 2493 "optical-device-bind", 1); 2494 } 2495 2496 if (sd_dtype_optical_bind == 0) { 2497 rval = DDI_PROBE_FAILURE; 2498 } else { 2499 rval = DDI_PROBE_SUCCESS; 2500 } 2501 break; 2502 2503 case DTYPE_NOTPRESENT: 2504 default: 2505 rval = DDI_PROBE_FAILURE; 2506 break; 2507 } 2508 break; 2509 default: 2510 rval = DDI_PROBE_PARTIAL; 2511 break; 2512 } 2513 2514 /* 2515 * This routine checks for resource allocation prior to freeing, 2516 * so it will take care of the "smart probing" case where a 2517 * scsi_probe() may or may not have been issued and will *not* 2518 * free previously-freed resources. 2519 */ 2520 scsi_unprobe(devp); 2521 return (rval); 2522 } 2523 2524 2525 /* 2526 * Function: sdinfo 2527 * 2528 * Description: This is the driver getinfo(9e) entry point function. 
2529 * Given the device number, return the devinfo pointer from 2530 * the scsi_device structure or the instance number 2531 * associated with the dev_t. 2532 * 2533 * Arguments: dip - pointer to device info structure 2534 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2535 * DDI_INFO_DEVT2INSTANCE) 2536 * arg - driver dev_t 2537 * resultp - user buffer for request response 2538 * 2539 * Return Code: DDI_SUCCESS 2540 * DDI_FAILURE 2541 */ 2542 /* ARGSUSED */ 2543 static int 2544 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2545 { 2546 struct sd_lun *un; 2547 dev_t dev; 2548 int instance; 2549 int error; 2550 2551 switch (infocmd) { 2552 case DDI_INFO_DEVT2DEVINFO: 2553 dev = (dev_t)arg; 2554 instance = SDUNIT(dev); 2555 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2556 return (DDI_FAILURE); 2557 } 2558 *result = (void *) SD_DEVINFO(un); 2559 error = DDI_SUCCESS; 2560 break; 2561 case DDI_INFO_DEVT2INSTANCE: 2562 dev = (dev_t)arg; 2563 instance = SDUNIT(dev); 2564 *result = (void *)(uintptr_t)instance; 2565 error = DDI_SUCCESS; 2566 break; 2567 default: 2568 error = DDI_FAILURE; 2569 } 2570 return (error); 2571 } 2572 2573 /* 2574 * Function: sd_prop_op 2575 * 2576 * Description: This is the driver prop_op(9e) entry point function. 2577 * Return the number of blocks for the partition in question 2578 * or forward the request to the property facilities. 2579 * 2580 * Arguments: dev - device number 2581 * dip - pointer to device info structure 2582 * prop_op - property operator 2583 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2584 * name - pointer to property name 2585 * valuep - pointer or address of the user buffer 2586 * lengthp - property length 2587 * 2588 * Return Code: DDI_PROP_SUCCESS 2589 * DDI_PROP_NOT_FOUND 2590 * DDI_PROP_UNDEFINED 2591 * DDI_PROP_NO_MEMORY 2592 * DDI_PROP_BUF_TOO_SMALL 2593 */ 2594 2595 static int 2596 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2597 char *name, caddr_t valuep, int *lengthp) 2598 { 2599 int instance = ddi_get_instance(dip); 2600 struct sd_lun *un; 2601 uint64_t nblocks64; 2602 2603 /* 2604 * Our dynamic properties are all device specific and size oriented. 2605 * Requests issued under conditions where size is valid are passed 2606 * to ddi_prop_op_nblocks with the size information, otherwise the 2607 * request is passed to ddi_prop_op. Size depends on valid geometry. 2608 */ 2609 un = ddi_get_soft_state(sd_state, instance); 2610 if ((dev == DDI_DEV_T_ANY) || (un == NULL) || 2611 (un->un_f_geometry_is_valid == FALSE)) { 2612 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2613 name, valuep, lengthp)); 2614 } else { 2615 /* get nblocks value */ 2616 ASSERT(!mutex_owned(SD_MUTEX(un))); 2617 mutex_enter(SD_MUTEX(un)); 2618 nblocks64 = (ulong_t)un->un_map[SDPART(dev)].dkl_nblk; 2619 mutex_exit(SD_MUTEX(un)); 2620 2621 return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags, 2622 name, valuep, lengthp, nblocks64)); 2623 } 2624 } 2625 2626 /* 2627 * The following functions are for smart probing: 2628 * sd_scsi_probe_cache_init() 2629 * sd_scsi_probe_cache_fini() 2630 * sd_scsi_clear_probe_cache() 2631 * sd_scsi_probe_with_cache() 2632 */ 2633 2634 /* 2635 * Function: sd_scsi_probe_cache_init 2636 * 2637 * Description: Initializes the probe response cache mutex and head pointer. 
2638 * 2639 * Context: Kernel thread context 2640 */ 2641 2642 static void 2643 sd_scsi_probe_cache_init(void) 2644 { 2645 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2646 sd_scsi_probe_cache_head = NULL; 2647 } 2648 2649 2650 /* 2651 * Function: sd_scsi_probe_cache_fini 2652 * 2653 * Description: Frees all resources associated with the probe response cache. 2654 * 2655 * Context: Kernel thread context 2656 */ 2657 2658 static void 2659 sd_scsi_probe_cache_fini(void) 2660 { 2661 struct sd_scsi_probe_cache *cp; 2662 struct sd_scsi_probe_cache *ncp; 2663 2664 /* Clean up our smart probing linked list */ 2665 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2666 ncp = cp->next; 2667 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2668 } 2669 sd_scsi_probe_cache_head = NULL; 2670 mutex_destroy(&sd_scsi_probe_cache_mutex); 2671 } 2672 2673 2674 /* 2675 * Function: sd_scsi_clear_probe_cache 2676 * 2677 * Description: This routine clears the probe response cache. This is 2678 * done when open() returns ENXIO so that when deferred 2679 * attach is attempted (possibly after a device has been 2680 * turned on) we will retry the probe. Since we don't know 2681 * which target we failed to open, we just clear the 2682 * entire cache. 2683 * 2684 * Context: Kernel thread context 2685 */ 2686 2687 static void 2688 sd_scsi_clear_probe_cache(void) 2689 { 2690 struct sd_scsi_probe_cache *cp; 2691 int i; 2692 2693 mutex_enter(&sd_scsi_probe_cache_mutex); 2694 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2695 /* 2696 * Reset all entries to SCSIPROBE_EXISTS. This will 2697 * force probing to be performed the next time 2698 * sd_scsi_probe_with_cache is called. 2699 */ 2700 for (i = 0; i < NTARGETS_WIDE; i++) { 2701 cp->cache[i] = SCSIPROBE_EXISTS; 2702 } 2703 } 2704 mutex_exit(&sd_scsi_probe_cache_mutex); 2705 } 2706 2707 2708 /* 2709 * Function: sd_scsi_probe_with_cache 2710 * 2711 * Description: This routine implements support for a scsi device probe 2712 * with cache. The driver maintains a cache of the target 2713 * responses to scsi probes. If we get no response from a 2714 * target during a probe inquiry, we remember that, and we 2715 * avoid additional calls to scsi_probe on non-zero LUNs 2716 * on the same target until the cache is cleared. By doing 2717 * so we avoid the 1/4 sec selection timeout for nonzero 2718 * LUNs. lun0 of a target is always probed. 2719 * 2720 * Arguments: devp - Pointer to a scsi_device(9S) structure 2721 * waitfunc - indicates what the allocator routines should 2722 * do when resources are not available. This value 2723 * is passed on to scsi_probe() when that routine 2724 * is called. 2725 * 2726 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2727 * otherwise the value returned by scsi_probe(9F). 
2728 *
2729 * Context: Kernel thread context
2730 */
2731
2732 static int
2733 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
2734 {
2735 struct sd_scsi_probe_cache *cp;
2736 dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
2737 int lun, tgt;
2738
2739 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2740 SCSI_ADDR_PROP_LUN, 0);
2741 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2742 SCSI_ADDR_PROP_TARGET, -1);
2743
2744 /* Make sure caching is enabled and the target is in range */
2745 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
2746 /* do it the old way (no cache) */
2747 return (scsi_probe(devp, waitfn));
2748 }
2749
2750 mutex_enter(&sd_scsi_probe_cache_mutex);
2751
2752 /* Find the cache for this scsi bus instance */
2753 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2754 if (cp->pdip == pdip) {
2755 break;
2756 }
2757 }
2758
2759 /* If we can't find a cache for this pdip, create one */
2760 if (cp == NULL) {
2761 int i;
2762
2763 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
2764 KM_SLEEP);
2765 cp->pdip = pdip;
2766 cp->next = sd_scsi_probe_cache_head;
2767 sd_scsi_probe_cache_head = cp;
2768 for (i = 0; i < NTARGETS_WIDE; i++) {
2769 cp->cache[i] = SCSIPROBE_EXISTS;
2770 }
2771 }
2772
2773 mutex_exit(&sd_scsi_probe_cache_mutex);
2774
2775 /* Recompute the cache for this target if LUN zero */
2776 if (lun == 0) {
2777 cp->cache[tgt] = SCSIPROBE_EXISTS;
2778 }
2779
2780 /* Don't probe if cache remembers a NORESP from a previous LUN. */
2781 if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
2782 return (SCSIPROBE_NORESP);
2783 }
2784
2785 /* Do the actual probe; save & return the result */
2786 return (cp->cache[tgt] = scsi_probe(devp, waitfn));
2787 }
2788
2789
2790 /*
2791 * Function: sd_spin_up_unit
2792 *
2793 * Description: Issues the following commands to spin up the device:
2794 * START STOP UNIT and INQUIRY.
2795 *
2796 * Arguments: un - driver soft state (unit) structure
2797 *
2798 * Return Code: 0 - success
2799 * EIO - failure
2800 * EACCES - reservation conflict
2801 *
2802 * Context: Kernel thread context
2803 */
2804
2805 static int
2806 sd_spin_up_unit(struct sd_lun *un)
2807 {
2808 size_t resid = 0;
2809 int has_conflict = FALSE;
2810 uchar_t *bufaddr;
2811
2812 ASSERT(un != NULL);
2813
2814 /*
2815 * Send a throwaway START UNIT command.
2816 *
2817 * If we fail on this, we don't care presently what precisely
2818 * is wrong. EMC's arrays will also fail this with a check
2819 * condition (0x2/0x4/0x3) if the device is "inactive," but
2820 * we don't want to fail the attach because it may become
2821 * "active" later.
2822 */
2823 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
2824 == EACCES)
2825 has_conflict = TRUE;
2826
2827 /*
2828 * Send another INQUIRY command to the target. This is necessary for
2829 * non-removable media direct access devices because their INQUIRY data
2830 * may not be fully qualified until they are spun up (perhaps via the
2831 * START command above). (Note: This seems to be needed for some
2832 * legacy devices only.) The INQUIRY command should succeed even if a
2833 * Reservation Conflict is present.
2834 */
2835 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
2836 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
2837 kmem_free(bufaddr, SUN_INQSIZE);
2838 return (EIO);
2839 }
2840
2841 /*
2842 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
2843 * Note that this routine does not return a failure here even if the
2844 * INQUIRY command did not return any data. This is a legacy behavior.
2845 */
2846 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
2847 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
2848 }
2849
2850 kmem_free(bufaddr, SUN_INQSIZE);
2851
2852 /* If we hit a reservation conflict above, tell the caller. */
2853 if (has_conflict == TRUE) {
2854 return (EACCES);
2855 }
2856
2857 return (0);
2858 }
2859
2860 #ifdef _LP64
2861 /*
2862 * Function: sd_enable_descr_sense
2863 *
2864 * Description: This routine attempts to select descriptor sense format
2865 * using the Control mode page. Devices that support 64 bit
2866 * LBAs (for >2TB luns) should also implement descriptor
2867 * sense data, so we will call this function whenever we see
2868 * a lun larger than 2TB. If for some reason the device
2869 * supports 64 bit LBAs but doesn't support descriptor sense,
2870 * presumably the mode select will fail. Everything will
2871 * continue to work normally except that we will not get
2872 * complete sense data for commands that fail with an LBA
2873 * larger than 32 bits.
2874 *
2875 * Arguments: un - driver soft state (unit) structure
2876 *
2877 * Context: Kernel thread context only
2878 */
2879
2880 static void
2881 sd_enable_descr_sense(struct sd_lun *un)
2882 {
2883 uchar_t *header;
2884 struct mode_control_scsi3 *ctrl_bufp;
2885 size_t buflen;
2886 size_t bd_len;
2887
2888 /*
2889 * Read MODE SENSE page 0xA, Control Mode Page
2890 */
2891 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
2892 sizeof (struct mode_control_scsi3);
2893 header = kmem_zalloc(buflen, KM_SLEEP);
2894 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
2895 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) {
2896 SD_ERROR(SD_LOG_COMMON, un,
2897 "sd_enable_descr_sense: mode sense ctrl page failed\n");
2898 goto eds_exit;
2899 }
2900
2901 /*
2902 * Determine size of Block Descriptors in order to locate
2903 * the mode page data. ATAPI devices return 0, SCSI devices
2904 * should return MODE_BLK_DESC_LENGTH.
2905 */
2906 bd_len = ((struct mode_header *)header)->bdesc_length;
2907
2908 ctrl_bufp = (struct mode_control_scsi3 *)
2909 (header + MODE_HEADER_LENGTH + bd_len);
2910
2911 /*
2912 * Clear PS bit for MODE SELECT
2913 */
2914 ctrl_bufp->mode_page.ps = 0;
2915
2916 /*
2917 * Set D_SENSE to enable descriptor sense format.
2918 */
2919 ctrl_bufp->d_sense = 1;
2920
2921 /*
2922 * Use MODE SELECT to commit the change to the D_SENSE bit
2923 */
2924 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header,
2925 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) {
2926 SD_INFO(SD_LOG_COMMON, un,
2927 "sd_enable_descr_sense: mode select ctrl page failed\n");
2928 goto eds_exit;
2929 }
2930
2931 eds_exit:
2932 kmem_free(header, buflen);
2933 }
2934 #endif /* _LP64 */
2935
2936
2937 /*
2938 * Function: sd_set_mmc_caps
2939 *
2940 * Description: This routine determines if the device is MMC compliant and if
2941 * the device supports CDDA via a mode sense of the CDVD
2942 * capabilities mode page. Also checks if the device is a
2943 * dvdram writable device.
2944 *
2945 * Arguments: un - driver soft state (unit) structure
2946 *
2947 * Context: Kernel thread context only
2948 */
2949
2950 static void
2951 sd_set_mmc_caps(struct sd_lun *un)
2952 {
2953 struct mode_header_grp2 *sense_mhp;
2954 uchar_t *sense_page;
2955 caddr_t buf;
2956 int bd_len;
2957 int status;
2958 struct uscsi_cmd com;
2959 int rtn;
2960 uchar_t *out_data_rw, *out_data_hd;
2961 uchar_t *rqbuf_rw, *rqbuf_hd;
2962
2963 ASSERT(un != NULL);
2964
2965 /*
2966 * The flags set in this function are: MMC compliant, DVDRAM
2967 * writable device, and CDDA support. Initialize them to FALSE;
2968 * each is set to TRUE only if the capability is detected.
2969 */
2970 un->un_f_mmc_cap = FALSE;
2971 un->un_f_dvdram_writable_device = FALSE;
2972 un->un_f_cfg_cdda = FALSE;
2973
2974 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
2975 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
2976 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
2977
2978 if (status != 0) {
2979 /* command failed; just return */
2980 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
2981 return;
2982 }
2983 /*
2984 * If the mode sense request for the CDROM CAPABILITIES
2985 * page (0x2A) succeeds, the device is assumed to be MMC.
2986 */
2987 un->un_f_mmc_cap = TRUE;
2988
2989 /* Get to the page data */
2990 sense_mhp = (struct mode_header_grp2 *)buf;
2991 bd_len = (sense_mhp->bdesc_length_hi << 8) |
2992 sense_mhp->bdesc_length_lo;
2993 if (bd_len > MODE_BLK_DESC_LENGTH) {
2994 /*
2995 * We did not get back the expected block descriptor
2996 * length, so we cannot determine if the device supports
2997 * CDDA. However, we still indicate the device is MMC
2998 * according to the successful response to the page
2999 * 0x2A mode sense request.
3000 */
3001 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3002 "sd_set_mmc_caps: Mode Sense returned "
3003 "invalid block descriptor length\n");
3004 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3005 return;
3006 }
3007
3008 /* See if read CDDA is supported */
3009 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
3010 bd_len);
3011 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;
3012
3013 /* See if writing DVD RAM is supported. */
3014 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
3015 if (un->un_f_dvdram_writable_device == TRUE) {
3016 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3017 return;
3018 }
3019
3020 /*
3021 * If the device presents DVD or CD capabilities in the mode
3022 * page, we can return here since an RRD will not have
3023 * these capabilities.
3024 */
3025 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3026 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3027 return;
3028 }
3029 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3030
3031 /*
3032 * If un->un_f_dvdram_writable_device is still FALSE,
3033 * check for a Removable Rigid Disk (RRD). An RRD
3034 * device is identified by the features RANDOM_WRITABLE and
3035 * HARDWARE_DEFECT_MANAGEMENT.
3036 */
3037 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3038 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3039
3040 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3041 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3042 RANDOM_WRITABLE);
3043 if (rtn != 0) {
3044 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3045 kmem_free(rqbuf_rw, SENSE_LENGTH);
3046 return;
3047 }
3048
3049 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3050 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3051
3052 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3053 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3054 HARDWARE_DEFECT_MANAGEMENT);
3055 if (rtn == 0) {
3056 /*
3057 * We have good information, check for random writable
3058 * and hardware defect features.
3059 */
3060 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3061 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3062 un->un_f_dvdram_writable_device = TRUE;
3063 }
3064 }
3065
3066 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3067 kmem_free(rqbuf_rw, SENSE_LENGTH);
3068 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3069 kmem_free(rqbuf_hd, SENSE_LENGTH);
3070 }
3071
3072 /*
3073 * Function: sd_check_for_writable_cd
3074 *
3075 * Description: This routine determines if the media in the device is
3076 * writable or not. It uses the GET CONFIGURATION command
3077 * (0x46) to determine if the media is writable.
3078 *
3079 * Arguments: un - driver soft state (unit) structure
3080 *
3081 * Context: Never called at interrupt context.
3082 */
3083
3084 static void
3085 sd_check_for_writable_cd(struct sd_lun *un)
3086 {
3087 struct uscsi_cmd com;
3088 uchar_t *out_data;
3089 uchar_t *rqbuf;
3090 int rtn;
3091 uchar_t *out_data_rw, *out_data_hd;
3092 uchar_t *rqbuf_rw, *rqbuf_hd;
3093 struct mode_header_grp2 *sense_mhp;
3094 uchar_t *sense_page;
3095 caddr_t buf;
3096 int bd_len;
3097 int status;
3098
3099 ASSERT(un != NULL);
3100 ASSERT(mutex_owned(SD_MUTEX(un)));
3101
3102 /*
3103 * Initialize writable media to FALSE; it will be set to TRUE
3104 * only if the configuration info tells us otherwise.
3105 */
3106 un->un_f_mmc_writable_media = FALSE;
3107 mutex_exit(SD_MUTEX(un));
3108
3109 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3110 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3111
3112 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
3113 out_data, SD_PROFILE_HEADER_LEN);
3114
3115 mutex_enter(SD_MUTEX(un));
3116 if (rtn == 0) {
3117 /*
3118 * We have good information, check for writable DVD.
3119 */
3120 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3121 un->un_f_mmc_writable_media = TRUE;
3122 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3123 kmem_free(rqbuf, SENSE_LENGTH);
3124 return;
3125 }
3126 }
3127
3128 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3129 kmem_free(rqbuf, SENSE_LENGTH);
3130
3131 /*
3132 * Determine if this is an RRD type device.
3133 */
3134 mutex_exit(SD_MUTEX(un));
3135 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3136 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
3137 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3138 mutex_enter(SD_MUTEX(un));
3139 if (status != 0) {
3140 /* command failed; just return */
3141 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3142 return;
3143 }
3144
3145 /* Get to the page data */
3146 sense_mhp = (struct mode_header_grp2 *)buf;
3147 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3148 if (bd_len > MODE_BLK_DESC_LENGTH) {
3149 /*
3150 * We did not get back the expected block descriptor length,
3151 * so we cannot check the mode page.
3152 */
3153 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3154 "sd_check_for_writable_cd: Mode Sense returned "
3155 "invalid block descriptor length\n");
3156 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3157 return;
3158 }
3159
3160 /*
3161 * If the device presents DVD or CD capabilities in the mode
3162 * page, we can return here since an RRD device will not have
3163 * these capabilities.
3164 */
3165 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3166 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3167 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3168 return;
3169 }
3170 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3171
3172 /*
3173 * If un->un_f_mmc_writable_media is still FALSE,
3174 * check for RRD type media. An RRD device is identified
3175 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3176 */
3177 mutex_exit(SD_MUTEX(un));
3178 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3179 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3180
3181 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3182 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3183 RANDOM_WRITABLE);
3184 if (rtn != 0) {
3185 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3186 kmem_free(rqbuf_rw, SENSE_LENGTH);
3187 mutex_enter(SD_MUTEX(un));
3188 return;
3189 }
3190
3191 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3192 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3193
3194 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3195 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3196 HARDWARE_DEFECT_MANAGEMENT);
3197 mutex_enter(SD_MUTEX(un));
3198 if (rtn == 0) {
3199 /*
3200 * We have good information, check for random writable
3201 * and hardware defect features as current.
3202 */
3203 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3204 (out_data_rw[10] & 0x1) &&
3205 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3206 (out_data_hd[10] & 0x1)) {
3207 un->un_f_mmc_writable_media = TRUE;
3208 }
3209 }
3210
3211 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3212 kmem_free(rqbuf_rw, SENSE_LENGTH);
3213 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3214 kmem_free(rqbuf_hd, SENSE_LENGTH);
3215 }
3216
3217 /*
3218 * Function: sd_read_unit_properties
3219 *
3220 * Description: The following implements a property lookup mechanism.
3221 * Properties for particular disks (keyed on vendor, model
3222 * and rev numbers) are sought in the sd.conf file via
3223 * sd_process_sdconf_file(), and if not found there, are
3224 * looked for in a list hardcoded in this driver via
3225 * sd_process_sdconf_table(). Once located, the properties
3226 * are used to update the driver unit structure.
3227 * 3228 * Arguments: un - driver soft state (unit) structure 3229 */ 3230 3231 static void 3232 sd_read_unit_properties(struct sd_lun *un) 3233 { 3234 /* 3235 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3236 * the "sd-config-list" property (from the sd.conf file) or if 3237 * there was not a match for the inquiry vid/pid. If this event 3238 * occurs the static driver configuration table is searched for 3239 * a match. 3240 */ 3241 ASSERT(un != NULL); 3242 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3243 sd_process_sdconf_table(un); 3244 } 3245 3246 /* check for LSI device */ 3247 sd_is_lsi(un); 3248 3249 /* 3250 * Set this in sd.conf to 0 in order to disable kstats. The default 3251 * is 1, so they are enabled by default. 3252 */ 3253 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 3254 SD_DEVINFO(un), DDI_PROP_DONTPASS, "enable-partition-kstats", 1)); 3255 } 3256 3257 3258 /* 3259 * Function: sd_process_sdconf_file 3260 * 3261 * Description: Use ddi_getlongprop to obtain the properties from the 3262 * driver's config file (ie, sd.conf) and update the driver 3263 * soft state structure accordingly. 3264 * 3265 * Arguments: un - driver soft state (unit) structure 3266 * 3267 * Return Code: SD_SUCCESS - The properties were successfully set according 3268 * to the driver configuration file. 3269 * SD_FAILURE - The driver config list was not obtained or 3270 * there was no vid/pid match. This indicates that 3271 * the static config table should be used. 3272 * 3273 * The config file has a property, "sd-config-list", which consists of 3274 * one or more duplets as follows: 3275 * 3276 * sd-config-list= 3277 * <duplet>, 3278 * [<duplet>,] 3279 * [<duplet>]; 3280 * 3281 * The structure of each duplet is as follows: 3282 * 3283 * <duplet>:= <vid+pid>,<data-property-name_list> 3284 * 3285 * The first entry of the duplet is the device ID string (the concatenated 3286 * vid & pid; not to be confused with a device_id). This is defined in 3287 * the same way as in the sd_disk_table. 3288 * 3289 * The second part of the duplet is a string that identifies a 3290 * data-property-name-list. The data-property-name-list is defined as 3291 * follows: 3292 * 3293 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3294 * 3295 * The syntax of <data-property-name> depends on the <version> field. 3296 * 3297 * If version = SD_CONF_VERSION_1 we have the following syntax: 3298 * 3299 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3300 * 3301 * where the prop0 value will be used to set prop0 if bit0 set in the 3302 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3303 * 3304 */ 3305 3306 static int 3307 sd_process_sdconf_file(struct sd_lun *un) 3308 { 3309 char *config_list = NULL; 3310 int config_list_len; 3311 int len; 3312 int dupletlen = 0; 3313 char *vidptr; 3314 int vidlen; 3315 char *dnlist_ptr; 3316 char *dataname_ptr; 3317 int dnlist_len; 3318 int dataname_len; 3319 int *data_list; 3320 int data_list_len; 3321 int rval = SD_FAILURE; 3322 int i; 3323 3324 ASSERT(un != NULL); 3325 3326 /* Obtain the configuration list associated with the .conf file */ 3327 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3328 sd_config_list, (caddr_t)&config_list, &config_list_len) 3329 != DDI_PROP_SUCCESS) { 3330 return (SD_FAILURE); 3331 } 3332 3333 /* 3334 * Compare vids in each duplet to the inquiry vid - if a match is 3335 * made, get the data value and update the soft state structure 3336 * accordingly. 
3337 * 3338 * Note: This algorithm is complex and difficult to maintain. It should 3339 * be replaced with a more robust implementation. 3340 */ 3341 for (len = config_list_len, vidptr = config_list; len > 0; 3342 vidptr += dupletlen, len -= dupletlen) { 3343 /* 3344 * Note: The assumption here is that each vid entry is on 3345 * a unique line from its associated duplet. 3346 */ 3347 vidlen = dupletlen = (int)strlen(vidptr); 3348 if ((vidlen == 0) || 3349 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3350 dupletlen++; 3351 continue; 3352 } 3353 3354 /* 3355 * dnlist contains 1 or more blank separated 3356 * data-property-name entries 3357 */ 3358 dnlist_ptr = vidptr + vidlen + 1; 3359 dnlist_len = (int)strlen(dnlist_ptr); 3360 dupletlen += dnlist_len + 2; 3361 3362 /* 3363 * Set a pointer for the first data-property-name 3364 * entry in the list 3365 */ 3366 dataname_ptr = dnlist_ptr; 3367 dataname_len = 0; 3368 3369 /* 3370 * Loop through all data-property-name entries in the 3371 * data-property-name-list setting the properties for each. 3372 */ 3373 while (dataname_len < dnlist_len) { 3374 int version; 3375 3376 /* 3377 * Determine the length of the current 3378 * data-property-name entry by indexing until a 3379 * blank or NULL is encountered. When the space is 3380 * encountered reset it to a NULL for compliance 3381 * with ddi_getlongprop(). 3382 */ 3383 for (i = 0; ((dataname_ptr[i] != ' ') && 3384 (dataname_ptr[i] != '\0')); i++) { 3385 ; 3386 } 3387 3388 dataname_len += i; 3389 /* If not null terminated, Make it so */ 3390 if (dataname_ptr[i] == ' ') { 3391 dataname_ptr[i] = '\0'; 3392 } 3393 dataname_len++; 3394 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3395 "sd_process_sdconf_file: disk:%s, data:%s\n", 3396 vidptr, dataname_ptr); 3397 3398 /* Get the data list */ 3399 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3400 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3401 != DDI_PROP_SUCCESS) { 3402 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3403 "sd_process_sdconf_file: data property (%s)" 3404 " has no value\n", dataname_ptr); 3405 dataname_ptr = dnlist_ptr + dataname_len; 3406 continue; 3407 } 3408 3409 version = data_list[0]; 3410 3411 if (version == SD_CONF_VERSION_1) { 3412 sd_tunables values; 3413 3414 /* Set the properties */ 3415 if (sd_chk_vers1_data(un, data_list[1], 3416 &data_list[2], data_list_len, dataname_ptr) 3417 == SD_SUCCESS) { 3418 sd_get_tunables_from_conf(un, 3419 data_list[1], &data_list[2], 3420 &values); 3421 sd_set_vers1_properties(un, 3422 data_list[1], &values); 3423 rval = SD_SUCCESS; 3424 } else { 3425 rval = SD_FAILURE; 3426 } 3427 } else { 3428 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3429 "data property %s version 0x%x is invalid.", 3430 dataname_ptr, version); 3431 rval = SD_FAILURE; 3432 } 3433 kmem_free(data_list, data_list_len); 3434 dataname_ptr = dnlist_ptr + dataname_len; 3435 } 3436 } 3437 3438 /* free up the memory allocated by ddi_getlongprop */ 3439 if (config_list) { 3440 kmem_free(config_list, config_list_len); 3441 } 3442 3443 return (rval); 3444 } 3445 3446 /* 3447 * Function: sd_get_tunables_from_conf() 3448 * 3449 * 3450 * This function reads the data list from the sd.conf file and pulls 3451 * the values that can have numeric values as arguments and places 3452 * the values in the apropriate sd_tunables member. 
3453 * Since the order of the data list members varies across platforms, 3454 * this function reads them from the data list in a platform-specific 3455 * order and places them into the correct sd_tunables member, which is 3456 * consistent across all platforms. 3457 */ 3458 static void 3459 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3460 sd_tunables *values) 3461 { 3462 int i; 3463 int mask; 3464 3465 bzero(values, sizeof (sd_tunables)); 3466 3467 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3468 3469 mask = 1 << i; 3470 if (mask > flags) { 3471 break; 3472 } 3473 3474 switch (mask & flags) { 3475 case 0: /* This mask bit not set in flags */ 3476 continue; 3477 case SD_CONF_BSET_THROTTLE: 3478 values->sdt_throttle = data_list[i]; 3479 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3480 "sd_get_tunables_from_conf: throttle = %d\n", 3481 values->sdt_throttle); 3482 break; 3483 case SD_CONF_BSET_CTYPE: 3484 values->sdt_ctype = data_list[i]; 3485 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3486 "sd_get_tunables_from_conf: ctype = %d\n", 3487 values->sdt_ctype); 3488 break; 3489 case SD_CONF_BSET_NRR_COUNT: 3490 values->sdt_not_rdy_retries = data_list[i]; 3491 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3492 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3493 values->sdt_not_rdy_retries); 3494 break; 3495 case SD_CONF_BSET_BSY_RETRY_COUNT: 3496 values->sdt_busy_retries = data_list[i]; 3497 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3498 "sd_get_tunables_from_conf: busy_retries = %d\n", 3499 values->sdt_busy_retries); 3500 break; 3501 case SD_CONF_BSET_RST_RETRIES: 3502 values->sdt_reset_retries = data_list[i]; 3503 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3504 "sd_get_tunables_from_conf: reset_retries = %d\n", 3505 values->sdt_reset_retries); 3506 break; 3507 case SD_CONF_BSET_RSV_REL_TIME: 3508 values->sdt_reserv_rel_time = data_list[i]; 3509 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3510 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3511 values->sdt_reserv_rel_time); 3512 break; 3513 case SD_CONF_BSET_MIN_THROTTLE: 3514 values->sdt_min_throttle = data_list[i]; 3515 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3516 "sd_get_tunables_from_conf: min_throttle = %d\n", 3517 values->sdt_min_throttle); 3518 break; 3519 case SD_CONF_BSET_DISKSORT_DISABLED: 3520 values->sdt_disk_sort_dis = data_list[i]; 3521 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3522 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3523 values->sdt_disk_sort_dis); 3524 break; 3525 case SD_CONF_BSET_LUN_RESET_ENABLED: 3526 values->sdt_lun_reset_enable = data_list[i]; 3527 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3528 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3529 "\n", values->sdt_lun_reset_enable); 3530 break; 3531 } 3532 } 3533 } 3534 3535 /* 3536 * Function: sd_process_sdconf_table 3537 * 3538 * Description: Search the static configuration table for a match on the 3539 * inquiry vid/pid and update the driver soft state structure 3540 * according to the table property values for the device.
3541 * 3542 * The form of a configuration table entry is: 3543 * <vid+pid>,<flags>,<property-data> 3544 * "SEAGATE ST42400N",1,63,0,0 (Fibre) 3545 * "SEAGATE ST42400N",1,63,0,0,0,0 (Sparc) 3546 * "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0 (Intel) 3547 * 3548 * Arguments: un - driver soft state (unit) structure 3549 */ 3550 3551 static void 3552 sd_process_sdconf_table(struct sd_lun *un) 3553 { 3554 char *id = NULL; 3555 int table_index; 3556 int idlen; 3557 3558 ASSERT(un != NULL); 3559 for (table_index = 0; table_index < sd_disk_table_size; 3560 table_index++) { 3561 id = sd_disk_table[table_index].device_id; 3562 idlen = strlen(id); 3563 if (idlen == 0) { 3564 continue; 3565 } 3566 3567 /* 3568 * The static configuration table currently does not 3569 * implement version 10 properties. Additionally, 3570 * multiple data-property-name entries are not 3571 * implemented in the static configuration table. 3572 */ 3573 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3574 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3575 "sd_process_sdconf_table: disk %s\n", id); 3576 sd_set_vers1_properties(un, 3577 sd_disk_table[table_index].flags, 3578 sd_disk_table[table_index].properties); 3579 break; 3580 } 3581 } 3582 } 3583 3584 3585 /* 3586 * Function: sd_sdconf_id_match 3587 * 3588 * Description: This local function implements a case sensitive vid/pid 3589 * comparison as well as the boundary cases of wild card and 3590 * multiple blanks. 3591 * 3592 * Note: An implicit assumption made here is that the scsi 3593 * inquiry structure will always keep the vid, pid and 3594 * revision strings in consecutive sequence, so they can be 3595 * read as a single string. If this assumption is not the 3596 * case, a separate string, to be used for the check, needs 3597 * to be built with these strings concatenated. 3598 * 3599 * Arguments: un - driver soft state (unit) structure 3600 * id - table or config file vid/pid 3601 * idlen - length of the vid/pid (bytes) 3602 * 3603 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3604 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3605 */ 3606 3607 static int 3608 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3609 { 3610 struct scsi_inquiry *sd_inq; 3611 int rval = SD_SUCCESS; 3612 3613 ASSERT(un != NULL); 3614 sd_inq = un->un_sd->sd_inq; 3615 ASSERT(id != NULL); 3616 3617 /* 3618 * We use the inq_vid as a pointer to a buffer containing the 3619 * vid and pid and use the entire vid/pid length of the table 3620 * entry for the comparison. This works because the inq_pid 3621 * data member follows inq_vid in the scsi_inquiry structure. 3622 */ 3623 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3624 /* 3625 * The user id string is compared to the inquiry vid/pid 3626 * using a case insensitive comparison and ignoring 3627 * multiple spaces. 3628 */ 3629 rval = sd_blank_cmp(un, id, idlen); 3630 if (rval != SD_SUCCESS) { 3631 /* 3632 * User id strings that start and end with a "*" 3633 * are a special case. These do not have a 3634 * specific vendor, and the product string can 3635 * appear anywhere in the 16 byte PID portion of 3636 * the inquiry data. This is a simple strstr() 3637 * type search for the user id in the inquiry data. 
3638 */ 3639 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3640 char *pidptr = &id[1]; 3641 int i; 3642 int j; 3643 int pidstrlen = idlen - 2; 3644 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3645 pidstrlen; 3646 3647 if (j < 0) { 3648 return (SD_FAILURE); 3649 } 3650 for (i = 0; i < j; i++) { 3651 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3652 pidptr, pidstrlen) == 0) { 3653 rval = SD_SUCCESS; 3654 break; 3655 } 3656 } 3657 } 3658 } 3659 } 3660 return (rval); 3661 } 3662 3663 3664 /* 3665 * Function: sd_blank_cmp 3666 * 3667 * Description: If the id string starts and ends with a space, treat 3668 * multiple consecutive spaces as equivalent to a single 3669 * space. For example, this causes a sd_disk_table entry 3670 * of " NEC CDROM " to match a device's id string of 3671 * "NEC CDROM". 3672 * 3673 * Note: The success exit condition for this routine is that 3674 * the table entry pointer has reached its terminating '\0' 3675 * and the count of remaining inquiry bytes is zero. This will 3676 * happen if the inquiry string returned by the device is padded 3677 * with spaces to be exactly 24 bytes in length (8 byte vid + 3678 * 16 byte pid). The SCSI spec states that the inquiry string 3679 * is to be padded with spaces. 3680 * 3681 * Arguments: un - driver soft state (unit) structure 3682 * id - table or config file vid/pid 3683 * idlen - length of the vid/pid (bytes) 3684 * 3685 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3686 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3687 */ 3688 3689 static int 3690 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3691 { 3692 char *p1; 3693 char *p2; 3694 int cnt; 3695 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3696 sizeof (SD_INQUIRY(un)->inq_pid); 3697 3698 ASSERT(un != NULL); 3699 p2 = un->un_sd->sd_inq->inq_vid; 3700 ASSERT(id != NULL); 3701 p1 = id; 3702 3703 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3704 /* 3705 * Note: string p1 is terminated by a NUL but string p2 3706 * isn't. The end of p2 is determined by cnt. 3707 */ 3708 for (;;) { 3709 /* skip over any extra blanks in both strings */ 3710 while ((*p1 != '\0') && (*p1 == ' ')) { 3711 p1++; 3712 } 3713 while ((cnt != 0) && (*p2 == ' ')) { 3714 p2++; 3715 cnt--; 3716 } 3717 3718 /* compare the two strings */ 3719 if ((cnt == 0) || 3720 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3721 break; 3722 } 3723 while ((cnt > 0) && 3724 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3725 p1++; 3726 p2++; 3727 cnt--; 3728 } 3729 } 3730 } 3731 3732 /* return SD_SUCCESS if both strings match */ 3733 return (((*p1 == '\0') && (cnt == 0)) ?
SD_SUCCESS : SD_FAILURE); 3734 } 3735 3736 3737 /* 3738 * Function: sd_chk_vers1_data 3739 * 3740 * Description: Verify the version 1 device properties provided by the 3741 * user via the configuration file. 3742 * 3743 * Arguments: un - driver soft state (unit) structure 3744 * flags - integer mask indicating properties to be set 3745 * prop_list - integer list of property values 3746 * list_len - length of user provided data * dataname_ptr - name of the data property being checked 3747 * 3748 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3749 * SD_FAILURE - Indicates the user provided data is invalid 3750 */ 3751 3752 static int 3753 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3754 int list_len, char *dataname_ptr) 3755 { 3756 int i; 3757 int mask = 1; 3758 int index = 0; 3759 3760 ASSERT(un != NULL); 3761 3762 /* Check for a NULL property name and list */ 3763 if (dataname_ptr == NULL) { 3764 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3765 "sd_chk_vers1_data: NULL data property name."); 3766 return (SD_FAILURE); 3767 } 3768 if (prop_list == NULL) { 3769 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3770 "sd_chk_vers1_data: %s NULL data property list.", 3771 dataname_ptr); 3772 return (SD_FAILURE); 3773 } 3774 3775 /* Display a warning if undefined bits are set in the flags */ 3776 if (flags & ~SD_CONF_BIT_MASK) { 3777 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3778 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3779 "Properties not set.", 3780 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3781 return (SD_FAILURE); 3782 } 3783 3784 /* 3785 * Verify the length of the list by counting the property bits set 3786 * in the flags and validating that the property list contains a 3787 * value for each bit that is set. 3788 */ 3789 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3790 if (flags & mask) { 3791 index++; 3792 } 3793 mask <<= 1; 3794 } 3795 if ((list_len / sizeof (int)) < (index + 2)) { 3796 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3797 "sd_chk_vers1_data: " 3798 "Data property list %s size is incorrect. " 3799 "Properties not set.", dataname_ptr); 3800 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3801 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3802 return (SD_FAILURE); 3803 } 3804 return (SD_SUCCESS); 3805 } 3806 3807 3808 /* 3809 * Function: sd_set_vers1_properties 3810 * 3811 * Description: Set version 1 device properties based on a property list 3812 * retrieved from the driver configuration file or static 3813 * configuration table. Version 1 properties have the format: 3814 * 3815 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3816 * 3817 * where the prop0 value will be used to set prop0 if bit0 3818 * is set in the flags 3819 * 3820 * Arguments: un - driver soft state (unit) structure 3821 * flags - integer mask indicating properties to be set 3822 * prop_list - integer list of property values 3823 */ 3824 3825 static void 3826 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 3827 { 3828 ASSERT(un != NULL); 3829 3830 /* 3831 * Set the flag to indicate cache is to be disabled. An attempt 3832 * to disable the cache via sd_disable_caching() will be made 3833 * later during attach once the basic initialization is complete.
3834 */ 3835 if (flags & SD_CONF_BSET_NOCACHE) { 3836 un->un_f_opt_disable_cache = TRUE; 3837 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3838 "sd_set_vers1_properties: caching disabled flag set\n"); 3839 } 3840 3841 /* CD-specific configuration parameters */ 3842 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 3843 un->un_f_cfg_playmsf_bcd = TRUE; 3844 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3845 "sd_set_vers1_properties: playmsf_bcd set\n"); 3846 } 3847 if (flags & SD_CONF_BSET_READSUB_BCD) { 3848 un->un_f_cfg_readsub_bcd = TRUE; 3849 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3850 "sd_set_vers1_properties: readsub_bcd set\n"); 3851 } 3852 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 3853 un->un_f_cfg_read_toc_trk_bcd = TRUE; 3854 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3855 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 3856 } 3857 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 3858 un->un_f_cfg_read_toc_addr_bcd = TRUE; 3859 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3860 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 3861 } 3862 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 3863 un->un_f_cfg_no_read_header = TRUE; 3864 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3865 "sd_set_vers1_properties: no_read_header set\n"); 3866 } 3867 if (flags & SD_CONF_BSET_READ_CD_XD4) { 3868 un->un_f_cfg_read_cd_xd4 = TRUE; 3869 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3870 "sd_set_vers1_properties: read_cd_xd4 set\n"); 3871 } 3872 3873 /* Support for devices which do not have valid/unique serial numbers */ 3874 if (flags & SD_CONF_BSET_FAB_DEVID) { 3875 un->un_f_opt_fab_devid = TRUE; 3876 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3877 "sd_set_vers1_properties: fab_devid bit set\n"); 3878 } 3879 3880 /* Support for user throttle configuration */ 3881 if (flags & SD_CONF_BSET_THROTTLE) { 3882 ASSERT(prop_list != NULL); 3883 un->un_saved_throttle = un->un_throttle = 3884 prop_list->sdt_throttle; 3885 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3886 "sd_set_vers1_properties: throttle set to %d\n", 3887 prop_list->sdt_throttle); 3888 } 3889 3890 /* Set the per disk retry count according to the conf file or table. 
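* (An sdt_not_rdy_retries value of zero is ignored below, preserving the driver's default not-ready retry count.)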
*/ 3891 if (flags & SD_CONF_BSET_NRR_COUNT) { 3892 ASSERT(prop_list != NULL); 3893 if (prop_list->sdt_not_rdy_retries) { 3894 un->un_notready_retry_count = 3895 prop_list->sdt_not_rdy_retries; 3896 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3897 "sd_set_vers1_properties: not ready retry count" 3898 " set to %d\n", un->un_notready_retry_count); 3899 } 3900 } 3901 3902 /* The controller type is reported for generic disk driver ioctls */ 3903 if (flags & SD_CONF_BSET_CTYPE) { 3904 ASSERT(prop_list != NULL); 3905 switch (prop_list->sdt_ctype) { 3906 case CTYPE_CDROM: 3907 un->un_ctype = prop_list->sdt_ctype; 3908 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3909 "sd_set_vers1_properties: ctype set to " 3910 "CTYPE_CDROM\n"); 3911 break; 3912 case CTYPE_CCS: 3913 un->un_ctype = prop_list->sdt_ctype; 3914 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3915 "sd_set_vers1_properties: ctype set to " 3916 "CTYPE_CCS\n"); 3917 break; 3918 case CTYPE_ROD: /* RW optical */ 3919 un->un_ctype = prop_list->sdt_ctype; 3920 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3921 "sd_set_vers1_properties: ctype set to " 3922 "CTYPE_ROD\n"); 3923 break; 3924 default: 3925 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3926 "sd_set_vers1_properties: Could not set " 3927 "invalid ctype value (%d)", 3928 prop_list->sdt_ctype); 3929 } 3930 } 3931 3932 /* Purple failover timeout */ 3933 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 3934 ASSERT(prop_list != NULL); 3935 un->un_busy_retry_count = 3936 prop_list->sdt_busy_retries; 3937 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3938 "sd_set_vers1_properties: " 3939 "busy retry count set to %d\n", 3940 un->un_busy_retry_count); 3941 } 3942 3943 /* Purple reset retry count */ 3944 if (flags & SD_CONF_BSET_RST_RETRIES) { 3945 ASSERT(prop_list != NULL); 3946 un->un_reset_retry_count = 3947 prop_list->sdt_reset_retries; 3948 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3949 "sd_set_vers1_properties: " 3950 "reset retry count set to %d\n", 3951 un->un_reset_retry_count); 3952 } 3953 3954 /* Purple reservation release timeout */ 3955 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 3956 ASSERT(prop_list != NULL); 3957 un->un_reserve_release_time = 3958 prop_list->sdt_reserv_rel_time; 3959 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3960 "sd_set_vers1_properties: " 3961 "reservation release timeout set to %d\n", 3962 un->un_reserve_release_time); 3963 } 3964 3965 /* 3966 * Driver flag telling the driver to verify that no commands are pending 3967 * for a device before issuing a Test Unit Ready. This is a workaround 3968 * for a firmware bug in some Seagate eliteI drives. 3969 */ 3970 if (flags & SD_CONF_BSET_TUR_CHECK) { 3971 un->un_f_cfg_tur_check = TRUE; 3972 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3973 "sd_set_vers1_properties: tur queue check set\n"); 3974 } 3975 3976 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 3977 un->un_min_throttle = prop_list->sdt_min_throttle; 3978 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3979 "sd_set_vers1_properties: min throttle set to %d\n", 3980 un->un_min_throttle); 3981 } 3982 3983 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 3984 un->un_f_disksort_disabled = 3985 (prop_list->sdt_disk_sort_dis != 0) ? 3986 TRUE : FALSE; 3987 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3988 "sd_set_vers1_properties: disksort disabled " 3989 "flag set to %d\n", 3990 prop_list->sdt_disk_sort_dis); 3991 } 3992 3993 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 3994 un->un_f_lun_reset_enabled = 3995 (prop_list->sdt_lun_reset_enable != 0) ? 
3996 TRUE : FALSE; 3997 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3998 "sd_set_vers1_properties: lun reset enabled " 3999 "flag set to %d\n", 4000 prop_list->sdt_lun_reset_enable); 4001 } 4002 4003 /* 4004 * Validate the throttle values. 4005 * If any of the numbers are invalid, set everything to defaults. 4006 */ 4007 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4008 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4009 (un->un_min_throttle > un->un_throttle)) { 4010 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4011 un->un_min_throttle = sd_min_throttle; 4012 } 4013 } 4014 4015 /* 4016 * Function: sd_is_lsi() 4017 * 4018 * Description: Check for LSI devices; step through the static device 4019 * table to match the vid/pid. 4020 * 4021 * Args: un - ptr to sd_lun 4022 * 4023 * Notes: When creating a new LSI property, add the new property 4024 * to this function. 4025 */ 4026 static void 4027 sd_is_lsi(struct sd_lun *un) 4028 { 4029 char *id = NULL; 4030 int table_index; 4031 int idlen; 4032 void *prop; 4033 4034 ASSERT(un != NULL); 4035 for (table_index = 0; table_index < sd_disk_table_size; 4036 table_index++) { 4037 id = sd_disk_table[table_index].device_id; 4038 idlen = strlen(id); 4039 if (idlen == 0) { 4040 continue; 4041 } 4042 4043 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4044 prop = sd_disk_table[table_index].properties; 4045 if (prop == &lsi_properties || 4046 prop == &lsi_oem_properties || 4047 prop == &lsi_properties_scsi || 4048 prop == &symbios_properties) { 4049 un->un_f_cfg_is_lsi = TRUE; 4050 } 4051 break; 4052 } 4053 } 4054 } 4055 4056 4057 /* 4058 * The following routines support reading and interpretation of disk labels, 4059 * including Solaris BE (8-slice) vtoc's, Solaris LE (16-slice) vtoc's, and 4060 * fdisk tables. 4061 */ 4062 4063 /* 4064 * Function: sd_validate_geometry 4065 * 4066 * Description: Read the label from the disk (if present). Update the unit's 4067 * geometry and vtoc information from the data in the label. 4068 * Verify that the label is valid. 4069 * 4070 * Arguments: un - driver soft state (unit) structure 4071 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4072 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4073 * to use the USCSI "direct" chain and bypass the normal 4074 * command waitq. 4075 * 4076 * Return Code: 0 - Successful completion 4077 * EINVAL - Invalid value in un->un_tgt_blocksize or 4078 * un->un_blockcount; or label on disk is corrupted 4079 * or unreadable. 4080 * EACCES - Reservation conflict at the device. 4081 * ENOMEM - Resource allocation error 4082 * ENOTSUP - geometry not applicable 4083 * 4084 * Context: Kernel thread only (can sleep). 4085 */ 4086 4087 static int 4088 sd_validate_geometry(struct sd_lun *un, int path_flag) 4089 { 4090 static char labelstring[128]; 4091 static char buf[256]; 4092 char *label = NULL; 4093 int label_error = 0; 4094 int gvalid = un->un_f_geometry_is_valid; 4095 int lbasize; 4096 uint_t capacity; 4097 int count; 4098 4099 ASSERT(un != NULL); 4100 ASSERT(mutex_owned(SD_MUTEX(un))); 4101 4102 /* 4103 * If the required values are not valid, then try getting them 4104 * once via read capacity. If that fails, then fail this call. 4105 * This is necessary with the new mpxio failover behavior in 4106 * the T300 where we can get an attach for the inactive path 4107 * before the active path.
The inactive path fails commands with 4108 * sense data of 02,04,88; this happens on the read capacity issued 4109 * before mpxio has enough knowledge to decide whether it should 4110 * force a failover or not (which it won't do at attach anyhow). 4111 * If the read capacity at attach time fails, un_tgt_blocksize and 4112 * un_blockcount won't be valid. 4113 */ 4114 if ((un->un_f_tgt_blocksize_is_valid != TRUE) || 4115 (un->un_f_blockcount_is_valid != TRUE)) { 4116 uint64_t cap; 4117 uint32_t lbasz; 4118 int rval; 4119 4120 mutex_exit(SD_MUTEX(un)); 4121 rval = sd_send_scsi_READ_CAPACITY(un, &cap, 4122 &lbasz, SD_PATH_DIRECT); 4123 mutex_enter(SD_MUTEX(un)); 4124 if (rval == 0) { 4125 /* 4126 * The following relies on 4127 * sd_send_scsi_READ_CAPACITY never 4128 * returning 0 for capacity and/or lbasize. 4129 */ 4130 sd_update_block_info(un, lbasz, cap); 4131 } 4132 4133 if ((un->un_f_tgt_blocksize_is_valid != TRUE) || 4134 (un->un_f_blockcount_is_valid != TRUE)) { 4135 return (EINVAL); 4136 } 4137 } 4138 4139 /* 4140 * Copy the lbasize and capacity so that if they're reset while we're 4141 * not holding the SD_MUTEX, we will continue to use valid values 4142 * after the SD_MUTEX is reacquired. (4119659) 4143 */ 4144 lbasize = un->un_tgt_blocksize; 4145 capacity = un->un_blockcount; 4146 4147 #if defined(_SUNOS_VTOC_16) 4148 /* 4149 * Set up the "whole disk" fdisk partition; this should always 4150 * exist, regardless of whether the disk contains an fdisk table 4151 * or vtoc. 4152 */ 4153 un->un_map[P0_RAW_DISK].dkl_cylno = 0; 4154 un->un_map[P0_RAW_DISK].dkl_nblk = capacity; 4155 #endif 4156 4157 /* 4158 * Refresh the logical and physical geometry caches. 4159 * (Data from the MODE SENSE format/rigid disk geometry pages 4160 * and from scsi_ifgetcap("geometry").) 4161 */ 4162 sd_resync_geom_caches(un, capacity, lbasize, path_flag); 4163 4164 label_error = sd_use_efi(un, path_flag); 4165 if (label_error == 0) { 4166 /* found a valid EFI label */ 4167 SD_TRACE(SD_LOG_IO_PARTITION, un, 4168 "sd_validate_geometry: found EFI label\n"); 4169 un->un_solaris_offset = 0; 4170 un->un_solaris_size = capacity; 4171 return (ENOTSUP); 4172 } 4173 if (un->un_blockcount > DK_MAX_BLOCKS) { 4174 if (label_error == ESRCH) { 4175 /* 4176 * they've configured a LUN over 1TB, but used 4177 * format.dat to restrict format's view of the 4178 * capacity to be under 1TB 4179 */ 4180 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4181 "is >1TB and has a VTOC label: use format(1M) to either decrease the"); 4182 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 4183 "size to be < 1TB or relabel the disk with an EFI label"); 4184 } else { 4185 /* unlabeled disk over 1TB */ 4186 return (ENOTSUP); 4187 } 4188 } 4189 label_error = 0; 4190 4191 /* 4192 * At this point the disk is either labeled with a VTOC or it is 4193 * under 1TB. 4194 */ 4195 4196 /* 4197 * Only DIRECT ACCESS devices will have Sun labels. 4198 * CD's supposedly have a Sun label, too 4199 */ 4200 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) { 4201 struct dk_label *dkl; 4202 offset_t dkl1; 4203 offset_t label_addr, real_addr; 4204 int rval; 4205 size_t buffer_size; 4206 4207 /* 4208 * Note: This will set up un->un_solaris_size and 4209 * un->un_solaris_offset.
4210 */ 4211 switch (sd_read_fdisk(un, capacity, lbasize, path_flag)) { 4212 case SD_CMD_RESERVATION_CONFLICT: 4213 ASSERT(mutex_owned(SD_MUTEX(un))); 4214 return (EACCES); 4215 case SD_CMD_FAILURE: 4216 ASSERT(mutex_owned(SD_MUTEX(un))); 4217 return (ENOMEM); 4218 } 4219 4220 if (un->un_solaris_size <= DK_LABEL_LOC) { 4221 /* 4222 * Found fdisk table but no Solaris partition entry, 4223 * so don't call sd_uselabel() and don't create 4224 * a default label. 4225 */ 4226 label_error = 0; 4227 un->un_f_geometry_is_valid = TRUE; 4228 goto no_solaris_partition; 4229 } 4230 label_addr = (daddr_t)(un->un_solaris_offset + DK_LABEL_LOC); 4231 4232 /* 4233 * sys_blocksize != tgt_blocksize, need to re-adjust 4234 * blkno and save the index to beginning of dk_label 4235 */ 4236 real_addr = SD_SYS2TGTBLOCK(un, label_addr); 4237 buffer_size = SD_REQBYTES2TGTBYTES(un, 4238 sizeof (struct dk_label)); 4239 4240 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_validate_geometry: " 4241 "label_addr: 0x%x allocation size: 0x%x\n", 4242 label_addr, buffer_size); 4243 dkl = kmem_zalloc(buffer_size, KM_NOSLEEP); 4244 if (dkl == NULL) { 4245 return (ENOMEM); 4246 } 4247 4248 mutex_exit(SD_MUTEX(un)); 4249 rval = sd_send_scsi_READ(un, dkl, buffer_size, real_addr, 4250 path_flag); 4251 mutex_enter(SD_MUTEX(un)); 4252 4253 switch (rval) { 4254 case 0: 4255 /* 4256 * sd_uselabel will establish that the geometry 4257 * is valid. 4258 * For sys_blocksize != tgt_blocksize, need 4259 * to index into the beginning of dk_label 4260 */ 4261 dkl1 = (daddr_t)dkl 4262 + SD_TGTBYTEOFFSET(un, label_addr, real_addr); 4263 if (sd_uselabel(un, (struct dk_label *)(uintptr_t)dkl1, 4264 path_flag) != SD_LABEL_IS_VALID) { 4265 label_error = EINVAL; 4266 } 4267 break; 4268 case EACCES: 4269 label_error = EACCES; 4270 break; 4271 default: 4272 label_error = EINVAL; 4273 break; 4274 } 4275 4276 kmem_free(dkl, buffer_size); 4277 4278 #if defined(_SUNOS_VTOC_8) 4279 label = (char *)un->un_asciilabel; 4280 #elif defined(_SUNOS_VTOC_16) 4281 label = (char *)un->un_vtoc.v_asciilabel; 4282 #else 4283 #error "No VTOC format defined." 4284 #endif 4285 } 4286 4287 /* 4288 * If a valid label was not found, AND if no reservation conflict 4289 * was detected, then go ahead and create a default label (4069506). 4290 * 4291 * Note: currently, for VTOC_8 devices, the default label is created 4292 * for removables only. For VTOC_16 devices, the default label will 4293 * be created for both removables and non-removables alike. 4294 * (see sd_build_default_label) 4295 */ 4296 #if defined(_SUNOS_VTOC_8) 4297 if (ISREMOVABLE(un) && (label_error != EACCES)) { 4298 #elif defined(_SUNOS_VTOC_16) 4299 if (label_error != EACCES) { 4300 #endif 4301 if (un->un_f_geometry_is_valid == FALSE) { 4302 sd_build_default_label(un); 4303 } 4304 label_error = 0; 4305 } 4306 4307 no_solaris_partition: 4308 if ((!ISREMOVABLE(un) || 4309 (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) && 4310 (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) { 4311 /* 4312 * Print out a message indicating who and what we are. 4313 * We do this only when we happen to really validate the 4314 * geometry. We may call sd_validate_geometry() at other 4315 * times, e.g., ioctl()'s like Get VTOC in which case we 4316 * don't want to print the label. 
4317 * If the geometry is valid, print the label string, 4318 * else print vendor and product info, if available 4319 */ 4320 if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) { 4321 SD_INFO(SD_LOG_ATTACH_DETACH, un, "?<%s>\n", label); 4322 } else { 4323 mutex_enter(&sd_label_mutex); 4324 sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX, 4325 labelstring); 4326 sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX, 4327 &labelstring[64]); 4328 (void) sprintf(buf, "?Vendor '%s', product '%s'", 4329 labelstring, &labelstring[64]); 4330 if (un->un_f_blockcount_is_valid == TRUE) { 4331 (void) sprintf(&buf[strlen(buf)], 4332 ", %llu %u byte blocks\n", 4333 (longlong_t)un->un_blockcount, 4334 un->un_tgt_blocksize); 4335 } else { 4336 (void) sprintf(&buf[strlen(buf)], 4337 ", (unknown capacity)\n"); 4338 } 4339 SD_INFO(SD_LOG_ATTACH_DETACH, un, buf); 4340 mutex_exit(&sd_label_mutex); 4341 } 4342 } 4343 4344 #if defined(_SUNOS_VTOC_16) 4345 /* 4346 * If we have valid geometry, set up the remaining fdisk partitions. 4347 * Note that dkl_cylno is not used for the fdisk map entries, so 4348 * we set it to an entirely bogus value. 4349 */ 4350 for (count = 0; count < FD_NUMPART; count++) { 4351 un->un_map[FDISK_P1 + count].dkl_cylno = -1; 4352 un->un_map[FDISK_P1 + count].dkl_nblk = 4353 un->un_fmap[count].fmap_nblk; 4354 4355 un->un_offset[FDISK_P1 + count] = 4356 un->un_fmap[count].fmap_start; 4357 } 4358 #endif 4359 4360 for (count = 0; count < NDKMAP; count++) { 4361 #if defined(_SUNOS_VTOC_8) 4362 struct dk_map *lp = &un->un_map[count]; 4363 un->un_offset[count] = 4364 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 4365 #elif defined(_SUNOS_VTOC_16) 4366 struct dkl_partition *vp = &un->un_vtoc.v_part[count]; 4367 4368 un->un_offset[count] = vp->p_start + un->un_solaris_offset; 4369 #else 4370 #error "No VTOC format defined." 4371 #endif 4372 } 4373 4374 return (label_error); 4375 } 4376 4377 4378 #if defined(_SUNOS_VTOC_16) 4379 /* 4380 * Macro: MAX_BLKS 4381 * 4382 * This macro is used for table entries where we need to have the largest 4383 * possible sector value for that head & SPT (sectors per track) 4384 * combination. Other entries for some smaller disk sizes are set by 4385 * convention to match those used by X86 BIOS usage. 4386 */ 4387 #define MAX_BLKS(heads, spt) UINT16_MAX * heads * spt, heads, spt 4388 4389 /* 4390 * Function: sd_convert_geometry 4391 * 4392 * Description: Convert physical geometry into a dk_geom structure. In 4393 * other words, make sure we don't wrap 16-bit values. 4394 * e.g. converting from geom_cache to dk_geom 4395 * 4396 * Context: Kernel thread only 4397 */ 4398 static void 4399 sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g) 4400 { 4401 int i; 4402 static const struct chs_values { 4403 uint_t max_cap; /* Max Capacity for this HS. */ 4404 uint_t nhead; /* Heads to use. */ 4405 uint_t nsect; /* SPT to use. */ 4406 } CHS_values[] = { 4407 {0x00200000, 64, 32}, /* 1GB or smaller disk. */ 4408 {0x01000000, 128, 32}, /* 8GB or smaller disk. */ 4409 {MAX_BLKS(255, 63)}, /* 502.02GB or smaller disk. */ 4410 {MAX_BLKS(255, 126)}, /* .98TB or smaller disk. 
*/ 4411 {DK_MAX_BLOCKS, 255, 189} /* Max size is just under 1TB */ 4412 }; 4413 4414 /* Unlabeled SCSI floppy device */ 4415 if (capacity <= 0x1000) { 4416 un_g->dkg_nhead = 2; 4417 un_g->dkg_ncyl = 80; 4418 un_g->dkg_nsect = capacity / (un_g->dkg_nhead * un_g->dkg_ncyl); 4419 return; 4420 } 4421 4422 /* 4423 * For all devices we calculate cylinders using the 4424 * heads and sectors we assign based on capacity of the 4425 * device. The table is designed to be compatible with the 4426 * way other operating systems lay out fdisk tables for X86 4427 * and to ensure that the cylinders never exceed 65535 to 4428 * prevent problems with X86 ioctls that report geometry. 4429 * We use SPT values that are multiples of 63: other OSes that 4430 * are not limited to 16 bits for cylinders stop at 63 SPT, so 4431 * we make do by using multiples of 63 SPT. 4432 * 4433 * Note that capacities greater than or equal to 1TB will simply 4434 * get the largest geometry from the table. This should be okay 4435 * since disks this large shouldn't be using CHS values anyway. 4436 */ 4437 for (i = 0; CHS_values[i].max_cap < capacity && 4438 CHS_values[i].max_cap != DK_MAX_BLOCKS; i++) 4439 ; 4440 4441 un_g->dkg_nhead = CHS_values[i].nhead; 4442 un_g->dkg_nsect = CHS_values[i].nsect; 4443 } 4444 #endif 4445 4446 4447 /* 4448 * Function: sd_resync_geom_caches 4449 * 4450 * Description: (Re)initialize both geometry caches: the virtual geometry 4451 * information is extracted from the HBA (the "geometry" 4452 * capability), and the physical geometry cache data is 4453 * generated by issuing MODE SENSE commands. 4454 * 4455 * Arguments: un - driver soft state (unit) structure 4456 * capacity - disk capacity in #blocks 4457 * lbasize - disk block size in bytes 4458 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4459 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4460 * to use the USCSI "direct" chain and bypass the normal 4461 * command waitq. 4462 * 4463 * Context: Kernel thread only (can sleep). 4464 */ 4465 4466 static void 4467 sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize, 4468 int path_flag) 4469 { 4470 struct geom_cache pgeom; 4471 struct geom_cache *pgeom_p = &pgeom; 4472 int spc; 4473 unsigned short nhead; 4474 unsigned short nsect; 4475 4476 ASSERT(un != NULL); 4477 ASSERT(mutex_owned(SD_MUTEX(un))); 4478 4479 /* 4480 * Ask the controller for its logical geometry. 4481 * Note: if the HBA does not support scsi_ifgetcap("geometry"), 4482 * then the lgeom cache will be invalid. 4483 */ 4484 sd_get_virtual_geometry(un, capacity, lbasize); 4485 4486 /* 4487 * Initialize the pgeom cache from lgeom, so that if MODE SENSE 4488 * doesn't work, DKIOCG_PHYSGEOM can return reasonable values. 4489 */ 4490 if (un->un_lgeom.g_nsect == 0 || un->un_lgeom.g_nhead == 0) { 4491 /* 4492 * Note: Perhaps this needs to be more adaptive? The rationale 4493 * is that, if there's no HBA geometry from the HBA driver, any 4494 * guess is good, since this is the physical geometry. If MODE
If MODE 4495 * SENSE fails this gives a max cylinder size for non-LBA access 4496 */ 4497 nhead = 255; 4498 nsect = 63; 4499 } else { 4500 nhead = un->un_lgeom.g_nhead; 4501 nsect = un->un_lgeom.g_nsect; 4502 } 4503 4504 if (ISCD(un)) { 4505 pgeom_p->g_nhead = 1; 4506 pgeom_p->g_nsect = nsect * nhead; 4507 } else { 4508 pgeom_p->g_nhead = nhead; 4509 pgeom_p->g_nsect = nsect; 4510 } 4511 4512 spc = pgeom_p->g_nhead * pgeom_p->g_nsect; 4513 pgeom_p->g_capacity = capacity; 4514 pgeom_p->g_ncyl = pgeom_p->g_capacity / spc; 4515 pgeom_p->g_acyl = 0; 4516 4517 /* 4518 * Retrieve fresh geometry data from the hardware, stash it 4519 * here temporarily before we rebuild the incore label. 4520 * 4521 * We want to use the MODE SENSE commands to derive the 4522 * physical geometry of the device, but if either command 4523 * fails, the logical geometry is used as the fallback for 4524 * disk label geometry. 4525 */ 4526 mutex_exit(SD_MUTEX(un)); 4527 sd_get_physical_geometry(un, pgeom_p, capacity, lbasize, path_flag); 4528 mutex_enter(SD_MUTEX(un)); 4529 4530 /* 4531 * Now update the real copy while holding the mutex. This 4532 * way the global copy is never in an inconsistent state. 4533 */ 4534 bcopy(pgeom_p, &un->un_pgeom, sizeof (un->un_pgeom)); 4535 4536 SD_INFO(SD_LOG_COMMON, un, "sd_resync_geom_caches: " 4537 "(cached from lgeom)\n"); 4538 SD_INFO(SD_LOG_COMMON, un, 4539 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4540 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 4541 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 4542 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 4543 "intrlv: %d; rpm: %d\n", un->un_pgeom.g_secsize, 4544 un->un_pgeom.g_capacity, un->un_pgeom.g_intrlv, 4545 un->un_pgeom.g_rpm); 4546 } 4547 4548 4549 /* 4550 * Function: sd_read_fdisk 4551 * 4552 * Description: utility routine to read the fdisk table. 4553 * 4554 * Arguments: un - driver soft state (unit) structure 4555 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4556 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4557 * to use the USCSI "direct" chain and bypass the normal 4558 * command waitq. 4559 * 4560 * Return Code: SD_CMD_SUCCESS 4561 * SD_CMD_FAILURE 4562 * 4563 * Context: Kernel thread only (can sleep). 4564 */ 4565 /* ARGSUSED */ 4566 static int 4567 sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize, int path_flag) 4568 { 4569 #if defined(_NO_FDISK_PRESENT) 4570 4571 un->un_solaris_offset = 0; 4572 un->un_solaris_size = capacity; 4573 bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART); 4574 return (SD_CMD_SUCCESS); 4575 4576 #elif defined(_FIRMWARE_NEEDS_FDISK) 4577 4578 struct ipart *fdp; 4579 struct mboot *mbp; 4580 struct ipart fdisk[FD_NUMPART]; 4581 int i; 4582 char sigbuf[2]; 4583 caddr_t bufp; 4584 int uidx; 4585 int rval; 4586 int lba = 0; 4587 uint_t solaris_offset; /* offset to solaris part. 
*/ 4588 daddr_t solaris_size; /* size of solaris partition */ 4589 uint32_t blocksize; 4590 4591 ASSERT(un != NULL); 4592 ASSERT(mutex_owned(SD_MUTEX(un))); 4593 ASSERT(un->un_f_tgt_blocksize_is_valid == TRUE); 4594 4595 blocksize = un->un_tgt_blocksize; 4596 4597 /* 4598 * Start off assuming no fdisk table 4599 */ 4600 solaris_offset = 0; 4601 solaris_size = capacity; 4602 4603 mutex_exit(SD_MUTEX(un)); 4604 bufp = kmem_zalloc(blocksize, KM_SLEEP); 4605 rval = sd_send_scsi_READ(un, bufp, blocksize, 0, path_flag); 4606 mutex_enter(SD_MUTEX(un)); 4607 4608 if (rval != 0) { 4609 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4610 "sd_read_fdisk: fdisk read err\n"); 4611 kmem_free(bufp, blocksize); 4612 return (SD_CMD_FAILURE); 4613 } 4614 4615 mbp = (struct mboot *)bufp; 4616 4617 /* 4618 * The fdisk table does not begin on a 4-byte boundary within the 4619 * master boot record, so we copy it to an aligned structure to avoid 4620 * alignment exceptions on some processors. 4621 */ 4622 bcopy(&mbp->parts[0], fdisk, sizeof (fdisk)); 4623 4624 /* 4625 * Check for lba support before verifying sig; sig might not be 4626 * there, say on a blank disk, but the max_chs mark may still 4627 * be present. 4628 * 4629 * Note: LBA support and BEFs are an x86-only concept but this 4630 * code should work OK on SPARC as well. 4631 */ 4632 4633 /* 4634 * First, check for lba-access-ok on the root node (or prom root node); 4635 * if it is present there, we don't need to search the fdisk table. 4636 */ 4637 if (ddi_getprop(DDI_DEV_T_ANY, ddi_root_node(), 0, 4638 "lba-access-ok", 0) != 0) { 4639 /* All drives do LBA; don't search fdisk table */ 4640 lba = 1; 4641 } else { 4642 /* Okay, look for mark in fdisk table */ 4643 for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) { 4644 /* accumulate "lba" value from all partitions */ 4645 lba = (lba || sd_has_max_chs_vals(fdp)); 4646 } 4647 } 4648 4649 if (lba != 0) { 4650 dev_t dev = sd_make_device(SD_DEVINFO(un)); 4651 4652 if (ddi_getprop(dev, SD_DEVINFO(un), DDI_PROP_DONTPASS, 4653 "lba-access-ok", 0) == 0) { 4654 /* not found; create it */ 4655 if (ddi_prop_create(dev, SD_DEVINFO(un), 0, 4656 "lba-access-ok", (caddr_t)NULL, 0) != 4657 DDI_PROP_SUCCESS) { 4658 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4659 "sd_read_fdisk: Can't create lba property " 4660 "for instance %d\n", 4661 ddi_get_instance(SD_DEVINFO(un))); 4662 } 4663 } 4664 } 4665 4666 bcopy(&mbp->signature, sigbuf, sizeof (sigbuf)); 4667 4668 /* 4669 * Endian-independent signature check 4670 */ 4671 if (((sigbuf[1] & 0xFF) != ((MBB_MAGIC >> 8) & 0xFF)) || 4672 (sigbuf[0] != (MBB_MAGIC & 0xFF))) { 4673 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4674 "sd_read_fdisk: no fdisk\n"); 4675 bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART); 4676 rval = SD_CMD_SUCCESS; 4677 goto done; 4678 } 4679 4680 #ifdef SDDEBUG 4681 if (sd_level_mask & SD_LOGMASK_INFO) { 4682 fdp = fdisk; 4683 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_read_fdisk:\n"); 4684 SD_INFO(SD_LOG_ATTACH_DETACH, un, " relsect " 4685 "numsect sysid bootid\n"); 4686 for (i = 0; i < FD_NUMPART; i++, fdp++) { 4687 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4688 " %d: %8d %8d 0x%08x 0x%08x\n", 4689 i, fdp->relsect, fdp->numsect, 4690 fdp->systid, fdp->bootid); 4691 } 4692 } 4693 #endif 4694 4695 /* 4696 * Try to find the unix partition 4697 */ 4698 uidx = -1; 4699 solaris_offset = 0; 4700 solaris_size = 0; 4701 4702 for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) { 4703 int relsect; 4704 int numsect; 4705 4706 if (fdp->numsect == 0) { 4707 un->un_fmap[i].fmap_start = 0; 4708 un->un_fmap[i].fmap_nblk =
0; 4709 continue; 4710 } 4711 4712 /* 4713 * Data in the fdisk table is little-endian. 4714 */ 4715 relsect = LE_32(fdp->relsect); 4716 numsect = LE_32(fdp->numsect); 4717 4718 un->un_fmap[i].fmap_start = relsect; 4719 un->un_fmap[i].fmap_nblk = numsect; 4720 4721 if (fdp->systid != SUNIXOS && 4722 fdp->systid != SUNIXOS2 && 4723 fdp->systid != EFI_PMBR) { 4724 continue; 4725 } 4726 4727 /* 4728 * Use the last active Solaris partition id found 4729 * (there should be only one active partition id). 4730 * 4731 * If there is no active Solaris partition id, 4732 * then use the first inactive Solaris partition id. 4733 */ 4734 if ((uidx == -1) || (fdp->bootid == ACTIVE)) { 4735 uidx = i; 4736 solaris_offset = relsect; 4737 solaris_size = numsect; 4738 } 4739 } 4740 4741 SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk 0x%x 0x%lx", 4742 un->un_solaris_offset, un->un_solaris_size); 4743 4744 rval = SD_CMD_SUCCESS; 4745 4746 done: 4747 4748 /* 4749 * Clear the VTOC info only if the Solaris partition entry 4750 * has moved, changed size, been deleted, or if the size of 4751 * the partition is too small to even fit the label sector. 4752 */ 4753 if ((un->un_solaris_offset != solaris_offset) || 4754 (un->un_solaris_size != solaris_size) || 4755 solaris_size <= DK_LABEL_LOC) { 4756 SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk moved 0x%x 0x%lx", 4757 solaris_offset, solaris_size); 4758 bzero(&un->un_g, sizeof (struct dk_geom)); 4759 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 4760 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 4761 un->un_f_geometry_is_valid = FALSE; 4762 } 4763 un->un_solaris_offset = solaris_offset; 4764 un->un_solaris_size = solaris_size; 4765 kmem_free(bufp, blocksize); 4766 return (rval); 4767 4768 #else /* #elif defined(_FIRMWARE_NEEDS_FDISK) */ 4769 #error "fdisk table presence undetermined for this platform." 4770 #endif /* #if defined(_NO_FDISK_PRESENT) */ 4771 } 4772 4773 4774 /* 4775 * Function: sd_get_physical_geometry 4776 * 4777 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4778 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4779 * target, and use this information to initialize the physical 4780 * geometry cache specified by pgeom_p. 4781 * 4782 * MODE SENSE is an optional command, so failure in this case 4783 * does not necessarily denote an error. We want to use the 4784 * MODE SENSE commands to derive the physical geometry of the 4785 * device, but if either command fails, the logical geometry is 4786 * used as the fallback for disk label geometry. 4787 * 4788 * This requires that un->un_blockcount and un->un_tgt_blocksize 4789 * have already been initialized for the current target and 4790 * that the current values be passed as args so that we don't 4791 * end up ever trying to use -1 as a valid value. This could 4792 * happen if either value is reset while we're not holding 4793 * the mutex. 4794 * 4795 * Arguments: un - driver soft state (unit) structure * pgeom_p - pointer to the physical geometry cache to initialize * capacity - disk capacity in #blocks * lbasize - disk block size in bytes 4796 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4797 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4798 * to use the USCSI "direct" chain and bypass the normal 4799 * command waitq. 4800 * 4801 * Context: Kernel thread only (can sleep).
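* As a worked example (header sizes assumed from the standard SCSI mode sense structures): for a non-ATAPI reply, the returned data is laid out as a 4-byte mode_header (MODE_HEADER_LENGTH), then bd_len bytes of block descriptor (at most MODE_BLK_DESC_LENGTH, i.e. 8), then the mode page itself, so page3p below ends up at buf + 4 + bd_len.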
4802 */ 4803 4804 static void 4805 sd_get_physical_geometry(struct sd_lun *un, struct geom_cache *pgeom_p, 4806 int capacity, int lbasize, int path_flag) 4807 { 4808 struct mode_format *page3p; 4809 struct mode_geometry *page4p; 4810 struct mode_header *headerp; 4811 int sector_size; 4812 int nsect; 4813 int nhead; 4814 int ncyl; 4815 int intrlv; 4816 int spc; 4817 int modesense_capacity; 4818 int rpm; 4819 int bd_len; 4820 int mode_header_length; 4821 uchar_t *p3bufp; 4822 uchar_t *p4bufp; 4823 int cdbsize; 4824 4825 ASSERT(un != NULL); 4826 ASSERT(!(mutex_owned(SD_MUTEX(un)))); 4827 4828 if (un->un_f_blockcount_is_valid != TRUE) { 4829 return; 4830 } 4831 4832 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 4833 return; 4834 } 4835 4836 if (lbasize == 0) { 4837 if (ISCD(un)) { 4838 lbasize = 2048; 4839 } else { 4840 lbasize = un->un_sys_blocksize; 4841 } 4842 } 4843 pgeom_p->g_secsize = (unsigned short)lbasize; 4844 4845 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4846 4847 /* 4848 * Retrieve MODE SENSE page 3 - Format Device Page 4849 */ 4850 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4851 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4852 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4853 != 0) { 4854 SD_ERROR(SD_LOG_COMMON, un, 4855 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4856 goto page3_exit; 4857 } 4858 4859 /* 4860 * Determine size of Block Descriptors in order to locate the mode 4861 * page data. ATAPI devices return 0, SCSI devices should return 4862 * MODE_BLK_DESC_LENGTH. 4863 */ 4864 headerp = (struct mode_header *)p3bufp; 4865 if (un->un_f_cfg_is_atapi == TRUE) { 4866 struct mode_header_grp2 *mhp = 4867 (struct mode_header_grp2 *)headerp; 4868 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4869 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4870 } else { 4871 mode_header_length = MODE_HEADER_LENGTH; 4872 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4873 } 4874 4875 if (bd_len > MODE_BLK_DESC_LENGTH) { 4876 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4877 "received unexpected bd_len of %d, page3\n", bd_len); 4878 goto page3_exit; 4879 } 4880 4881 page3p = (struct mode_format *) 4882 ((caddr_t)headerp + mode_header_length + bd_len); 4883 4884 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4885 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4886 "mode sense pg3 code mismatch %d\n", 4887 page3p->mode_page.code); 4888 goto page3_exit; 4889 } 4890 4891 /* 4892 * Use this physical geometry data only if BOTH MODE SENSE commands 4893 * complete successfully; otherwise, revert to the logical geometry. 4894 * So, we need to save everything in temporary variables. 4895 */ 4896 sector_size = BE_16(page3p->data_bytes_sect); 4897 4898 /* 4899 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4900 */ 4901 if (sector_size == 0) { 4902 sector_size = (ISCD(un)) ? 
2048 : un->un_sys_blocksize; 4903 } else { 4904 sector_size &= ~(un->un_sys_blocksize - 1); 4905 } 4906 4907 nsect = BE_16(page3p->sect_track); 4908 intrlv = BE_16(page3p->interleave); 4909 4910 SD_INFO(SD_LOG_COMMON, un, 4911 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4912 SD_INFO(SD_LOG_COMMON, un, 4913 " mode page: %d; nsect: %d; sector size: %d;\n", 4914 page3p->mode_page.code, nsect, sector_size); 4915 SD_INFO(SD_LOG_COMMON, un, 4916 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4917 BE_16(page3p->track_skew), 4918 BE_16(page3p->cylinder_skew)); 4919 4920 4921 /* 4922 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4923 */ 4924 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4925 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4926 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4927 != 0) { 4928 SD_ERROR(SD_LOG_COMMON, un, 4929 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4930 goto page4_exit; 4931 } 4932 4933 /* 4934 * Determine size of Block Descriptors in order to locate the mode 4935 * page data. ATAPI devices return 0, SCSI devices should return 4936 * MODE_BLK_DESC_LENGTH. 4937 */ 4938 headerp = (struct mode_header *)p4bufp; 4939 if (un->un_f_cfg_is_atapi == TRUE) { 4940 struct mode_header_grp2 *mhp = 4941 (struct mode_header_grp2 *)headerp; 4942 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4943 } else { 4944 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4945 } 4946 4947 if (bd_len > MODE_BLK_DESC_LENGTH) { 4948 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4949 "received unexpected bd_len of %d, page4\n", bd_len); 4950 goto page4_exit; 4951 } 4952 4953 page4p = (struct mode_geometry *) 4954 ((caddr_t)headerp + mode_header_length + bd_len); 4955 4956 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4957 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4958 "mode sense pg4 code mismatch %d\n", 4959 page4p->mode_page.code); 4960 goto page4_exit; 4961 } 4962 4963 /* 4964 * Stash the data now, after we know that both commands completed. 4965 */ 4966 4967 mutex_enter(SD_MUTEX(un)); 4968 4969 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4970 spc = nhead * nsect; 4971 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4972 rpm = BE_16(page4p->rpm); 4973 4974 modesense_capacity = spc * ncyl; 4975 4976 SD_INFO(SD_LOG_COMMON, un, 4977 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4978 SD_INFO(SD_LOG_COMMON, un, 4979 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4980 SD_INFO(SD_LOG_COMMON, un, 4981 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4982 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4983 (void *)pgeom_p, capacity); 4984 4985 /* 4986 * Compensate if the drive's geometry is not rectangular, i.e., 4987 * the product of C * H * S returned by MODE SENSE >= that returned 4988 * by read capacity. This is an idiosyncrasy of the original x86 4989 * disk subsystem. 
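* For example (numbers illustrative only): if page 4 reports 10000 cylinders and 16 heads while page 3 reports 63 sectors/track, then spc = 1008 and modesense_capacity = 10080000; with a READ CAPACITY of 10000000 blocks, g_acyl = (10080000 - 10000000 + 1007) / 1008 = 80 and g_ncyl = 10000 - 80 = 9920.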
4990 */ 4991 if (modesense_capacity >= capacity) { 4992 SD_INFO(SD_LOG_COMMON, un, 4993 "sd_get_physical_geometry: adjusting acyl; " 4994 "old: %d; new: %d\n", pgeom_p->g_acyl, 4995 (modesense_capacity - capacity + spc - 1) / spc); 4996 if (sector_size != 0) { 4997 /* 1243403: NEC D38x7 drives don't support sec size */ 4998 pgeom_p->g_secsize = (unsigned short)sector_size; 4999 } 5000 pgeom_p->g_nsect = (unsigned short)nsect; 5001 pgeom_p->g_nhead = (unsigned short)nhead; 5002 pgeom_p->g_capacity = capacity; 5003 pgeom_p->g_acyl = 5004 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 5005 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 5006 } 5007 5008 pgeom_p->g_rpm = (unsigned short)rpm; 5009 pgeom_p->g_intrlv = (unsigned short)intrlv; 5010 5011 SD_INFO(SD_LOG_COMMON, un, 5012 "sd_get_physical_geometry: mode sense geometry:\n"); 5013 SD_INFO(SD_LOG_COMMON, un, 5014 " nsect: %d; sector size: %d; interlv: %d\n", 5015 nsect, sector_size, intrlv); 5016 SD_INFO(SD_LOG_COMMON, un, 5017 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5018 nhead, ncyl, rpm, modesense_capacity); 5019 SD_INFO(SD_LOG_COMMON, un, 5020 "sd_get_physical_geometry: (cached)\n"); 5021 SD_INFO(SD_LOG_COMMON, un, 5022 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5023 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 5024 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 5025 SD_INFO(SD_LOG_COMMON, un, 5026 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5027 un->un_pgeom.g_secsize, un->un_pgeom.g_capacity, 5028 un->un_pgeom.g_intrlv, un->un_pgeom.g_rpm); 5029 5030 mutex_exit(SD_MUTEX(un)); 5031 5032 page4_exit: 5033 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5034 page3_exit: 5035 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5036 } 5037 5038 5039 /* 5040 * Function: sd_get_virtual_geometry 5041 * 5042 * Description: Ask the controller to tell us about the target device. 5043 * 5044 * Arguments: un - pointer to softstate 5045 * capacity - disk capacity in #blocks 5046 * lbasize - disk block size in bytes 5047 * 5048 * Context: Kernel thread only 5049 */ 5050 5051 static void 5052 sd_get_virtual_geometry(struct sd_lun *un, int capacity, int lbasize) 5053 { 5054 struct geom_cache *lgeom_p = &un->un_lgeom; 5055 uint_t geombuf; 5056 int spc; 5057 5058 ASSERT(un != NULL); 5059 ASSERT(mutex_owned(SD_MUTEX(un))); 5060 5061 mutex_exit(SD_MUTEX(un)); 5062 5063 /* Set sector size, and total number of sectors */ 5064 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5065 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5066 5067 /* Let the HBA tell us its geometry */ 5068 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5069 5070 mutex_enter(SD_MUTEX(un)); 5071 5072 /* A value of -1 indicates an undefined "geometry" property */ 5073 if (geombuf == (-1)) { 5074 return; 5075 } 5076 5077 /* Initialize the logical geometry cache. */ 5078 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 5079 lgeom_p->g_nsect = geombuf & 0xffff; 5080 lgeom_p->g_secsize = un->un_sys_blocksize; 5081 5082 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 5083 5084 /* 5085 * Note: The driver originally converted the capacity value from 5086 * target blocks to system blocks. However, the capacity value passed 5087 * to this routine is already in terms of system blocks (this scaling 5088 * is done when the READ CAPACITY command is issued and processed). 
5089 * This 'error' may have gone undetected because the usage of g_ncyl 5090 * (which is based upon g_capacity) is very limited within the driver. 5091 */ 5092 lgeom_p->g_capacity = capacity; 5093 5094 /* 5095 * Set ncyl to zero if the HBA returned a zero nhead or nsect value. The 5096 * HBA may return zero values if the device has been removed. 5097 */ 5098 if (spc == 0) { 5099 lgeom_p->g_ncyl = 0; 5100 } else { 5101 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 5102 } 5103 lgeom_p->g_acyl = 0; 5104 5105 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 5106 SD_INFO(SD_LOG_COMMON, un, 5107 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5108 un->un_lgeom.g_ncyl, un->un_lgeom.g_acyl, 5109 un->un_lgeom.g_nhead, un->un_lgeom.g_nsect); 5110 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 5111 "intrlv: %d; rpm: %d\n", un->un_lgeom.g_secsize, 5112 un->un_lgeom.g_capacity, un->un_lgeom.g_intrlv, un->un_lgeom.g_rpm); 5113 } 5114 5115 5116 /* 5117 * Function: sd_update_block_info 5118 * 5119 * Description: Calculate a byte count to sector count bitshift value 5120 * from sector size. 5121 * 5122 * Arguments: un: unit struct. 5123 * lbasize: new target sector size 5124 * capacity: new target capacity, i.e., block count 5125 * 5126 * Context: Kernel thread context 5127 */ 5128 5129 static void 5130 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5131 { 5132 if (lbasize != 0) { 5133 un->un_tgt_blocksize = lbasize; 5134 un->un_f_tgt_blocksize_is_valid = TRUE; 5135 } 5136 5137 if (capacity != 0) { 5138 un->un_blockcount = capacity; 5139 un->un_f_blockcount_is_valid = TRUE; 5140 } 5141 } 5142 5143 5144 static void 5145 sd_swap_efi_gpt(efi_gpt_t *e) 5146 { 5147 _NOTE(ASSUMING_PROTECTED(*e)) 5148 e->efi_gpt_Signature = LE_64(e->efi_gpt_Signature); 5149 e->efi_gpt_Revision = LE_32(e->efi_gpt_Revision); 5150 e->efi_gpt_HeaderSize = LE_32(e->efi_gpt_HeaderSize); 5151 e->efi_gpt_HeaderCRC32 = LE_32(e->efi_gpt_HeaderCRC32); 5152 e->efi_gpt_MyLBA = LE_64(e->efi_gpt_MyLBA); 5153 e->efi_gpt_AlternateLBA = LE_64(e->efi_gpt_AlternateLBA); 5154 e->efi_gpt_FirstUsableLBA = LE_64(e->efi_gpt_FirstUsableLBA); 5155 e->efi_gpt_LastUsableLBA = LE_64(e->efi_gpt_LastUsableLBA); 5156 UUID_LE_CONVERT(e->efi_gpt_DiskGUID, e->efi_gpt_DiskGUID); 5157 e->efi_gpt_PartitionEntryLBA = LE_64(e->efi_gpt_PartitionEntryLBA); 5158 e->efi_gpt_NumberOfPartitionEntries = 5159 LE_32(e->efi_gpt_NumberOfPartitionEntries); 5160 e->efi_gpt_SizeOfPartitionEntry = 5161 LE_32(e->efi_gpt_SizeOfPartitionEntry); 5162 e->efi_gpt_PartitionEntryArrayCRC32 = 5163 LE_32(e->efi_gpt_PartitionEntryArrayCRC32); 5164 } 5165 5166 static void 5167 sd_swap_efi_gpe(int nparts, efi_gpe_t *p) 5168 { 5169 int i; 5170 5171 _NOTE(ASSUMING_PROTECTED(*p)) 5172 for (i = 0; i < nparts; i++) { 5173 UUID_LE_CONVERT(p[i].efi_gpe_PartitionTypeGUID, 5174 p[i].efi_gpe_PartitionTypeGUID); 5175 p[i].efi_gpe_StartingLBA = LE_64(p[i].efi_gpe_StartingLBA); 5176 p[i].efi_gpe_EndingLBA = LE_64(p[i].efi_gpe_EndingLBA); 5177 /* PartitionAttrs */ 5178 } 5179 } 5180 5181 static int 5182 sd_validate_efi(efi_gpt_t *labp) 5183 { 5184 if (labp->efi_gpt_Signature != EFI_SIGNATURE) 5185 return (EINVAL); 5186 /* at least 96 bytes in this version of the spec.
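(i.e., the fixed header fields, efi_gpt_Signature through efi_gpt_PartitionEntryArrayCRC32; the trailing efi_gpt_Reserved2 padding is excluded from the size check below)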
*/ 5187 if (sizeof (efi_gpt_t) - sizeof (labp->efi_gpt_Reserved2) > 5188 labp->efi_gpt_HeaderSize) 5189 return (EINVAL); 5190 /* this should be 128 bytes */ 5191 if (labp->efi_gpt_SizeOfPartitionEntry != sizeof (efi_gpe_t)) 5192 return (EINVAL); 5193 return (0); 5194 } 5195 5196 static int 5197 sd_use_efi(struct sd_lun *un, int path_flag) 5198 { 5199 int i; 5200 int rval = 0; 5201 efi_gpe_t *partitions; 5202 uchar_t *buf; 5203 uint_t lbasize; 5204 uint64_t cap; 5205 uint_t nparts; 5206 diskaddr_t gpe_lba; 5207 5208 ASSERT(mutex_owned(SD_MUTEX(un))); 5209 lbasize = un->un_tgt_blocksize; 5210 5211 mutex_exit(SD_MUTEX(un)); 5212 5213 buf = kmem_zalloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP); 5214 5215 if (un->un_tgt_blocksize != un->un_sys_blocksize) { 5216 rval = EINVAL; 5217 goto done_err; 5218 } 5219 5220 rval = sd_send_scsi_READ(un, buf, lbasize, 0, path_flag); 5221 if (rval) { 5222 goto done_err; 5223 } 5224 if (((struct dk_label *)buf)->dkl_magic == DKL_MAGIC) { 5225 /* not ours */ 5226 rval = ESRCH; 5227 goto done_err; 5228 } 5229 5230 rval = sd_send_scsi_READ(un, buf, lbasize, 1, path_flag); 5231 if (rval) { 5232 goto done_err; 5233 } 5234 sd_swap_efi_gpt((efi_gpt_t *)buf); 5235 5236 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) { 5237 /* 5238 * Couldn't read the primary, try the backup. Our 5239 * capacity at this point could be based on CHS, so 5240 * check what the device reports. 5241 */ 5242 rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize, 5243 path_flag); 5244 if (rval) { 5245 goto done_err; 5246 } 5247 if ((rval = sd_send_scsi_READ(un, buf, lbasize, 5248 cap - 1, path_flag)) != 0) { 5249 goto done_err; 5250 } 5251 sd_swap_efi_gpt((efi_gpt_t *)buf); 5252 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) 5253 goto done_err; 5254 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5255 "primary label corrupt; using backup\n"); 5256 } 5257 5258 nparts = ((efi_gpt_t *)buf)->efi_gpt_NumberOfPartitionEntries; 5259 gpe_lba = ((efi_gpt_t *)buf)->efi_gpt_PartitionEntryLBA; 5260 5261 rval = sd_send_scsi_READ(un, buf, EFI_MIN_ARRAY_SIZE, gpe_lba, 5262 path_flag); 5263 if (rval) { 5264 goto done_err; 5265 } 5266 partitions = (efi_gpe_t *)buf; 5267 5268 if (nparts > MAXPART) { 5269 nparts = MAXPART; 5270 } 5271 sd_swap_efi_gpe(nparts, partitions); 5272 5273 mutex_enter(SD_MUTEX(un)); 5274 5275 /* Fill in partition table. */ 5276 for (i = 0; i < nparts; i++) { 5277 if (partitions->efi_gpe_StartingLBA != 0 || 5278 partitions->efi_gpe_EndingLBA != 0) { 5279 un->un_map[i].dkl_cylno = 5280 partitions->efi_gpe_StartingLBA; 5281 un->un_map[i].dkl_nblk = 5282 partitions->efi_gpe_EndingLBA - 5283 partitions->efi_gpe_StartingLBA + 1; 5284 un->un_offset[i] = 5285 partitions->efi_gpe_StartingLBA; 5286 } 5287 if (i == WD_NODE) { 5288 /* 5289 * minor number 7 corresponds to the whole disk 5290 */ 5291 un->un_map[i].dkl_cylno = 0; 5292 un->un_map[i].dkl_nblk = un->un_blockcount; 5293 un->un_offset[i] = 0; 5294 } 5295 partitions++; 5296 } 5297 un->un_solaris_offset = 0; 5298 un->un_solaris_size = cap; 5299 un->un_f_geometry_is_valid = TRUE; 5300 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5301 return (0); 5302 5303 done_err: 5304 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5305 mutex_enter(SD_MUTEX(un)); 5306 /* 5307 * if we didn't find something that could look like a VTOC 5308 * and the disk is over 1TB, we know there isn't a valid label. 5309 * Otherwise let sd_uselabel decide what to do. 
We only 5310 * want to invalidate this if we're certain the label isn't 5311 * valid because sd_prop_op will now fail, which in turn 5312 * causes things like opens and stats on the partition to fail. 5313 */ 5314 if ((un->un_blockcount > DK_MAX_BLOCKS) && (rval != ESRCH)) { 5315 un->un_f_geometry_is_valid = FALSE; 5316 } 5317 return (rval); 5318 } 5319 5320 5321 /* 5322 * Function: sd_uselabel 5323 * 5324 * Description: Validate the disk label and update the relevant data (geometry, 5325 * partition, vtoc, and capacity data) in the sd_lun struct. 5326 * Marks the geometry of the unit as being valid. 5327 * 5328 * Arguments: un: unit struct. 5329 * dk_label: disk label 5330 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 5331 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 5332 * to use the USCSI "direct" chain and bypass the normal 5333 * command waitq. 5334 * 5335 * Return Code: SD_LABEL_IS_VALID: Label read from disk is OK; geometry, 5336 * partition, vtoc, and capacity data are good. 5337 * 5338 * SD_LABEL_IS_INVALID: Magic number or checksum error in the 5339 * label; or computed capacity does not jibe with capacity 5340 * reported from the READ CAPACITY command. 5341 * 5342 * Context: Kernel thread only (can sleep). 5343 */ 5344 5345 static int 5346 sd_uselabel(struct sd_lun *un, struct dk_label *labp, int path_flag) 5347 { 5348 short *sp; 5349 short sum; 5350 short count; 5351 int label_error = SD_LABEL_IS_VALID; 5352 int i; 5353 int capacity; 5354 int part_end; 5355 int track_capacity; 5356 int err; 5357 #if defined(_SUNOS_VTOC_16) 5358 struct dkl_partition *vpartp; 5359 #endif 5360 ASSERT(un != NULL); 5361 ASSERT(mutex_owned(SD_MUTEX(un))); 5362 5363 /* Validate the magic number of the label. */ 5364 if (labp->dkl_magic != DKL_MAGIC) { 5365 #if defined(__sparc) 5366 if ((un->un_state == SD_STATE_NORMAL) && 5367 !ISREMOVABLE(un)) { 5368 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5369 "Corrupt label; wrong magic number\n"); 5370 } 5371 #endif 5372 return (SD_LABEL_IS_INVALID); 5373 } 5374 5375 /* Validate the checksum of the label. */ 5376 sp = (short *)labp; 5377 sum = 0; 5378 count = sizeof (struct dk_label) / sizeof (short); 5379 while (count--) { 5380 sum ^= *sp++; 5381 } 5382 5383 if (sum != 0) { 5384 #if defined(_SUNOS_VTOC_16) 5385 if (un->un_state == SD_STATE_NORMAL && !ISCD(un)) { 5386 #elif defined(_SUNOS_VTOC_8) 5387 if (un->un_state == SD_STATE_NORMAL && !ISREMOVABLE(un)) { 5388 #endif 5389 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5390 "Corrupt label - label checksum failed\n"); 5391 } 5392 return (SD_LABEL_IS_INVALID); 5393 } 5394 5395 5396 /* 5397 * Fill in geometry structure with data from label. 5398 */ 5399 bzero(&un->un_g, sizeof (struct dk_geom)); 5400 un->un_g.dkg_ncyl = labp->dkl_ncyl; 5401 un->un_g.dkg_acyl = labp->dkl_acyl; 5402 un->un_g.dkg_bcyl = 0; 5403 un->un_g.dkg_nhead = labp->dkl_nhead; 5404 un->un_g.dkg_nsect = labp->dkl_nsect; 5405 un->un_g.dkg_intrlv = labp->dkl_intrlv; 5406 5407 #if defined(_SUNOS_VTOC_8) 5408 un->un_g.dkg_gap1 = labp->dkl_gap1; 5409 un->un_g.dkg_gap2 = labp->dkl_gap2; 5410 un->un_g.dkg_bhead = labp->dkl_bhead; 5411 #endif 5412 #if defined(_SUNOS_VTOC_16) 5413 un->un_dkg_skew = labp->dkl_skew; 5414 #endif 5415 5416 #if defined(__i386) || defined(__amd64) 5417 un->un_g.dkg_apc = labp->dkl_apc; 5418 #endif 5419 5420 /* 5421 * Currently we rely on the values in the label being accurate. If 5422 * dkl_rpm or dkl_pcyl are zero in the label, use a default value.
5423 * 5424 * Note: In the future a MODE SENSE may be used to retrieve this data, 5425 * although this command is optional in SCSI-2. 5426 */ 5427 un->un_g.dkg_rpm = (labp->dkl_rpm != 0) ? labp->dkl_rpm : 3600; 5428 un->un_g.dkg_pcyl = (labp->dkl_pcyl != 0) ? labp->dkl_pcyl : 5429 (un->un_g.dkg_ncyl + un->un_g.dkg_acyl); 5430 5431 /* 5432 * The Read and Write reinstruct values may not be valid 5433 * for older disks. 5434 */ 5435 un->un_g.dkg_read_reinstruct = labp->dkl_read_reinstruct; 5436 un->un_g.dkg_write_reinstruct = labp->dkl_write_reinstruct; 5437 5438 /* Fill in partition table. */ 5439 #if defined(_SUNOS_VTOC_8) 5440 for (i = 0; i < NDKMAP; i++) { 5441 un->un_map[i].dkl_cylno = labp->dkl_map[i].dkl_cylno; 5442 un->un_map[i].dkl_nblk = labp->dkl_map[i].dkl_nblk; 5443 } 5444 #endif 5445 #if defined(_SUNOS_VTOC_16) 5446 vpartp = labp->dkl_vtoc.v_part; 5447 track_capacity = labp->dkl_nhead * labp->dkl_nsect; 5448 5449 for (i = 0; i < NDKMAP; i++, vpartp++) { 5450 un->un_map[i].dkl_cylno = vpartp->p_start / track_capacity; 5451 un->un_map[i].dkl_nblk = vpartp->p_size; 5452 } 5453 #endif 5454 5455 /* Fill in VTOC Structure. */ 5456 bcopy(&labp->dkl_vtoc, &un->un_vtoc, sizeof (struct dk_vtoc)); 5457 #if defined(_SUNOS_VTOC_8) 5458 /* 5459 * The 8-slice vtoc does not include the ascii label; save it into 5460 * the device's soft state structure here. 5461 */ 5462 bcopy(labp->dkl_asciilabel, un->un_asciilabel, LEN_DKL_ASCII); 5463 #endif 5464 5465 /* Mark the geometry as valid. */ 5466 un->un_f_geometry_is_valid = TRUE; 5467 5468 /* Now look for a valid capacity. */ 5469 track_capacity = (un->un_g.dkg_nhead * un->un_g.dkg_nsect); 5470 capacity = (un->un_g.dkg_ncyl * track_capacity); 5471 5472 if (un->un_g.dkg_acyl) { 5473 #if defined(__i386) || defined(__amd64) 5474 /* we may have > 1 alts cylinder */ 5475 capacity += (track_capacity * un->un_g.dkg_acyl); 5476 #else 5477 capacity += track_capacity; 5478 #endif 5479 } 5480 5481 /* 5482 * At this point, un->un_blockcount should contain valid data from 5483 * the READ CAPACITY command. 5484 */ 5485 if (un->un_f_blockcount_is_valid != TRUE) { 5486 /* 5487 * We have a situation where the target didn't give us a good 5488 * READ CAPACITY value, yet there appears to be a valid label. 5489 * In this case, we'll fake the capacity. 5490 */ 5491 un->un_blockcount = capacity; 5492 un->un_f_blockcount_is_valid = TRUE; 5493 goto done; 5494 } 5495 5496 5497 if ((capacity <= un->un_blockcount) || 5498 (un->un_state != SD_STATE_NORMAL)) { 5499 #if defined(_SUNOS_VTOC_8) 5500 /* 5501 * We can't let this happen on drives that are subdivided 5502 * into logical disks (i.e., that have an fdisk table). 5503 * The un_blockcount field should always hold the full media 5504 * size in sectors, period. This code would overwrite 5505 * un_blockcount with the size of the Solaris fdisk partition. 5506 */ 5507 SD_ERROR(SD_LOG_COMMON, un, 5508 "sd_uselabel: Label %d blocks; Drive %d blocks\n", 5509 capacity, un->un_blockcount); 5510 un->un_blockcount = capacity; 5511 un->un_f_blockcount_is_valid = TRUE; 5512 #endif /* defined(_SUNOS_VTOC_8) */ 5513 goto done; 5514 } 5515 5516 if (ISCD(un)) { 5517 /* For CDROMs, we trust that the data in the label is OK. 
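* Even so, the loops below verify that no non-empty slice extends past un_blockcount; a slice whose computed last block lies beyond the capacity marks the geometry invalid.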
*/ 5518 #if defined(_SUNOS_VTOC_8) 5519 for (i = 0; i < NDKMAP; i++) { 5520 part_end = labp->dkl_nhead * labp->dkl_nsect * 5521 labp->dkl_map[i].dkl_cylno + 5522 labp->dkl_map[i].dkl_nblk - 1; 5523 5524 if ((labp->dkl_map[i].dkl_nblk) && 5525 (part_end > un->un_blockcount)) { 5526 un->un_f_geometry_is_valid = FALSE; 5527 break; 5528 } 5529 } 5530 #endif 5531 #if defined(_SUNOS_VTOC_16) 5532 vpartp = &(labp->dkl_vtoc.v_part[0]); 5533 for (i = 0; i < NDKMAP; i++, vpartp++) { 5534 part_end = vpartp->p_start + vpartp->p_size; 5535 if ((vpartp->p_size > 0) && 5536 (part_end > un->un_blockcount)) { 5537 un->un_f_geometry_is_valid = FALSE; 5538 break; 5539 } 5540 } 5541 #endif 5542 } else { 5543 uint64_t t_capacity; 5544 uint32_t t_lbasize; 5545 5546 mutex_exit(SD_MUTEX(un)); 5547 err = sd_send_scsi_READ_CAPACITY(un, &t_capacity, &t_lbasize, 5548 path_flag); 5549 ASSERT(t_capacity <= DK_MAX_BLOCKS); 5550 mutex_enter(SD_MUTEX(un)); 5551 5552 if (err == 0) { 5553 sd_update_block_info(un, t_lbasize, t_capacity); 5554 } 5555 5556 if (capacity > un->un_blockcount) { 5557 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5558 "Corrupt label - bad geometry\n"); 5559 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 5560 "Label says %u blocks; Drive says %llu blocks\n", 5561 capacity, (unsigned long long)un->un_blockcount); 5562 un->un_f_geometry_is_valid = FALSE; 5563 label_error = SD_LABEL_IS_INVALID; 5564 } 5565 } 5566 5567 done: 5568 5569 SD_INFO(SD_LOG_COMMON, un, "sd_uselabel: (label geometry)\n"); 5570 SD_INFO(SD_LOG_COMMON, un, 5571 " ncyl: %d; acyl: %d; nhead: %d; nsect: %d\n", 5572 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5573 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5574 SD_INFO(SD_LOG_COMMON, un, 5575 " lbasize: %d; capacity: %d; intrlv: %d; rpm: %d\n", 5576 un->un_tgt_blocksize, un->un_blockcount, 5577 un->un_g.dkg_intrlv, un->un_g.dkg_rpm); 5578 SD_INFO(SD_LOG_COMMON, un, " wrt_reinstr: %d; rd_reinstr: %d\n", 5579 un->un_g.dkg_write_reinstruct, un->un_g.dkg_read_reinstruct); 5580 5581 ASSERT(mutex_owned(SD_MUTEX(un))); 5582 5583 return (label_error); 5584 } 5585 5586 5587 /* 5588 * Function: sd_build_default_label 5589 * 5590 * Description: Generate a default label for those devices that do not have 5591 * one, e.g., new media, removable cartridges, etc. 5592 * 5593 * Context: Kernel thread only 5594 */ 5595 5596 static void 5597 sd_build_default_label(struct sd_lun *un) 5598 { 5599 #if defined(_SUNOS_VTOC_16) 5600 uint_t phys_spc; 5601 uint_t disksize; 5602 struct dk_geom un_g; 5603 #endif 5604 5605 ASSERT(un != NULL); 5606 ASSERT(mutex_owned(SD_MUTEX(un))); 5607 5608 #if defined(_SUNOS_VTOC_8) 5609 /* 5610 * Note: This is a legacy check for non-removable devices on VTOC_8 5611 * only. This may be a valid check for VTOC_16 as well. 5612 */ 5613 if (!ISREMOVABLE(un)) { 5614 return; 5615 } 5616 #endif 5617 5618 bzero(&un->un_g, sizeof (struct dk_geom)); 5619 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 5620 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 5621 5622 #if defined(_SUNOS_VTOC_8) 5623 5624 /* 5625 * It's REMOVABLE media, therefore no label (on sparc, anyway). 5626 * But it is still necessary to set up various geometry information, 5627 * and we are doing this here. 5628 */ 5629 5630 /* 5631 * For the rpm, we use the minimum for the disk. For the head, cyl, 5632 * and number of sectors per track: if the capacity is <= 1GB, head = 64 5633 * and sect = 32; else head = 255 and sect = 63. Note: the capacity 5634 * should be equal to the C*H*S value.
This will cause some truncation of size due 5635 * to round off errors. For CD-ROMs, this truncation can have adverse 5636 * side effects, so we return ncyl and nhead as 1. The nsect will 5637 * overflow for most CD-ROMs as nsect is of type ushort. (4190569) 5638 */ 5639 if (ISCD(un)) { 5640 /* 5641 * Preserve the old behavior for non-writable 5642 * media. Since dkg_nsect is a ushort, it 5643 * will lose bits as CD-ROMs have more than 5644 * 65536 sectors. So if we recalculate 5645 * capacity, it will become much shorter. 5646 * The dkg_* information is not 5647 * used for CD-ROMs, so that is OK. But for 5648 * writable CDs we need this information 5649 * to be valid (for newfs, say), so we 5650 * make nsect and nhead > 1 so that 5651 * nsect can stay within the ushort limit 5652 * without losing any bits. 5653 */ 5654 if (un->un_f_mmc_writable_media == TRUE) { 5655 un->un_g.dkg_nhead = 64; 5656 un->un_g.dkg_nsect = 32; 5657 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5658 un->un_blockcount = un->un_g.dkg_ncyl * 5659 un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5660 } else { 5661 un->un_g.dkg_ncyl = 1; 5662 un->un_g.dkg_nhead = 1; 5663 un->un_g.dkg_nsect = un->un_blockcount; 5664 } 5665 } else { 5666 if (un->un_blockcount <= 0x1000) { 5667 /* unlabeled SCSI floppy device */ 5668 un->un_g.dkg_nhead = 2; 5669 un->un_g.dkg_ncyl = 80; 5670 un->un_g.dkg_nsect = un->un_blockcount / (2 * 80); 5671 } else if (un->un_blockcount <= 0x200000) { 5672 un->un_g.dkg_nhead = 64; 5673 un->un_g.dkg_nsect = 32; 5674 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5675 } else { 5676 un->un_g.dkg_nhead = 255; 5677 un->un_g.dkg_nsect = 63; 5678 un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63); 5679 } 5680 un->un_blockcount = 5681 un->un_g.dkg_ncyl * un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5682 } 5683 5684 un->un_g.dkg_acyl = 0; 5685 un->un_g.dkg_bcyl = 0; 5686 un->un_g.dkg_rpm = 200; 5687 un->un_asciilabel[0] = '\0'; 5688 un->un_g.dkg_pcyl = un->un_g.dkg_ncyl; 5689 5690 un->un_map[0].dkl_cylno = 0; 5691 un->un_map[0].dkl_nblk = un->un_blockcount; 5692 un->un_map[2].dkl_cylno = 0; 5693 un->un_map[2].dkl_nblk = un->un_blockcount; 5694 5695 #elif defined(_SUNOS_VTOC_16) 5696 5697 if (un->un_solaris_size == 0) { 5698 /* 5699 * Got an fdisk table but no Solaris entry, therefore 5700 * don't create a default label. 5701 */ 5702 un->un_f_geometry_is_valid = TRUE; 5703 return; 5704 } 5705 5706 /* 5707 * For CDs we continue to use the physical geometry to calculate 5708 * number of cylinders. All other devices must convert the 5709 * physical geometry (geom_cache) to values that will fit 5710 * in a dk_geom structure. 5711 */ 5712 if (ISCD(un)) { 5713 phys_spc = un->un_pgeom.g_nhead * un->un_pgeom.g_nsect; 5714 } else { 5715 /* Convert physical geometry to disk geometry */ 5716 bzero(&un_g, sizeof (struct dk_geom)); 5717 sd_convert_geometry(un->un_blockcount, &un_g); 5718 bcopy(&un_g, &un->un_g, sizeof (un->un_g)); 5719 phys_spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5720 } 5721 5722 un->un_g.dkg_pcyl = un->un_solaris_size / phys_spc; 5723 un->un_g.dkg_acyl = DK_ACYL; 5724 un->un_g.dkg_ncyl = un->un_g.dkg_pcyl - DK_ACYL; 5725 disksize = un->un_g.dkg_ncyl * phys_spc; 5726 5727 if (ISCD(un)) { 5728 /* 5729 * CDs don't use the "heads * sectors * cyls"-type of 5730 * geometry, but instead use the entire capacity of the media. 5731 */ 5732 disksize = un->un_solaris_size; 5733 un->un_g.dkg_nhead = 1; 5734 un->un_g.dkg_nsect = 1; 5735 un->un_g.dkg_rpm = 5736 (un->un_pgeom.g_rpm == 0) ?
200 : un->un_pgeom.g_rpm; 5737 5738 un->un_vtoc.v_part[0].p_start = 0; 5739 un->un_vtoc.v_part[0].p_size = disksize; 5740 un->un_vtoc.v_part[0].p_tag = V_BACKUP; 5741 un->un_vtoc.v_part[0].p_flag = V_UNMNT; 5742 5743 un->un_map[0].dkl_cylno = 0; 5744 un->un_map[0].dkl_nblk = disksize; 5745 un->un_offset[0] = 0; 5746 5747 } else { 5748 /* 5749 * Hard disks and removable media cartridges 5750 */ 5751 un->un_g.dkg_rpm = 5752 (un->un_pgeom.g_rpm == 0) ? 3600: un->un_pgeom.g_rpm; 5753 un->un_vtoc.v_sectorsz = un->un_sys_blocksize; 5754 5755 /* Add boot slice */ 5756 un->un_vtoc.v_part[8].p_start = 0; 5757 un->un_vtoc.v_part[8].p_size = phys_spc; 5758 un->un_vtoc.v_part[8].p_tag = V_BOOT; 5759 un->un_vtoc.v_part[8].p_flag = V_UNMNT; 5760 5761 un->un_map[8].dkl_cylno = 0; 5762 un->un_map[8].dkl_nblk = phys_spc; 5763 un->un_offset[8] = 0; 5764 } 5765 5766 un->un_g.dkg_apc = 0; 5767 un->un_vtoc.v_nparts = V_NUMPAR; 5768 un->un_vtoc.v_version = V_VERSION; 5769 5770 /* Add backup slice */ 5771 un->un_vtoc.v_part[2].p_start = 0; 5772 un->un_vtoc.v_part[2].p_size = disksize; 5773 un->un_vtoc.v_part[2].p_tag = V_BACKUP; 5774 un->un_vtoc.v_part[2].p_flag = V_UNMNT; 5775 5776 un->un_map[2].dkl_cylno = 0; 5777 un->un_map[2].dkl_nblk = disksize; 5778 un->un_offset[2] = 0; 5779 5780 (void) sprintf(un->un_vtoc.v_asciilabel, "DEFAULT cyl %d alt %d" 5781 " hd %d sec %d", un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5782 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5783 5784 #else 5785 #error "No VTOC format defined." 5786 #endif 5787 5788 un->un_g.dkg_read_reinstruct = 0; 5789 un->un_g.dkg_write_reinstruct = 0; 5790 5791 un->un_g.dkg_intrlv = 1; 5792 5793 un->un_vtoc.v_sanity = VTOC_SANE; 5794 5795 un->un_f_geometry_is_valid = TRUE; 5796 5797 SD_INFO(SD_LOG_COMMON, un, 5798 "sd_build_default_label: Default label created: " 5799 "cyl: %d\tacyl: %d\tnhead: %d\tnsect: %d\tcap: %d\n", 5800 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, un->un_g.dkg_nhead, 5801 un->un_g.dkg_nsect, un->un_blockcount); 5802 } 5803 5804 5805 #if defined(_FIRMWARE_NEEDS_FDISK) 5806 /* 5807 * Max CHS values, as they are encoded into bytes, for 1022/254/63 5808 */ 5809 #define LBA_MAX_SECT (63 | ((1022 & 0x300) >> 2)) 5810 #define LBA_MAX_CYL (1022 & 0xFF) 5811 #define LBA_MAX_HEAD (254) 5812 5813 5814 /* 5815 * Function: sd_has_max_chs_vals 5816 * 5817 * Description: Return TRUE if Cylinder-Head-Sector values are all at maximum. 5818 * 5819 * Arguments: fdp - ptr to CHS info 5820 * 5821 * Return Code: True or false 5822 * 5823 * Context: Any. 5824 */ 5825 5826 static int 5827 sd_has_max_chs_vals(struct ipart *fdp) 5828 { 5829 return ((fdp->begcyl == LBA_MAX_CYL) && 5830 (fdp->beghead == LBA_MAX_HEAD) && 5831 (fdp->begsect == LBA_MAX_SECT) && 5832 (fdp->endcyl == LBA_MAX_CYL) && 5833 (fdp->endhead == LBA_MAX_HEAD) && 5834 (fdp->endsect == LBA_MAX_SECT)); 5835 } 5836 #endif 5837 5838 5839 /* 5840 * Function: sd_inq_fill 5841 * 5842 * Description: Copy a piece of inquiry data into a destination string, 5843 * cleaned up for non-printable characters and stopping at the 5844 * first space character after the beginning of the passed string. 5845 * 5846 * Arguments: p - source string 5847 * l - maximum length to copy 5848 * s - destination string 5849 * 5850 * Context: Any.
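* Example (hypothetical input): with p = "FUJITSU MAP3367N" and l = 8, s receives "FUJITSU"; the copy stops at the first embedded space, and any non-printable byte would be replaced with '*'.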
5851 */ 5852 5853 static void 5854 sd_inq_fill(char *p, int l, char *s) 5855 { 5856 unsigned i = 0; 5857 char c; 5858 5859 while (i++ < l) { 5860 if ((c = *p++) < ' ' || c >= 0x7F) { 5861 c = '*'; 5862 } else if (i != 1 && c == ' ') { 5863 break; 5864 } 5865 *s++ = c; 5866 } 5867 *s++ = 0; 5868 } 5869 5870 5871 /* 5872 * Function: sd_register_devid 5873 * 5874 * Description: This routine will obtain the device id information from the 5875 * target, obtain the serial number, and register the device 5876 * id with the ddi framework. 5877 * 5878 * Arguments: devi - the system's dev_info_t for the device. 5879 * un - driver soft state (unit) structure 5880 * reservation_flag - indicates if a reservation conflict 5881 * occurred during attach 5882 * 5883 * Context: Kernel Thread 5884 */ 5885 static void 5886 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 5887 { 5888 int rval = 0; 5889 uchar_t *inq80 = NULL; 5890 size_t inq80_len = MAX_INQUIRY_SIZE; 5891 size_t inq80_resid = 0; 5892 uchar_t *inq83 = NULL; 5893 size_t inq83_len = MAX_INQUIRY_SIZE; 5894 size_t inq83_resid = 0; 5895 5896 ASSERT(un != NULL); 5897 ASSERT(mutex_owned(SD_MUTEX(un))); 5898 ASSERT((SD_DEVINFO(un)) == devi); 5899 5900 /* 5901 * This is the case of antiquated Sun disk drives that have the 5902 * FAB_DEVID property set in the disk_table. These drives 5903 * manage their devids by storing them in the last 2 available sectors 5904 * on the drive and have them fabricated by the ddi layer by calling 5905 * ddi_devid_init and passing the DEVID_FAB flag. 5906 */ 5907 if (un->un_f_opt_fab_devid == TRUE) { 5908 /* 5909 * Depending on EINVAL isn't reliable, since a reserved disk 5910 * may result in invalid geometry, so check to make sure a 5911 * reservation conflict did not occur during attach. 5912 */ 5913 if ((sd_get_devid(un) == EINVAL) && 5914 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5915 /* 5916 * The devid is invalid AND there is no reservation 5917 * conflict. Fabricate a new devid. 5918 */ 5919 (void) sd_create_devid(un); 5920 } 5921 5922 /* Register the devid if it exists */ 5923 if (un->un_devid != NULL) { 5924 (void) ddi_devid_register(SD_DEVINFO(un), 5925 un->un_devid); 5926 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5927 "sd_register_devid: Devid Fabricated\n"); 5928 } 5929 return; 5930 } 5931 5932 /* 5933 * We check the availability of the World Wide Name (0x83) and Unit 5934 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5935 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5936 * 0x83 is available, that is the best choice. Our next choice is 5937 * 0x80. If neither is available, we munge the devid from the device 5938 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5939 * to fabricate a devid for non-Sun qualified disks.
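* In short, the preference order is: INQUIRY page 0x83, then page 0x80, then a devid munged from the standard INQUIRY data (Sun qualified disks), and finally a fabricated devid (non-Sun qualified disks).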
5940 */ 5941 if (sd_check_vpd_page_support(un) == 0) { 5942 /* collect page 80 data if available */ 5943 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5944 5945 mutex_exit(SD_MUTEX(un)); 5946 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5947 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 5948 0x01, 0x80, &inq80_resid); 5949 5950 if (rval != 0) { 5951 kmem_free(inq80, inq80_len); 5952 inq80 = NULL; 5953 inq80_len = 0; 5954 } 5955 mutex_enter(SD_MUTEX(un)); 5956 } 5957 5958 /* collect page 83 data if available */ 5959 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5960 5961 mutex_exit(SD_MUTEX(un)); 5962 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5963 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 5964 0x01, 0x83, &inq83_resid); 5965 5966 if (rval != 0) { 5967 kmem_free(inq83, inq83_len); 5968 inq83 = NULL; 5969 inq83_len = 0; 5970 } 5971 mutex_enter(SD_MUTEX(un)); 5972 } 5973 } 5974 5975 /* encode best devid possible based on data available */ 5976 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5977 (char *)ddi_driver_name(SD_DEVINFO(un)), 5978 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5979 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5980 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5981 5982 /* devid successfully encoded, register devid */ 5983 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5984 5985 } else { 5986 /* 5987 * Unable to encode a devid based on data available. 5988 * This is not a Sun qualified disk. Older Sun disk 5989 * drives that have the SD_FAB_DEVID property 5990 * set in the disk_table and non-Sun qualified 5991 * disks are treated in the same manner. These 5992 * drives manage their devids by storing them in 5993 * the last 2 available sectors on the drive and 5994 * have them fabricated by the ddi layer by 5995 * calling ddi_devid_init and passing the 5996 * DEVID_FAB flag. 5997 * Create a fabricated devid only if one does 5998 * not already exist. 5999 */ 6000 if (sd_get_devid(un) == EINVAL) { 6001 (void) sd_create_devid(un); 6002 un->un_f_opt_fab_devid = TRUE; 6003 } 6004 6005 /* Register the devid if it exists */ 6006 if (un->un_devid != NULL) { 6007 (void) ddi_devid_register(SD_DEVINFO(un), 6008 un->un_devid); 6009 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6010 "sd_register_devid: devid fabricated using " 6011 "ddi framework\n"); 6012 } 6013 } 6014 6015 /* clean up resources */ 6016 if (inq80 != NULL) { 6017 kmem_free(inq80, inq80_len); 6018 } 6019 if (inq83 != NULL) { 6020 kmem_free(inq83, inq83_len); 6021 } 6022 } 6023 6024 static daddr_t 6025 sd_get_devid_block(struct sd_lun *un) 6026 { 6027 daddr_t spc, blk, head, cyl; 6028 6029 if (un->un_blockcount <= DK_MAX_BLOCKS) { 6030 /* this geometry doesn't allow us to write a devid */ 6031 if (un->un_g.dkg_acyl < 2) { 6032 return (-1); 6033 } 6034 6035 /* 6036 * Subtracting 2 guarantees that the next-to-last cylinder 6037 * is used. 6038 */ 6039 cyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl - 2; 6040 spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect; 6041 head = un->un_g.dkg_nhead - 1; 6042 blk = (cyl * (spc - un->un_g.dkg_apc)) + 6043 (head * un->un_g.dkg_nsect) + 1; 6044 } else { 6045 if (un->un_reserved != -1) { 6046 blk = un->un_map[un->un_reserved].dkl_cylno + 1; 6047 } else { 6048 return (-1); 6049 } 6050 } 6051 return (blk); 6052 } 6053 6054 /* 6055 * Function: sd_get_devid 6056 * 6057 * Description: This routine will return 0 if a valid device id has been 6058 * obtained from the target and stored in the soft state.
If a 6059 * valid device id has not been previously read and stored, a 6060 * read attempt will be made. 6061 * 6062 * Arguments: un - driver soft state (unit) structure 6063 * 6064 * Return Code: 0 if we successfully get the device id 6065 * 6066 * Context: Kernel Thread 6067 */ 6068 6069 static int 6070 sd_get_devid(struct sd_lun *un) 6071 { 6072 struct dk_devid *dkdevid; 6073 ddi_devid_t tmpid; 6074 uint_t *ip; 6075 size_t sz; 6076 daddr_t blk; 6077 int status; 6078 int chksum; 6079 int i; 6080 size_t buffer_size; 6081 6082 ASSERT(un != NULL); 6083 ASSERT(mutex_owned(SD_MUTEX(un))); 6084 6085 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 6086 un); 6087 6088 if (un->un_devid != NULL) { 6089 return (0); 6090 } 6091 6092 blk = sd_get_devid_block(un); 6093 if (blk < 0) 6094 return (EINVAL); 6095 6096 /* 6097 * Read and verify device id, stored in the reserved cylinders at the 6098 * end of the disk. Backup label is on the odd sectors of the last 6099 * track of the last cylinder. Device id will be on track of the next 6100 * to last cylinder. 6101 */ 6102 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 6103 mutex_exit(SD_MUTEX(un)); 6104 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 6105 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 6106 SD_PATH_DIRECT); 6107 if (status != 0) { 6108 goto error; 6109 } 6110 6111 /* Validate the revision */ 6112 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 6113 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 6114 status = EINVAL; 6115 goto error; 6116 } 6117 6118 /* Calculate the checksum */ 6119 chksum = 0; 6120 ip = (uint_t *)dkdevid; 6121 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 6122 i++) { 6123 chksum ^= ip[i]; 6124 } 6125 6126 /* Compare the checksums */ 6127 if (DKD_GETCHKSUM(dkdevid) != chksum) { 6128 status = EINVAL; 6129 goto error; 6130 } 6131 6132 /* Validate the device id */ 6133 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 6134 status = EINVAL; 6135 goto error; 6136 } 6137 6138 /* 6139 * Store the device id in the driver soft state 6140 */ 6141 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 6142 tmpid = kmem_alloc(sz, KM_SLEEP); 6143 6144 mutex_enter(SD_MUTEX(un)); 6145 6146 un->un_devid = tmpid; 6147 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 6148 6149 kmem_free(dkdevid, buffer_size); 6150 6151 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 6152 6153 return (status); 6154 error: 6155 mutex_enter(SD_MUTEX(un)); 6156 kmem_free(dkdevid, buffer_size); 6157 return (status); 6158 } 6159 6160 6161 /* 6162 * Function: sd_create_devid 6163 * 6164 * Description: This routine will fabricate the device id and write it 6165 * to the disk. 6166 * 6167 * Arguments: un - driver soft state (unit) structure 6168 * 6169 * Return Code: value of the fabricated device id 6170 * 6171 * Context: Kernel Thread 6172 */ 6173 6174 static ddi_devid_t 6175 sd_create_devid(struct sd_lun *un) 6176 { 6177 ASSERT(un != NULL); 6178 6179 /* Fabricate the devid */ 6180 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 6181 == DDI_FAILURE) { 6182 return (NULL); 6183 } 6184 6185 /* Write the devid to disk */ 6186 if (sd_write_deviceid(un) != 0) { 6187 ddi_devid_free(un->un_devid); 6188 un->un_devid = NULL; 6189 } 6190 6191 return (un->un_devid); 6192 } 6193 6194 6195 /* 6196 * Function: sd_write_deviceid 6197 * 6198 * Description: This routine will write the device id to the disk 6199 * reserved sector. 
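* The sector written holds a two-byte revision (DK_DEVID_REV_MSB and DK_DEVID_REV_LSB), the encoded devid, and an XOR checksum over the uint_t words of the block, computed as in the loop below.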
6200 * 6201 * Arguments: un - driver soft state (unit) structure 6202 * 6203 * Return Code: EINVAL 6204 * value returned by sd_send_scsi_cmd 6205 * 6206 * Context: Kernel Thread 6207 */ 6208 6209 static int 6210 sd_write_deviceid(struct sd_lun *un) 6211 { 6212 struct dk_devid *dkdevid; 6213 daddr_t blk; 6214 uint_t *ip, chksum; 6215 int status; 6216 int i; 6217 6218 ASSERT(mutex_owned(SD_MUTEX(un))); 6219 6220 blk = sd_get_devid_block(un); 6221 if (blk < 0) 6222 return (-1); 6223 mutex_exit(SD_MUTEX(un)); 6224 6225 /* Allocate the buffer */ 6226 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 6227 6228 /* Fill in the revision */ 6229 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 6230 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 6231 6232 /* Copy in the device id */ 6233 mutex_enter(SD_MUTEX(un)); 6234 bcopy(un->un_devid, &dkdevid->dkd_devid, 6235 ddi_devid_sizeof(un->un_devid)); 6236 mutex_exit(SD_MUTEX(un)); 6237 6238 /* Calculate the checksum */ 6239 chksum = 0; 6240 ip = (uint_t *)dkdevid; 6241 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 6242 i++) { 6243 chksum ^= ip[i]; 6244 } 6245 6246 /* Fill-in checksum */ 6247 DKD_FORMCHKSUM(chksum, dkdevid); 6248 6249 /* Write the reserved sector */ 6250 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 6251 SD_PATH_DIRECT); 6252 6253 kmem_free(dkdevid, un->un_sys_blocksize); 6254 6255 mutex_enter(SD_MUTEX(un)); 6256 return (status); 6257 } 6258 6259 6260 /* 6261 * Function: sd_check_vpd_page_support 6262 * 6263 * Description: This routine sends an inquiry command with the EVPD bit set and 6264 * a page code of 0x00 to the device. It is used to determine which 6265 * vital product pages are available to find the devid. We are 6266 * looking for pages 0x83 or 0x80. If we return -1, the 6267 * device does not support that command. 6268 * 6269 * Arguments: un - driver soft state (unit) structure 6270 * 6271 * Return Code: 0 - success 6272 * 1 - check condition 6273 * 6274 * Context: This routine can sleep. 6275 */ 6276 6277 static int 6278 sd_check_vpd_page_support(struct sd_lun *un) 6279 { 6280 uchar_t *page_list = NULL; 6281 uchar_t page_length = 0xff; /* Use max possible length */ 6282 uchar_t evpd = 0x01; /* Set the EVPD bit */ 6283 uchar_t page_code = 0x00; /* Supported VPD Pages */ 6284 int rval = 0; 6285 int counter; 6286 6287 ASSERT(un != NULL); 6288 ASSERT(mutex_owned(SD_MUTEX(un))); 6289 6290 mutex_exit(SD_MUTEX(un)); 6291 6292 /* 6293 * We'll set the page length to the maximum to save figuring it out 6294 * with an additional call. 6295 */ 6296 page_list = kmem_zalloc(page_length, KM_SLEEP); 6297 6298 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 6299 page_code, NULL); 6300 6301 mutex_enter(SD_MUTEX(un)); 6302 6303 /* 6304 * Now we must validate that the device accepted the command, as some 6305 * drives do not support it. If the drive does support it, we will 6306 * return 0, and the supported pages will be in un_vpd_page_mask. If 6307 * not, we return -1. 6308 */ 6309 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 6310 /* Loop to find one of the 2 pages we need */ 6311 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 6312 6313 /* 6314 * Pages are returned in ascending order, and 0x83 is what we 6315 * are hoping for.
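* For example (illustrative data): a page 0x00 response whose byte 3 (page length) is 3 and whose bytes 4-6 are 0x00, 0x80 and 0x83 causes the loop below to set SD_VPD_SUPPORTED_PG, SD_VPD_UNIT_SERIAL_PG and SD_VPD_DEVID_WWN_PG in un_vpd_page_mask.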
6316 */ 6317 while ((page_list[counter] <= 0x83) && 6318 (counter <= (page_list[VPD_PAGE_LENGTH] + 6319 VPD_HEAD_OFFSET))) { 6320 /* 6321 * Add 3 because page_list[3] is the number of 6322 * pages minus 3 6323 */ 6324 6325 switch (page_list[counter]) { 6326 case 0x00: 6327 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 6328 break; 6329 case 0x80: 6330 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 6331 break; 6332 case 0x81: 6333 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 6334 break; 6335 case 0x82: 6336 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 6337 break; 6338 case 0x83: 6339 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 6340 break; 6341 } 6342 counter++; 6343 } 6344 6345 } else { 6346 rval = -1; 6347 6348 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6349 "sd_check_vpd_page_support: This drive does not implement " 6350 "VPD pages.\n"); 6351 } 6352 6353 kmem_free(page_list, page_length); 6354 6355 return (rval); 6356 } 6357 6358 6359 /* 6360 * Function: sd_setup_pm 6361 * 6362 * Description: Initialize Power Management on the device 6363 * 6364 * Context: Kernel Thread 6365 */ 6366 6367 static void 6368 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 6369 { 6370 uint_t log_page_size; 6371 uchar_t *log_page_data; 6372 int rval; 6373 6374 /* 6375 * Since we are called from attach, holding a mutex for 6376 * un is unnecessary. Because some of the routines called 6377 * from here require SD_MUTEX to not be held, assert this 6378 * right up front. 6379 */ 6380 ASSERT(!mutex_owned(SD_MUTEX(un))); 6381 /* 6382 * Since the sd device does not have the 'reg' property, 6383 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 6384 * The following code is to tell cpr that this device 6385 * DOES need to be suspended and resumed. 6386 */ 6387 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 6388 "pm-hardware-state", "needs-suspend-resume"); 6389 6390 /* 6391 * Check if HBA has set the "pm-capable" property. 6392 * If "pm-capable" exists and is non-zero then we can 6393 * power manage the device without checking the start/stop 6394 * cycle count log sense page. 6395 * 6396 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 6397 * then we should not power manage the device. 6398 * 6399 * If "pm-capable" doesn't exist then un->un_pm_capable_prop will 6400 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd will 6401 * check the start/stop cycle count log sense page and power manage 6402 * the device if the cycle count limit has not been exceeded. 6403 */ 6404 un->un_pm_capable_prop = 6405 ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6406 "pm-capable", SD_PM_CAPABLE_UNDEFINED); 6407 if (un->un_pm_capable_prop != SD_PM_CAPABLE_UNDEFINED) { 6408 /* 6409 * pm-capable property exists. 6410 * 6411 * Convert "TRUE" values for un_pm_capable_prop to 6412 * SD_PM_CAPABLE_TRUE (1) to make it easier to check later. 6413 * "TRUE" values are any values except SD_PM_CAPABLE_FALSE (0) 6414 * and SD_PM_CAPABLE_UNDEFINED (-1) 6415 */ 6416 if (un->un_pm_capable_prop != SD_PM_CAPABLE_FALSE) { 6417 un->un_pm_capable_prop = SD_PM_CAPABLE_TRUE; 6418 } 6419 6420 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6421 "sd_unit_attach: un:0x%p pm-capable " 6422 "property set to %d.\n", un, un->un_pm_capable_prop); 6423 } 6424 6425 /* 6426 * This complies with the new power management framework 6427 * for certain desktop machines. Create the pm_components 6428 * property as a string array property. 
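* (The property created is the three-element string array { "NAME=spindle-motor", "0=off", "1=on" }; see sd_create_pm_components() below.)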
6429 * 6430 * If this is a removable device or if the pm-capable property 6431 * is SD_PM_CAPABLE_TRUE (1) then we should create the 6432 * pm_components property without checking for the existance of 6433 * the start-stop cycle counter log page 6434 */ 6435 if (ISREMOVABLE(un) || 6436 un->un_pm_capable_prop == SD_PM_CAPABLE_TRUE) { 6437 /* 6438 * not all devices have a motor, try it first. 6439 * some devices may return ILLEGAL REQUEST, some 6440 * will hang 6441 */ 6442 un->un_f_start_stop_supported = TRUE; 6443 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 6444 SD_PATH_DIRECT) != 0) { 6445 un->un_f_start_stop_supported = FALSE; 6446 } 6447 6448 /* 6449 * create pm properties anyways otherwise the parent can't 6450 * go to sleep 6451 */ 6452 (void) sd_create_pm_components(devi, un); 6453 un->un_f_pm_is_enabled = TRUE; 6454 6455 /* 6456 * Need to create a zero length (Boolean) property 6457 * removable-media for the removable media devices. 6458 * Note that the return value of the property is not being 6459 * checked, since if unable to create the property 6460 * then do not want the attach to fail altogether. Consistent 6461 * with other property creation in attach. 6462 */ 6463 if (ISREMOVABLE(un)) { 6464 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 6465 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 6466 } 6467 return; 6468 } 6469 6470 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 6471 6472 #ifdef SDDEBUG 6473 if (sd_force_pm_supported) { 6474 /* Force a successful result */ 6475 rval = 1; 6476 } 6477 #endif 6478 6479 /* 6480 * If the start-stop cycle counter log page is not supported 6481 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 6482 * then we should not create the pm_components property. 6483 */ 6484 if (rval == -1 || un->un_pm_capable_prop == SD_PM_CAPABLE_FALSE) { 6485 /* 6486 * Error. 6487 * Reading log sense failed, most likely this is 6488 * an older drive that does not support log sense. 6489 * If this fails auto-pm is not supported. 6490 */ 6491 un->un_power_level = SD_SPINDLE_ON; 6492 un->un_f_pm_is_enabled = FALSE; 6493 6494 } else if (rval == 0) { 6495 /* 6496 * Page not found. 6497 * The start stop cycle counter is implemented as page 6498 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 6499 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 6500 */ 6501 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 6502 /* 6503 * Page found, use this one. 6504 */ 6505 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 6506 un->un_f_pm_is_enabled = TRUE; 6507 } else { 6508 /* 6509 * Error or page not found. 6510 * auto-pm is not supported for this device. 6511 */ 6512 un->un_power_level = SD_SPINDLE_ON; 6513 un->un_f_pm_is_enabled = FALSE; 6514 } 6515 } else { 6516 /* 6517 * Page found, use it. 6518 */ 6519 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 6520 un->un_f_pm_is_enabled = TRUE; 6521 } 6522 6523 6524 if (un->un_f_pm_is_enabled == TRUE) { 6525 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6526 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6527 6528 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 6529 log_page_size, un->un_start_stop_cycle_page, 6530 0x01, 0, SD_PATH_DIRECT); 6531 #ifdef SDDEBUG 6532 if (sd_force_pm_supported) { 6533 /* Force a successful result */ 6534 rval = 0; 6535 } 6536 #endif 6537 6538 /* 6539 * If the Log sense for Page( Start/stop cycle counter page) 6540 * succeeds, then power managment is supported and we can 6541 * enable auto-pm. 
6542 */ 6543 if (rval == 0) { 6544 (void) sd_create_pm_components(devi, un); 6545 } else { 6546 un->un_power_level = SD_SPINDLE_ON; 6547 un->un_f_pm_is_enabled = FALSE; 6548 } 6549 6550 kmem_free(log_page_data, log_page_size); 6551 } 6552 } 6553 6554 6555 /* 6556 * Function: sd_create_pm_components 6557 * 6558 * Description: Initialize PM property. 6559 * 6560 * Context: Kernel thread context 6561 */ 6562 6563 static void 6564 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 6565 { 6566 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 6567 6568 ASSERT(!mutex_owned(SD_MUTEX(un))); 6569 6570 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6571 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 6572 /* 6573 * When components are initially created they are idle, 6574 * power up any non-removables. 6575 * Note: the return value of pm_raise_power can't be used 6576 * for determining if PM should be enabled for this device. 6577 * Even if you check the return values and remove this 6578 * property created above, the PM framework will not honor the 6579 * change after the first call to pm_raise_power. Hence, 6580 * removal of that property does not help if pm_raise_power 6581 * fails. In the case of removable media, the start/stop 6582 * will fail if the media is not present. 6583 */ 6584 if ((!ISREMOVABLE(un)) && (pm_raise_power(SD_DEVINFO(un), 0, 6585 SD_SPINDLE_ON) == DDI_SUCCESS)) { 6586 mutex_enter(SD_MUTEX(un)); 6587 un->un_power_level = SD_SPINDLE_ON; 6588 mutex_enter(&un->un_pm_mutex); 6589 /* Set to on and not busy. */ 6590 un->un_pm_count = 0; 6591 } else { 6592 mutex_enter(SD_MUTEX(un)); 6593 un->un_power_level = SD_SPINDLE_OFF; 6594 mutex_enter(&un->un_pm_mutex); 6595 /* Set to off. */ 6596 un->un_pm_count = -1; 6597 } 6598 mutex_exit(&un->un_pm_mutex); 6599 mutex_exit(SD_MUTEX(un)); 6600 } else { 6601 un->un_power_level = SD_SPINDLE_ON; 6602 un->un_f_pm_is_enabled = FALSE; 6603 } 6604 } 6605 6606 6607 /* 6608 * Function: sd_ddi_suspend 6609 * 6610 * Description: Performs system power-down operations. This includes 6611 * setting the drive state to indicate its suspended so 6612 * that no new commands will be accepted. Also, wait for 6613 * all commands that are in transport or queued to a timer 6614 * for retry to complete. All timeout threads are cancelled. 6615 * 6616 * Return Code: DDI_FAILURE or DDI_SUCCESS 6617 * 6618 * Context: Kernel thread context 6619 */ 6620 6621 static int 6622 sd_ddi_suspend(dev_info_t *devi) 6623 { 6624 struct sd_lun *un; 6625 clock_t wait_cmds_complete; 6626 6627 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6628 if (un == NULL) { 6629 return (DDI_FAILURE); 6630 } 6631 6632 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 6633 6634 mutex_enter(SD_MUTEX(un)); 6635 6636 /* Return success if the device is already suspended. */ 6637 if (un->un_state == SD_STATE_SUSPENDED) { 6638 mutex_exit(SD_MUTEX(un)); 6639 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6640 "device already suspended, exiting\n"); 6641 return (DDI_SUCCESS); 6642 } 6643 6644 /* Return failure if the device is being used by HA */ 6645 if (un->un_resvd_status & 6646 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 6647 mutex_exit(SD_MUTEX(un)); 6648 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6649 "device in use by HA, exiting\n"); 6650 return (DDI_FAILURE); 6651 } 6652 6653 /* 6654 * Return failure if the device is in a resource wait 6655 * or power changing state. 
6656 */ 6657 if ((un->un_state == SD_STATE_RWAIT) || 6658 (un->un_state == SD_STATE_PM_CHANGING)) { 6659 mutex_exit(SD_MUTEX(un)); 6660 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6661 "device in resource wait state, exiting\n"); 6662 return (DDI_FAILURE); 6663 } 6664 6665 6666 un->un_save_state = un->un_last_state; 6667 New_state(un, SD_STATE_SUSPENDED); 6668 6669 /* 6670 * Wait for all commands that are in transport or queued to a timer 6671 * for retry to complete. 6672 * 6673 * While waiting, no new commands will be accepted or sent because of 6674 * the new state we set above. 6675 * 6676 * Wait till current operation has completed. If we are in the resource 6677 * wait state (with an intr outstanding) then we need to wait till the 6678 * intr completes and starts the next cmd. We want to wait for 6679 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 6680 */ 6681 wait_cmds_complete = ddi_get_lbolt() + 6682 (sd_wait_cmds_complete * drv_usectohz(1000000)); 6683 6684 while (un->un_ncmds_in_transport != 0) { 6685 /* 6686 * Fail if commands do not finish in the specified time. 6687 */ 6688 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 6689 wait_cmds_complete) == -1) { 6690 /* 6691 * Undo the state changes made above. Everything 6692 * must go back to it's original value. 6693 */ 6694 Restore_state(un); 6695 un->un_last_state = un->un_save_state; 6696 /* Wake up any threads that might be waiting. */ 6697 cv_broadcast(&un->un_suspend_cv); 6698 mutex_exit(SD_MUTEX(un)); 6699 SD_ERROR(SD_LOG_IO_PM, un, 6700 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6701 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6702 return (DDI_FAILURE); 6703 } 6704 } 6705 6706 /* 6707 * Cancel SCSI watch thread and timeouts, if any are active 6708 */ 6709 6710 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6711 opaque_t temp_token = un->un_swr_token; 6712 mutex_exit(SD_MUTEX(un)); 6713 scsi_watch_suspend(temp_token); 6714 mutex_enter(SD_MUTEX(un)); 6715 } 6716 6717 if (un->un_reset_throttle_timeid != NULL) { 6718 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6719 un->un_reset_throttle_timeid = NULL; 6720 mutex_exit(SD_MUTEX(un)); 6721 (void) untimeout(temp_id); 6722 mutex_enter(SD_MUTEX(un)); 6723 } 6724 6725 if (un->un_dcvb_timeid != NULL) { 6726 timeout_id_t temp_id = un->un_dcvb_timeid; 6727 un->un_dcvb_timeid = NULL; 6728 mutex_exit(SD_MUTEX(un)); 6729 (void) untimeout(temp_id); 6730 mutex_enter(SD_MUTEX(un)); 6731 } 6732 6733 mutex_enter(&un->un_pm_mutex); 6734 if (un->un_pm_timeid != NULL) { 6735 timeout_id_t temp_id = un->un_pm_timeid; 6736 un->un_pm_timeid = NULL; 6737 mutex_exit(&un->un_pm_mutex); 6738 mutex_exit(SD_MUTEX(un)); 6739 (void) untimeout(temp_id); 6740 mutex_enter(SD_MUTEX(un)); 6741 } else { 6742 mutex_exit(&un->un_pm_mutex); 6743 } 6744 6745 if (un->un_retry_timeid != NULL) { 6746 timeout_id_t temp_id = un->un_retry_timeid; 6747 un->un_retry_timeid = NULL; 6748 mutex_exit(SD_MUTEX(un)); 6749 (void) untimeout(temp_id); 6750 mutex_enter(SD_MUTEX(un)); 6751 } 6752 6753 if (un->un_direct_priority_timeid != NULL) { 6754 timeout_id_t temp_id = un->un_direct_priority_timeid; 6755 un->un_direct_priority_timeid = NULL; 6756 mutex_exit(SD_MUTEX(un)); 6757 (void) untimeout(temp_id); 6758 mutex_enter(SD_MUTEX(un)); 6759 } 6760 6761 if (un->un_f_is_fibre == TRUE) { 6762 /* 6763 * Remove callbacks for insert and remove events 6764 */ 6765 if (un->un_insert_event != NULL) { 6766 mutex_exit(SD_MUTEX(un)); 6767 (void) ddi_remove_event_handler(un->un_insert_cb_id); 6768 
mutex_enter(SD_MUTEX(un)); 6769 un->un_insert_event = NULL; 6770 } 6771 6772 if (un->un_remove_event != NULL) { 6773 mutex_exit(SD_MUTEX(un)); 6774 (void) ddi_remove_event_handler(un->un_remove_cb_id); 6775 mutex_enter(SD_MUTEX(un)); 6776 un->un_remove_event = NULL; 6777 } 6778 } 6779 6780 mutex_exit(SD_MUTEX(un)); 6781 6782 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 6783 6784 return (DDI_SUCCESS); 6785 } 6786 6787 6788 /* 6789 * Function: sd_ddi_pm_suspend 6790 * 6791 * Description: Set the drive state to low power. 6792 * Someone else is required to actually change the drive 6793 * power level. 6794 * 6795 * Arguments: un - driver soft state (unit) structure 6796 * 6797 * Return Code: DDI_FAILURE or DDI_SUCCESS 6798 * 6799 * Context: Kernel thread context 6800 */ 6801 6802 static int 6803 sd_ddi_pm_suspend(struct sd_lun *un) 6804 { 6805 ASSERT(un != NULL); 6806 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 6807 6808 ASSERT(!mutex_owned(SD_MUTEX(un))); 6809 mutex_enter(SD_MUTEX(un)); 6810 6811 /* 6812 * Exit if power management is not enabled for this device, or if 6813 * the device is being used by HA. 6814 */ 6815 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6816 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6817 mutex_exit(SD_MUTEX(un)); 6818 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 6819 return (DDI_SUCCESS); 6820 } 6821 6822 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 6823 un->un_ncmds_in_driver); 6824 6825 /* 6826 * See if the device is not busy, i.e.: 6827 * - we have no commands in the driver for this device 6828 * - not waiting for resources 6829 */ 6830 if ((un->un_ncmds_in_driver == 0) && 6831 (un->un_state != SD_STATE_RWAIT)) { 6832 /* 6833 * The device is not busy, so it is OK to go to low power state. 6834 * Indicate low power, but rely on someone else to actually 6835 * change it. 6836 */ 6837 mutex_enter(&un->un_pm_mutex); 6838 un->un_pm_count = -1; 6839 mutex_exit(&un->un_pm_mutex); 6840 un->un_power_level = SD_SPINDLE_OFF; 6841 } 6842 6843 mutex_exit(SD_MUTEX(un)); 6844 6845 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 6846 6847 return (DDI_SUCCESS); 6848 } 6849 6850 6851 /* 6852 * Function: sd_ddi_resume 6853 * 6854 * Description: Performs system power-up operations. 6855 * 6856 * Return Code: DDI_SUCCESS 6857 * DDI_FAILURE 6858 * 6859 * Context: Kernel thread context 6860 */ 6861 6862 static int 6863 sd_ddi_resume(dev_info_t *devi) 6864 { 6865 struct sd_lun *un; 6866 6867 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6868 if (un == NULL) { 6869 return (DDI_FAILURE); 6870 } 6871 6872 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6873 6874 mutex_enter(SD_MUTEX(un)); 6875 Restore_state(un); 6876 6877 /* 6878 * Restore the state which was saved to give the 6879 * right state in un_last_state. 6880 */ 6881 un->un_last_state = un->un_save_state; 6882 /* 6883 * Note: throttle comes back at full. 6884 * Also note: this MUST be done before calling pm_raise_power 6885 * otherwise the system can get hung in biowait. The scenario where 6886 * this'll happen is under cpr suspend. Writing of the system 6887 * state goes through sddump, which writes 0 to un_throttle. If 6888 * writing the system state then fails, for example if the partition is 6889 * too small, then cpr attempts a resume.
If throttle isn't restored 6890 * from the saved value until after calling pm_raise_power then 6891 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6892 * in biowait. 6893 */ 6894 un->un_throttle = un->un_saved_throttle; 6895 6896 /* 6897 * Failure is very rare, as the only command issued in the power 6898 * entry point is the START command, on a 0->1 or 6899 * unknown->1 transition. Put the device in the SPINDLE ON state 6900 * irrespective of the state in which it was suspended. Ignore the 6901 * return value, as the resume should not fail. In the case of 6902 * removable media, the media need not be inserted, so raising power 6903 * may fail with media not present. 6904 */ 6905 if (!ISREMOVABLE(un)) { 6906 mutex_exit(SD_MUTEX(un)); 6907 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6908 mutex_enter(SD_MUTEX(un)); 6909 } 6910 6911 /* 6912 * Don't broadcast to the suspend cv and therefore possibly 6913 * start I/O until after power has been restored. 6914 */ 6915 cv_broadcast(&un->un_suspend_cv); 6916 cv_broadcast(&un->un_state_cv); 6917 6918 /* restart thread */ 6919 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6920 scsi_watch_resume(un->un_swr_token); 6921 } 6922 6923 #if (defined(__fibre)) 6924 if (un->un_f_is_fibre == TRUE) { 6925 /* 6926 * Add callbacks for insert and remove events 6927 */ 6928 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6929 sd_init_event_callbacks(un); 6930 } 6931 } 6932 #endif 6933 6934 /* 6935 * Transport any pending commands to the target. 6936 * 6937 * If this is a low-activity device commands in queue will have to wait 6938 * until new commands come in, which may take a while. Also, we 6939 * specifically don't check un_ncmds_in_transport because we know that 6940 * there really are no commands in progress after the unit was 6941 * suspended and we could have reached the throttle level, been 6942 * suspended, and have no new commands coming in for a while. Highly 6943 * unlikely, but so is the low-activity disk scenario. 6944 */ 6945 ddi_xbuf_dispatch(un->un_xbuf_attr); 6946 6947 sd_start_cmds(un, NULL); 6948 mutex_exit(SD_MUTEX(un)); 6949 6950 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6951 6952 return (DDI_SUCCESS); 6953 } 6954 6955 6956 /* 6957 * Function: sd_ddi_pm_resume 6958 * 6959 * Description: Set the drive state to powered on. 6960 * Someone else is required to actually change the drive 6961 * power level. 6962 * 6963 * Arguments: un - driver soft state (unit) structure 6964 * 6965 * Return Code: DDI_SUCCESS 6966 * 6967 * Context: Kernel thread context 6968 */ 6969 6970 static int 6971 sd_ddi_pm_resume(struct sd_lun *un) 6972 { 6973 ASSERT(un != NULL); 6974 6975 ASSERT(!mutex_owned(SD_MUTEX(un))); 6976 mutex_enter(SD_MUTEX(un)); 6977 un->un_power_level = SD_SPINDLE_ON; 6978 6979 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6980 mutex_enter(&un->un_pm_mutex); 6981 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6982 un->un_pm_count++; 6983 ASSERT(un->un_pm_count == 0); 6984 /* 6985 * Note: no longer do the cv_broadcast on un_suspend_cv. The 6986 * un_suspend_cv is for a system resume, not a power management 6987 * device resume. (4297749) 6988 * cv_broadcast(&un->un_suspend_cv); 6989 */ 6990 } 6991 mutex_exit(&un->un_pm_mutex); 6992 mutex_exit(SD_MUTEX(un)); 6993 6994 return (DDI_SUCCESS); 6995 } 6996 6997 6998 /* 6999 * Function: sd_pm_idletimeout_handler 7000 * 7001 * Description: A timer routine that's active only while a device is busy.
7002 * The purpose is to extend slightly the pm framework's busy
7003 * view of the device to prevent busy/idle thrashing for
7004 * back-to-back commands. Do this by comparing the current time
7005 * to the time at which the last command completed; when the
7006 * difference is greater than sd_pm_idletime, call
7007 * pm_idle_component. In addition to indicating idle to the pm
7008 * framework, update the chain type to again use the internal pm
7009 * layers of the driver.
7010 *
7011 * Arguments: arg - driver soft state (unit) structure
7012 *
7013 * Context: Executes in a timeout(9F) thread context
7014 */
7015
7016 static void
7017 sd_pm_idletimeout_handler(void *arg)
7018 {
7019 struct sd_lun *un = arg;
7020
7021 time_t now;
7022
7023 mutex_enter(&sd_detach_mutex);
7024 if (un->un_detach_count != 0) {
7025 /* Abort if the instance is detaching */
7026 mutex_exit(&sd_detach_mutex);
7027 return;
7028 }
7029 mutex_exit(&sd_detach_mutex);
7030
7031 now = ddi_get_time();
7032 /*
7033 * Grab both mutexes, in the proper order, since we're accessing
7034 * both PM and softstate variables.
7035 */
7036 mutex_enter(SD_MUTEX(un));
7037 mutex_enter(&un->un_pm_mutex);
7038 if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
7039 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
7040 /*
7041 * Update the chain types.
7042 * This takes effect on the next new command received.
7043 */
7044 if (ISREMOVABLE(un)) {
7045 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
7046 } else {
7047 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
7048 }
7049 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
7050
7051 SD_TRACE(SD_LOG_IO_PM, un,
7052 "sd_pm_idletimeout_handler: idling device\n");
7053 (void) pm_idle_component(SD_DEVINFO(un), 0);
7054 un->un_pm_idle_timeid = NULL;
7055 } else {
7056 un->un_pm_idle_timeid =
7057 timeout(sd_pm_idletimeout_handler, un,
7058 (drv_usectohz((clock_t)300000))); /* 300 ms. */
7059 }
7060 mutex_exit(&un->un_pm_mutex);
7061 mutex_exit(SD_MUTEX(un));
7062 }
7063
7064
7065 /*
7066 * Function: sd_pm_timeout_handler
7067 *
7068 * Description: Callback to tell framework we are idle.
7069 *
7070 * Context: timeout(9F) thread context.
7071 */
7072
7073 static void
7074 sd_pm_timeout_handler(void *arg)
7075 {
7076 struct sd_lun *un = arg;
7077
7078 (void) pm_idle_component(SD_DEVINFO(un), 0);
7079 mutex_enter(&un->un_pm_mutex);
7080 un->un_pm_timeid = NULL;
7081 mutex_exit(&un->un_pm_mutex);
7082 }
7083
7084
7085 /*
7086 * Function: sdpower
7087 *
7088 * Description: PM entry point.
7089 *
7090 * Return Code: DDI_SUCCESS
7091 * DDI_FAILURE
7092 *
7093 * Context: Kernel thread context
7094 */
7095
7096 static int
7097 sdpower(dev_info_t *devi, int component, int level)
7098 {
7099 struct sd_lun *un;
7100 int instance;
7101 int rval = DDI_SUCCESS;
7102 uint_t i, log_page_size, maxcycles, ncycles;
7103 uchar_t *log_page_data;
7104 int log_sense_page;
7105 int medium_present;
7106 time_t intvlp;
7107 dev_t dev;
7108 struct pm_trans_data sd_pm_tran_data;
7109 uchar_t save_state;
7110 int sval;
7111 uchar_t state_before_pm;
7112 int got_semaphore_here;
7113
7114 instance = ddi_get_instance(devi);
7115
7116 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
7117 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
7118 component != 0) {
7119 return (DDI_FAILURE);
7120 }
7121
7122 dev = sd_make_device(SD_DEVINFO(un));
7123
7124 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
7125
7126 /*
7127 * Must synchronize power down with close.
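* (In sketch form, the non-blocking discipline applied below is:
*
*	got_semaphore_here = sema_tryp(&un->un_semoclose);
*	...proceed only if it is safe, based on the try result...
*	if (got_semaphore_here != 0)
*		sema_v(&un->un_semoclose);
*
* This is a sketch only; the real code below also folds in checks of
* un_ncmds_in_driver and the current device state before deciding.)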
7128 * Attempt to decrement/acquire the open/close semaphore,
7129 * but do NOT wait on it. If it's not greater than zero,
7130 * i.e. it can't be decremented without waiting, then
7131 * someone else, either open or close, already has it
7132 * and the try returns 0. Use that knowledge here to determine
7133 * if it's OK to change the device power level.
7134 * Also, only increment it on exit if it was decremented, i.e. gotten,
7135 * here.
7136 */
7137 got_semaphore_here = sema_tryp(&un->un_semoclose);
7138
7139 mutex_enter(SD_MUTEX(un));
7140
7141 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
7142 un->un_ncmds_in_driver);
7143
7144 /*
7145 * If un_ncmds_in_driver is non-zero, commands are already being
7146 * processed in the driver; if the semaphore was not obtained here,
7147 * an open or close is in progress. Either way the device is busy,
7148 * so this concurrent request to go to low power cannot be honored
7149 * and we must return failure.
7150 */
7151 if ((level == SD_SPINDLE_OFF) &&
7152 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
7153 mutex_exit(SD_MUTEX(un));
7154
7155 if (got_semaphore_here != 0) {
7156 sema_v(&un->un_semoclose);
7157 }
7158 SD_TRACE(SD_LOG_IO_PM, un,
7159 "sdpower: exit, device has queued cmds.\n");
7160 return (DDI_FAILURE);
7161 }
7162
7163 /*
7164 * If the device is OFFLINE, the disk is completely dead, and
7165 * the commands we would send to power it on or off would fail
7166 * anyway, so just return failure here.
7167 *
7168 * Power changes to a device that's OFFLINE or SUSPENDED
7169 * are not allowed.
7170 */
7171 if ((un->un_state == SD_STATE_OFFLINE) ||
7172 (un->un_state == SD_STATE_SUSPENDED)) {
7173 mutex_exit(SD_MUTEX(un));
7174
7175 if (got_semaphore_here != 0) {
7176 sema_v(&un->un_semoclose);
7177 }
7178 SD_TRACE(SD_LOG_IO_PM, un,
7179 "sdpower: exit, device is off-line.\n");
7180 return (DDI_FAILURE);
7181 }
7182
7183 /*
7184 * Change the device's state to indicate its power level
7185 * is being changed. Do this to prevent a power off in the
7186 * middle of commands, which is especially bad on devices
7187 * that are really powered off instead of just spun down.
7188 */
7189 state_before_pm = un->un_state;
7190 un->un_state = SD_STATE_PM_CHANGING;
7191
7192 mutex_exit(SD_MUTEX(un));
7193
7194 /*
7195 * Bypass checking the log sense information for removables
7196 * and devices for which the HBA set the pm-capable property.
7197 * If un->un_pm_capable_prop is SD_PM_CAPABLE_UNDEFINED (-1)
7198 * then the HBA did not create the property.
7199 */
7200 if ((level == SD_SPINDLE_OFF) && (!ISREMOVABLE(un)) &&
7201 un->un_pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
7202 /*
7203 * Get the log sense information to determine whether the
7204 * power-cycle counts have gone beyond the threshold.
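*
* For reference, the byte offsets consumed from the returned page
* by the parsing code below are (assuming the standard start/stop
* cycle counter log page layout; the code that follows is the
* authoritative usage):
*
*	0x1c - 0x1f	maximum (lifetime) start/stop cycle count
*	0x24 - 0x27	accumulated start/stop cycle count
*	0x08 onward	service date bytes copied into svc_date[]
*			(DC_SCSI_MFR_LEN bytes)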
7205 */
7206 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
7207 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
7208
7209 mutex_enter(SD_MUTEX(un));
7210 log_sense_page = un->un_start_stop_cycle_page;
7211 mutex_exit(SD_MUTEX(un));
7212
7213 rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
7214 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
7215 #ifdef SDDEBUG
7216 if (sd_force_pm_supported) {
7217 /* Force a successful result */
7218 rval = 0;
7219 }
7220 #endif
7221 if (rval != 0) {
7222 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
7223 "Log Sense Failed\n");
7224 kmem_free(log_page_data, log_page_size);
7225 /* Cannot support power management on those drives */
7226
7227 if (got_semaphore_here != 0) {
7228 sema_v(&un->un_semoclose);
7229 }
7230 /*
7231 * On exit put the state back to its original value
7232 * and broadcast to anyone waiting for the power
7233 * change completion.
7234 */
7235 mutex_enter(SD_MUTEX(un));
7236 un->un_state = state_before_pm;
7237 cv_broadcast(&un->un_suspend_cv);
7238 mutex_exit(SD_MUTEX(un));
7239 SD_TRACE(SD_LOG_IO_PM, un,
7240 "sdpower: exit, Log Sense Failed.\n");
7241 return (DDI_FAILURE);
7242 }
7243
7244 /*
7245 * From the page data, convert the essential information to
7246 * pm_trans_data
7247 */
7248 maxcycles =
7249 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
7250 (log_page_data[0x1E] << 8) | log_page_data[0x1F];
7251
7252 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
7253
7254 ncycles =
7255 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
7256 (log_page_data[0x26] << 8) | log_page_data[0x27];
7257
7258 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
7259
7260 for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
7261 sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
7262 log_page_data[8+i];
7263 }
7264
7265 kmem_free(log_page_data, log_page_size);
7266
7267 /*
7268 * Call the pm_trans_check routine to get the OK from
7269 * the global policy
7270 */
7271
7272 sd_pm_tran_data.format = DC_SCSI_FORMAT;
7273 sd_pm_tran_data.un.scsi_cycles.flag = 0;
7274
7275 rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
7276 #ifdef SDDEBUG
7277 if (sd_force_pm_supported) {
7278 /* Force a successful result */
7279 rval = 1;
7280 }
7281 #endif
7282 switch (rval) {
7283 case 0:
7284 /*
7285 * Either it is not OK to power cycle, or there was an error in
7286 * the parameters passed. In intvlp, pm_trans_check has returned
7287 * the advised time to next consider a power cycle. Based on the
7288 * new intvlp parameter we are supposed to pretend we are busy so
7289 * that the pm framework will never call our power entry point.
7290 * Because of that, install a timeout handler and wait for the
7291 * recommended time to elapse so that power management
7292 * can be effective again.
7293 *
7294 * To effect this behavior, call pm_busy_component to
7295 * indicate to the framework this device is busy.
7296 * By not adjusting un_pm_count, the rest of PM in
7297 * the driver will function normally, independent
7298 * of this; but because the framework is told the device
7299 * is busy it won't attempt powering down until it gets
7300 * a matching idle. The timeout handler sends this.
7301 * Note: sd_pm_entry can't be called here to do this
7302 * because sdpower may have been called as a result
7303 * of a call to pm_raise_power from within sd_pm_entry.
7304 *
7305 * If a timeout handler is already active then
7306 * don't install another.
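*
* The resulting sequence is, in sketch form:
*
*	pm_busy_component(SD_DEVINFO(un), 0);	marks us busy now
*	...intvlp seconds elapse...
*	sd_pm_timeout_handler()
*		-> pm_idle_component(SD_DEVINFO(un), 0);
*
* at which point the framework holds a matched busy/idle pair and
* may again consider powering the device down.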
7307 */
7308 mutex_enter(&un->un_pm_mutex);
7309 if (un->un_pm_timeid == NULL) {
7310 un->un_pm_timeid =
7311 timeout(sd_pm_timeout_handler,
7312 un, intvlp * drv_usectohz(1000000));
7313 mutex_exit(&un->un_pm_mutex);
7314 (void) pm_busy_component(SD_DEVINFO(un), 0);
7315 } else {
7316 mutex_exit(&un->un_pm_mutex);
7317 }
7318 if (got_semaphore_here != 0) {
7319 sema_v(&un->un_semoclose);
7320 }
7321 /*
7322 * On exit put the state back to its original value
7323 * and broadcast to anyone waiting for the power
7324 * change completion.
7325 */
7326 mutex_enter(SD_MUTEX(un));
7327 un->un_state = state_before_pm;
7328 cv_broadcast(&un->un_suspend_cv);
7329 mutex_exit(SD_MUTEX(un));
7330
7331 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
7332 "trans check Failed, not ok to power cycle.\n");
7333 return (DDI_FAILURE);
7334
7335 case -1:
7336 if (got_semaphore_here != 0) {
7337 sema_v(&un->un_semoclose);
7338 }
7339 /*
7340 * On exit put the state back to its original value
7341 * and broadcast to anyone waiting for the power
7342 * change completion.
7343 */
7344 mutex_enter(SD_MUTEX(un));
7345 un->un_state = state_before_pm;
7346 cv_broadcast(&un->un_suspend_cv);
7347 mutex_exit(SD_MUTEX(un));
7348 SD_TRACE(SD_LOG_IO_PM, un,
7349 "sdpower: exit, trans check command Failed.\n");
7350 return (DDI_FAILURE);
7351 }
7352 }
7353
7354 if (level == SD_SPINDLE_OFF) {
7355 /*
7356 * Save the last state... if the STOP FAILS we need it
7357 * for restoring
7358 */
7359 mutex_enter(SD_MUTEX(un));
7360 save_state = un->un_last_state;
7361 /*
7362 * There must not be any cmds getting processed
7363 * in the driver when we get here. Power to the
7364 * device is potentially going off.
7365 */
7366 ASSERT(un->un_ncmds_in_driver == 0);
7367 mutex_exit(SD_MUTEX(un));
7368
7369 /*
7370 * For now suspend the device completely before the spindle is
7371 * turned off
7372 */
7373 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
7374 if (got_semaphore_here != 0) {
7375 sema_v(&un->un_semoclose);
7376 }
7377 /*
7378 * On exit put the state back to its original value
7379 * and broadcast to anyone waiting for the power
7380 * change completion.
7381 */
7382 mutex_enter(SD_MUTEX(un));
7383 un->un_state = state_before_pm;
7384 cv_broadcast(&un->un_suspend_cv);
7385 mutex_exit(SD_MUTEX(un));
7386 SD_TRACE(SD_LOG_IO_PM, un,
7387 "sdpower: exit, PM suspend Failed.\n");
7388 return (DDI_FAILURE);
7389 }
7390 }
7391
7392 /*
7393 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
7394 * close, or strategy. Dump no longer uses this routine; it uses its
7395 * own code so it can be done in polled mode.
7396 */
7397
7398 medium_present = TRUE;
7399
7400 /*
7401 * When powering up, issue a TUR in case the device is at unit
7402 * attention. Don't do retries. Bypass the PM layer, otherwise
7403 * a deadlock on un_pm_busy_cv will occur.
7404 */
7405 if (level == SD_SPINDLE_ON) {
7406 (void) sd_send_scsi_TEST_UNIT_READY(un,
7407 SD_DONT_RETRY_TUR | SD_BYPASS_PM);
7408 }
7409
7410 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
7411 ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
7412
7413 sval = sd_send_scsi_START_STOP_UNIT(un,
7414 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
7415 SD_PATH_DIRECT);
7416 /* Command failed, check for media present.
*/
7417 if ((sval == ENXIO) && ISREMOVABLE(un)) {
7418 medium_present = FALSE;
7419 }
7420
7421 /*
7422 * The conditions of interest here are:
7423 * if a spindle off with media present fails,
7424 * then restore the state and return an error.
7425 * else if a spindle on fails,
7426 * then return an error (there's no state to restore).
7427 * In all other cases we set up for the new state
7428 * and return success.
7429 */
7430 switch (level) {
7431 case SD_SPINDLE_OFF:
7432 if ((medium_present == TRUE) && (sval != 0)) {
7433 /* The stop command from above failed */
7434 rval = DDI_FAILURE;
7435 /*
7436 * The stop command failed, and we have media
7437 * present. Put the level back by calling
7438 * sd_ddi_pm_resume() and set the state back to
7439 * its previous value.
7440 */
7441 (void) sd_ddi_pm_resume(un);
7442 mutex_enter(SD_MUTEX(un));
7443 un->un_last_state = save_state;
7444 mutex_exit(SD_MUTEX(un));
7445 break;
7446 }
7447 /*
7448 * The stop command from above succeeded.
7449 */
7450 if (ISREMOVABLE(un)) {
7451 /*
7452 * Terminate the watch thread when a removable media
7453 * device goes into the low power state. This is as per
7454 * the requirements of the pm framework; otherwise commands
7455 * will be generated for the device (through the watch
7456 * thread), even when the device is in the low power state.
7457 */
7458 mutex_enter(SD_MUTEX(un));
7459 un->un_f_watcht_stopped = FALSE;
7460 if (un->un_swr_token != NULL) {
7461 opaque_t temp_token = un->un_swr_token;
7462 un->un_f_watcht_stopped = TRUE;
7463 un->un_swr_token = NULL;
7464 mutex_exit(SD_MUTEX(un));
7465 (void) scsi_watch_request_terminate(temp_token,
7466 SCSI_WATCH_TERMINATE_WAIT);
7467 } else {
7468 mutex_exit(SD_MUTEX(un));
7469 }
7470 }
7471 break;
7472
7473 default: /* The level requested is spindle on... */
7474 /*
7475 * Legacy behavior: return success on a failed spinup
7476 * if there is no media in the drive.
7477 * Do this by looking at medium_present here.
7478 */
7479 if ((sval != 0) && medium_present) {
7480 /* The start command from above failed */
7481 rval = DDI_FAILURE;
7482 break;
7483 }
7484 /*
7485 * The start command from above succeeded.
7486 * Resume the devices now that we have
7487 * started the disks.
7488 */
7489 (void) sd_ddi_pm_resume(un);
7490
7491 /*
7492 * Resume the watch thread since it was suspended
7493 * when the device went into low power mode.
7494 */
7495 if (ISREMOVABLE(un)) {
7496 mutex_enter(SD_MUTEX(un));
7497 if (un->un_f_watcht_stopped == TRUE) {
7498 opaque_t temp_token;
7499
7500 un->un_f_watcht_stopped = FALSE;
7501 mutex_exit(SD_MUTEX(un));
7502 temp_token = scsi_watch_request_submit(
7503 SD_SCSI_DEVP(un),
7504 sd_check_media_time,
7505 SENSE_LENGTH, sd_media_watch_cb,
7506 (caddr_t)dev);
7507 mutex_enter(SD_MUTEX(un));
7508 un->un_swr_token = temp_token;
7509 }
7510 mutex_exit(SD_MUTEX(un));
7511 }
7512 }
7513 if (got_semaphore_here != 0) {
7514 sema_v(&un->un_semoclose);
7515 }
7516 /*
7517 * On exit put the state back to its original value
7518 * and broadcast to anyone waiting for the power
7519 * change completion.
7520 */
7521 mutex_enter(SD_MUTEX(un));
7522 un->un_state = state_before_pm;
7523 cv_broadcast(&un->un_suspend_cv);
7524 mutex_exit(SD_MUTEX(un));
7525
7526 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);
7527
7528 return (rval);
7529 }
7530
7531
7532
7533 /*
7534 * Function: sdattach
7535 *
7536 * Description: Driver's attach(9e) entry point function.
7537 * 7538 * Arguments: devi - opaque device info handle 7539 * cmd - attach type 7540 * 7541 * Return Code: DDI_SUCCESS 7542 * DDI_FAILURE 7543 * 7544 * Context: Kernel thread context 7545 */ 7546 7547 static int 7548 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7549 { 7550 switch (cmd) { 7551 case DDI_ATTACH: 7552 return (sd_unit_attach(devi)); 7553 case DDI_RESUME: 7554 return (sd_ddi_resume(devi)); 7555 default: 7556 break; 7557 } 7558 return (DDI_FAILURE); 7559 } 7560 7561 7562 /* 7563 * Function: sddetach 7564 * 7565 * Description: Driver's detach(9E) entry point function. 7566 * 7567 * Arguments: devi - opaque device info handle 7568 * cmd - detach type 7569 * 7570 * Return Code: DDI_SUCCESS 7571 * DDI_FAILURE 7572 * 7573 * Context: Kernel thread context 7574 */ 7575 7576 static int 7577 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7578 { 7579 switch (cmd) { 7580 case DDI_DETACH: 7581 return (sd_unit_detach(devi)); 7582 case DDI_SUSPEND: 7583 return (sd_ddi_suspend(devi)); 7584 default: 7585 break; 7586 } 7587 return (DDI_FAILURE); 7588 } 7589 7590 7591 /* 7592 * Function: sd_sync_with_callback 7593 * 7594 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7595 * state while the callback routine is active. 7596 * 7597 * Arguments: un: softstate structure for the instance 7598 * 7599 * Context: Kernel thread context 7600 */ 7601 7602 static void 7603 sd_sync_with_callback(struct sd_lun *un) 7604 { 7605 ASSERT(un != NULL); 7606 7607 mutex_enter(SD_MUTEX(un)); 7608 7609 ASSERT(un->un_in_callback >= 0); 7610 7611 while (un->un_in_callback > 0) { 7612 mutex_exit(SD_MUTEX(un)); 7613 delay(2); 7614 mutex_enter(SD_MUTEX(un)); 7615 } 7616 7617 mutex_exit(SD_MUTEX(un)); 7618 } 7619 7620 /* 7621 * Function: sd_unit_attach 7622 * 7623 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7624 * the soft state structure for the device and performs 7625 * all necessary structure and device initializations. 7626 * 7627 * Arguments: devi: the system's dev_info_t for the device. 7628 * 7629 * Return Code: DDI_SUCCESS if attach is successful. 7630 * DDI_FAILURE if any part of the attach fails. 7631 * 7632 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7633 * Kernel thread context only. Can sleep. 7634 */ 7635 7636 static int 7637 sd_unit_attach(dev_info_t *devi) 7638 { 7639 struct scsi_device *devp; 7640 struct sd_lun *un; 7641 char *variantp; 7642 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7643 int instance; 7644 int rval; 7645 int wc_enabled; 7646 uint64_t capacity; 7647 uint_t lbasize; 7648 7649 /* 7650 * Retrieve the target driver's private data area. This was set 7651 * up by the HBA. 7652 */ 7653 devp = ddi_get_driver_private(devi); 7654 7655 /* 7656 * Since we have no idea what state things were left in by the last 7657 * user of the device, set up some 'default' settings, ie. turn 'em 7658 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7659 * Do this before the scsi_probe, which sends an inquiry. 7660 * This is a fix for bug (4430280). 7661 * Of special importance is wide-xfer. The drive could have been left 7662 * in wide transfer mode by the last driver to communicate with it, 7663 * this includes us. If that's the case, and if the following is not 7664 * setup properly or we don't re-negotiate with the drive prior to 7665 * transferring data to/from the drive, it causes bus parity errors, 7666 * data overruns, and unexpected interrupts. 
This first occurred when 7667 * the fix for bug (4378686) was made. 7668 */ 7669 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7670 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7671 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7672 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7673 7674 /* 7675 * Use scsi_probe() to issue an INQUIRY command to the device. 7676 * This call will allocate and fill in the scsi_inquiry structure 7677 * and point the sd_inq member of the scsi_device structure to it. 7678 * If the attach succeeds, then this memory will not be de-allocated 7679 * (via scsi_unprobe()) until the instance is detached. 7680 */ 7681 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7682 goto probe_failed; 7683 } 7684 7685 /* 7686 * Check the device type as specified in the inquiry data and 7687 * claim it if it is of a type that we support. 7688 */ 7689 switch (devp->sd_inq->inq_dtype) { 7690 case DTYPE_DIRECT: 7691 break; 7692 case DTYPE_RODIRECT: 7693 break; 7694 case DTYPE_OPTICAL: 7695 break; 7696 case DTYPE_NOTPRESENT: 7697 default: 7698 /* Unsupported device type; fail the attach. */ 7699 goto probe_failed; 7700 } 7701 7702 /* 7703 * Allocate the soft state structure for this unit. 7704 * 7705 * We rely upon this memory being set to all zeroes by 7706 * ddi_soft_state_zalloc(). We assume that any member of the 7707 * soft state structure that is not explicitly initialized by 7708 * this routine will have a value of zero. 7709 */ 7710 instance = ddi_get_instance(devp->sd_dev); 7711 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7712 goto probe_failed; 7713 } 7714 7715 /* 7716 * Retrieve a pointer to the newly-allocated soft state. 7717 * 7718 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7719 * was successful, unless something has gone horribly wrong and the 7720 * ddi's soft state internals are corrupt (in which case it is 7721 * probably better to halt here than just fail the attach....) 7722 */ 7723 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7724 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7725 instance); 7726 /*NOTREACHED*/ 7727 } 7728 7729 /* 7730 * Link the back ptr of the driver soft state to the scsi_device 7731 * struct for this lun. 7732 * Save a pointer to the softstate in the driver-private area of 7733 * the scsi_device struct. 7734 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7735 * we first set un->un_sd below. 7736 */ 7737 un->un_sd = devp; 7738 devp->sd_private = (opaque_t)un; 7739 7740 /* 7741 * The following must be after devp is stored in the soft state struct. 7742 */ 7743 #ifdef SDDEBUG 7744 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7745 "%s_unit_attach: un:0x%p instance:%d\n", 7746 ddi_driver_name(devi), un, instance); 7747 #endif 7748 7749 /* 7750 * Set up the device type and node type (for the minor nodes). 7751 * By default we assume that the device can at least support the 7752 * Common Command Set. Call it a CD-ROM if it reports itself 7753 * as a RODIRECT device. 7754 */ 7755 switch (devp->sd_inq->inq_dtype) { 7756 case DTYPE_RODIRECT: 7757 un->un_node_type = DDI_NT_CD_CHAN; 7758 un->un_ctype = CTYPE_CDROM; 7759 break; 7760 case DTYPE_OPTICAL: 7761 un->un_node_type = DDI_NT_BLOCK_CHAN; 7762 un->un_ctype = CTYPE_ROD; 7763 break; 7764 default: 7765 un->un_node_type = DDI_NT_BLOCK_CHAN; 7766 un->un_ctype = CTYPE_CCS; 7767 break; 7768 } 7769 7770 /* 7771 * Try to read the interconnect type from the HBA. 
7772 *
7773 * Note: This driver is currently compiled as two binaries, a parallel
7774 * scsi version (sd) and a fibre channel version (ssd). All functional
7775 * differences are determined at compile time. In the future a single
7776 * binary will be provided and the interconnect type will be used to
7777 * differentiate between fibre and parallel scsi behaviors. At that time
7778 * it will be necessary for all fibre channel HBAs to support this
7779 * property.
7780 *
7781 * Set un_f_is_fibre to TRUE (the default is fibre).
7782 */
7783 un->un_f_is_fibre = TRUE;
7784 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
7785 case INTERCONNECT_SSA:
7786 un->un_interconnect_type = SD_INTERCONNECT_SSA;
7787 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7788 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
7789 break;
7790 case INTERCONNECT_PARALLEL:
7791 un->un_f_is_fibre = FALSE;
7792 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7793 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7794 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
7795 break;
7796 case INTERCONNECT_FIBRE:
7797 un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
7798 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7799 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
7800 break;
7801 case INTERCONNECT_FABRIC:
7802 un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
7803 un->un_node_type = DDI_NT_BLOCK_FABRIC;
7804 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7805 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
7806 break;
7807 default:
7808 #ifdef SD_DEFAULT_INTERCONNECT_TYPE
7809 /*
7810 * The HBA does not support the "interconnect-type" property
7811 * (or did not provide a recognized type).
7812 *
7813 * Note: This will be obsoleted when a single fibre channel
7814 * and parallel scsi driver is delivered. In the meantime the
7815 * interconnect type will be set to the platform default. If that
7816 * type is not parallel SCSI, it means that we should be
7817 * assuming "ssd" semantics. However, here this also means that
7818 * the FC HBA is not supporting the "interconnect-type" property
7819 * like we expect it to, so log this occurrence.
7820 */
7821 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
7822 if (!SD_IS_PARALLEL_SCSI(un)) {
7823 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7824 "sd_unit_attach: un:0x%p Assuming "
7825 "INTERCONNECT_FIBRE\n", un);
7826 } else {
7827 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7828 "sd_unit_attach: un:0x%p Assuming "
7829 "INTERCONNECT_PARALLEL\n", un);
7830 un->un_f_is_fibre = FALSE;
7831 }
7832 #else
7833 /*
7834 * Note: This source will be implemented when a single fibre
7835 * channel and parallel scsi driver is delivered. The default
7836 * will be to assume that if a device does not support the
7837 * "interconnect-type" property it is a parallel SCSI HBA and
7838 * we will set the interconnect type for parallel scsi.
7839 */
7840 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7841 un->un_f_is_fibre = FALSE;
7842 #endif
7843 break;
7844 }
7845
7846 if (un->un_f_is_fibre == TRUE) {
7847 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
7848 SCSI_VERSION_3) {
7849 switch (un->un_interconnect_type) {
7850 case SD_INTERCONNECT_FIBRE:
7851 case SD_INTERCONNECT_SSA:
7852 un->un_node_type = DDI_NT_BLOCK_WWN;
7853 break;
7854 default:
7855 break;
7856 }
7857 }
7858 }
7859
7860 /*
7861 * Initialize the Request Sense command for the target
7862 */
7863 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
7864 goto alloc_rqs_failed;
7865 }
7866
7867 /*
7868 * Set un_retry_count with SD_RETRY_COUNT; this is OK for Sparc,
7869 * which has separate binaries for sd and ssd.
7870 *
7871 * x86 has one binary, and un_retry_count is set based on the
7872 * connection type. The hardcoded values will go away when Sparc
7873 * uses one binary for sd and ssd. These hardcoded values need to
7874 * match SD_RETRY_COUNT in sddef.h.
7875 * The value used is based on the interconnect type:
7876 * fibre = 3, parallel = 5.
7877 */
7878 #if defined(__i386) || defined(__amd64)
7879 un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
7880 #else
7881 un->un_retry_count = SD_RETRY_COUNT;
7882 #endif
7883
7884 /*
7885 * Set the per disk retry count to the default number of retries
7886 * for disks and CDROMs. This value can be overridden by the
7887 * disk property list or an entry in sd.conf.
7888 */
7889 un->un_notready_retry_count =
7890 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
7891 : DISK_NOT_READY_RETRY_COUNT(un);
7892
7893 /*
7894 * Set the busy retry count to the default value of un_retry_count.
7895 * This can be overridden by entries in sd.conf or the device
7896 * config table.
7897 */
7898 un->un_busy_retry_count = un->un_retry_count;
7899
7900 /*
7901 * Init the reset threshold for retries. This number determines
7902 * how many retries must be performed before a reset can be issued
7903 * (for certain error conditions). This can be overridden by entries
7904 * in sd.conf or the device config table.
7905 */
7906 un->un_reset_retry_count = (un->un_retry_count / 2);
7907
7908 /*
7909 * Set the victim_retry_count to twice the default un_retry_count
7910 */
7911 un->un_victim_retry_count = (2 * un->un_retry_count);
7912
7913 /*
7914 * Set the reservation release timeout to the default value of
7915 * 5 seconds. This can be overridden by entries in ssd.conf or the
7916 * device config table.
7917 */
7918 un->un_reserve_release_time = 5;
7919
7920 /*
7921 * Set up the default maximum transfer size. Note that this may
7922 * get updated later in the attach, when setting up default wide
7923 * operations for disks.
7924 */
7925 #if defined(__i386) || defined(__amd64)
7926 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
7927 #else
7928 un->un_max_xfer_size = (uint_t)maxphys;
7929 #endif
7930
7931 /*
7932 * Get the "allow bus device reset" property (defaults to "enabled" if
7933 * the property was not defined). This is to disable bus resets for
7934 * certain kinds of error recovery. Note: In the future when a run-time
7935 * fibre check is available the soft state flag should default to
7936 * enabled.
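*
* For example (illustrative only; the property name is the one
* consulted by the ddi_getprop() call that follows), a .conf entry
* disabling bus device resets for all instances might look like:
*
*	allow-bus-device-reset=0;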
7937 */
7938 if (un->un_f_is_fibre == TRUE) {
7939 un->un_f_allow_bus_device_reset = TRUE;
7940 } else {
7941 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7942 "allow-bus-device-reset", 1) != 0) {
7943 un->un_f_allow_bus_device_reset = TRUE;
7944 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7945 "sd_unit_attach: un:0x%p Bus device reset enabled\n",
7946 un);
7947 } else {
7948 un->un_f_allow_bus_device_reset = FALSE;
7949 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7950 "sd_unit_attach: un:0x%p Bus device reset disabled\n",
7951 un);
7952 }
7953 }
7954
7955 /*
7956 * Check if this is an ATAPI device. ATAPI devices use Group 1
7957 * Read/Write commands and Group 2 Mode Sense/Select commands.
7958 *
7959 * Note: The "obsolete" way of doing this is to check for the "atapi"
7960 * property. The new "variant" property with a value of "atapi" has been
7961 * introduced so that future 'variants' of standard SCSI behavior (like
7962 * atapi) could be specified by the underlying HBA drivers by supplying
7963 * a new value for the "variant" property, instead of having to define a
7964 * new property.
7965 */
7966 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
7967 un->un_f_cfg_is_atapi = TRUE;
7968 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7969 "sd_unit_attach: un:0x%p Atapi device\n", un);
7970 }
7971 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
7972 &variantp) == DDI_PROP_SUCCESS) {
7973 if (strcmp(variantp, "atapi") == 0) {
7974 un->un_f_cfg_is_atapi = TRUE;
7975 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7976 "sd_unit_attach: un:0x%p Atapi device\n", un);
7977 }
7978 ddi_prop_free(variantp);
7979 }
7980
7981 /*
7982 * Assume doorlock commands are supported. If not, the first
7983 * call to sd_send_scsi_DOORLOCK() will set it to FALSE
7984 */
7985 un->un_f_doorlock_supported = TRUE;
7986
7987 un->un_cmd_timeout = SD_IO_TIME;
7988
7989 /* Info on current states, statuses, etc. (Updated frequently) */
7990 un->un_state = SD_STATE_NORMAL;
7991 un->un_last_state = SD_STATE_NORMAL;
7992
7993 /* Control & status info for command throttling */
7994 un->un_throttle = sd_max_throttle;
7995 un->un_saved_throttle = sd_max_throttle;
7996 un->un_min_throttle = sd_min_throttle;
7997
7998 if (un->un_f_is_fibre == TRUE) {
7999 un->un_f_use_adaptive_throttle = TRUE;
8000 } else {
8001 un->un_f_use_adaptive_throttle = FALSE;
8002 }
8003
8004 /* Removable media support. */
8005 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
8006 un->un_mediastate = DKIO_NONE;
8007 un->un_specified_mediastate = DKIO_NONE;
8008
8009 /* CVs for suspend/resume (PM or DR) */
8010 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
8011 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
8012
8013 /* Power management support. */
8014 un->un_power_level = SD_SPINDLE_UNINIT;
8015
8016 /*
8017 * The open/close semaphore is used to serialize threads executing
8018 * in the driver's open & close entry point routines for a given
8019 * instance.
8020 */
8021 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
8022
8023 /*
8024 * The conf file entry and softstate variable are a forceful override,
8025 * meaning a non-zero value must be entered to change the default.
8026 */
8027 un->un_f_disksort_disabled = FALSE;
8028
8029 /*
8030 * Retrieve the properties from the static driver table or the driver
8031 * configuration file (.conf) for this unit and update the soft state
8032 * for the device as needed for the indicated properties.
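*
* As an illustration only (the exact tuple format is release
* specific, and the code below is the authoritative parser), a
* .conf override typically pairs a vendor/product INQUIRY string
* with a named data property, e.g.:
*
*	sd-config-list = "ACME    SuperDisk", "acme-data";
*
* where "ACME    SuperDisk" and "acme-data" are hypothetical names
* standing in for a real vendor/product ID and its tuning data.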
8033 * Note: the property configuration needs to occur here as some of the
8034 * following routines may have dependencies on soft state flags set
8035 * as part of the driver property configuration.
8036 */
8037 sd_read_unit_properties(un);
8038 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8039 "sd_unit_attach: un:0x%p property configuration complete.\n", un);
8040
8041 /*
8042 * By default, we mark the capacity, lbasize, and geometry
8043 * as invalid. Only if we successfully read a valid capacity
8044 * will we update the un_blockcount and un_tgt_blocksize with the
8045 * valid values (the geometry will be validated later).
8046 */
8047 un->un_f_blockcount_is_valid = FALSE;
8048 un->un_f_tgt_blocksize_is_valid = FALSE;
8049 un->un_f_geometry_is_valid = FALSE;
8050
8051 /*
8052 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
8053 * otherwise.
8054 */
8055 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
8056 un->un_blockcount = 0;
8057
8058 /*
8059 * Set up the per-instance info needed to determine the correct
8060 * CDBs and other info for issuing commands to the target.
8061 */
8062 sd_init_cdb_limits(un);
8063
8064 /*
8065 * Set up the IO chains to use, based upon the target type.
8066 */
8067 if (ISREMOVABLE(un)) {
8068 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
8069 } else {
8070 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
8071 }
8072 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
8073 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
8074 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
8075
8076 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
8077 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
8078 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
8079 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
8080
8081
8082 if (ISCD(un)) {
8083 un->un_additional_codes = sd_additional_codes;
8084 } else {
8085 un->un_additional_codes = NULL;
8086 }
8087
8088 /*
8089 * Create the kstats here so they can be available for attach-time
8090 * routines that send commands to the unit (either polled or via
8091 * sd_send_scsi_cmd).
8092 *
8093 * Note: This is a critical sequence that needs to be maintained:
8094 * 1) Instantiate the kstats here, before any routines using the
8095 * iopath (i.e. sd_send_scsi_cmd).
8096 * 2) Initialize the error stats (sd_set_errstats) and partition
8097 * stats (sd_set_pstats), following sd_validate_geometry(),
8098 * sd_register_devid(), and sd_disable_caching().
8099 */
8100
8101 un->un_stats = kstat_create(sd_label, instance,
8102 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
8103 if (un->un_stats != NULL) {
8104 un->un_stats->ks_lock = SD_MUTEX(un);
8105 kstat_install(un->un_stats);
8106 }
8107 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8108 "sd_unit_attach: un:0x%p un_stats created\n", un);
8109
8110 sd_create_errstats(un, instance);
8111 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8112 "sd_unit_attach: un:0x%p errstats created\n", un);
8113
8114 /*
8115 * The following if/else code was relocated here from below as part
8116 * of the fix for bug (4430280). However with the default setup added
8117 * on entry to this routine, it's no longer absolutely necessary for
8118 * this to be before the call to sd_spin_up_unit.
8119 */
8120 if (SD_IS_PARALLEL_SCSI(un)) {
8121 /*
8122 * If SCSI-2 tagged queueing is supported by the target
8123 * and by the host adapter then we will enable it.
8124 */ 8125 un->un_tagflags = 0; 8126 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 8127 (devp->sd_inq->inq_cmdque) && 8128 (un->un_f_arq_enabled == TRUE)) { 8129 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 8130 1, 1) == 1) { 8131 un->un_tagflags = FLAG_STAG; 8132 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8133 "sd_unit_attach: un:0x%p tag queueing " 8134 "enabled\n", un); 8135 } else if (scsi_ifgetcap(SD_ADDRESS(un), 8136 "untagged-qing", 0) == 1) { 8137 un->un_f_opt_queueing = TRUE; 8138 un->un_saved_throttle = un->un_throttle = 8139 min(un->un_throttle, 3); 8140 } else { 8141 un->un_f_opt_queueing = FALSE; 8142 un->un_saved_throttle = un->un_throttle = 1; 8143 } 8144 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 8145 == 1) && (un->un_f_arq_enabled == TRUE)) { 8146 /* The Host Adapter supports internal queueing. */ 8147 un->un_f_opt_queueing = TRUE; 8148 un->un_saved_throttle = un->un_throttle = 8149 min(un->un_throttle, 3); 8150 } else { 8151 un->un_f_opt_queueing = FALSE; 8152 un->un_saved_throttle = un->un_throttle = 1; 8153 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8154 "sd_unit_attach: un:0x%p no tag queueing\n", un); 8155 } 8156 8157 8158 /* Setup or tear down default wide operations for disks */ 8159 8160 /* 8161 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 8162 * and "ssd_max_xfer_size" to exist simultaneously on the same 8163 * system and be set to different values. In the future this 8164 * code may need to be updated when the ssd module is 8165 * obsoleted and removed from the system. (4299588) 8166 */ 8167 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 8168 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 8169 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 8170 1, 1) == 1) { 8171 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8172 "sd_unit_attach: un:0x%p Wide Transfer " 8173 "enabled\n", un); 8174 } 8175 8176 /* 8177 * If tagged queuing has also been enabled, then 8178 * enable large xfers 8179 */ 8180 if (un->un_saved_throttle == sd_max_throttle) { 8181 un->un_max_xfer_size = 8182 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8183 sd_max_xfer_size, SD_MAX_XFER_SIZE); 8184 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8185 "sd_unit_attach: un:0x%p max transfer " 8186 "size=0x%x\n", un, un->un_max_xfer_size); 8187 } 8188 } else { 8189 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 8190 0, 1) == 1) { 8191 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8192 "sd_unit_attach: un:0x%p " 8193 "Wide Transfer disabled\n", un); 8194 } 8195 } 8196 } else { 8197 un->un_tagflags = FLAG_STAG; 8198 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 8199 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 8200 } 8201 8202 /* 8203 * If this target supports LUN reset, try to enable it. 8204 */ 8205 if (un->un_f_lun_reset_enabled) { 8206 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 8207 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8208 "un:0x%p lun_reset capability set\n", un); 8209 } else { 8210 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8211 "un:0x%p lun-reset capability not set\n", un); 8212 } 8213 } 8214 8215 /* 8216 * At this point in the attach, we have enough info in the 8217 * soft state to be able to issue commands to the target. 8218 * 8219 * All command paths used below MUST issue their commands as 8220 * SD_PATH_DIRECT. This is important as intermediate layers 8221 * are not all initialized yet (such as PM). 8222 */ 8223 8224 /* 8225 * Send a TEST UNIT READY command to the device. This should clear 8226 * any outstanding UNIT ATTENTION that may be present. 
8227 * 8228 * Note: Don't check for success, just track if there is a reservation, 8229 * this is a throw away command to clear any unit attentions. 8230 * 8231 * Note: This MUST be the first command issued to the target during 8232 * attach to ensure power on UNIT ATTENTIONS are cleared. 8233 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 8234 * with attempts at spinning up a device with no media. 8235 */ 8236 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 8237 reservation_flag = SD_TARGET_IS_RESERVED; 8238 } 8239 8240 /* 8241 * If the device is NOT a removable media device, attempt to spin 8242 * it up (using the START_STOP_UNIT command) and read its capacity 8243 * (using the READ CAPACITY command). Note, however, that either 8244 * of these could fail and in some cases we would continue with 8245 * the attach despite the failure (see below). 8246 */ 8247 if (devp->sd_inq->inq_dtype == DTYPE_DIRECT && !ISREMOVABLE(un)) { 8248 switch (sd_spin_up_unit(un)) { 8249 case 0: 8250 /* 8251 * Spin-up was successful; now try to read the 8252 * capacity. If successful then save the results 8253 * and mark the capacity & lbasize as valid. 8254 */ 8255 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8256 "sd_unit_attach: un:0x%p spin-up successful\n", un); 8257 8258 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 8259 &lbasize, SD_PATH_DIRECT)) { 8260 case 0: { 8261 if (capacity > DK_MAX_BLOCKS) { 8262 #ifdef _LP64 8263 /* 8264 * Enable descriptor format sense data 8265 * so that we can get 64 bit sense 8266 * data fields. 8267 */ 8268 sd_enable_descr_sense(un); 8269 #else 8270 /* 32-bit kernels can't handle this */ 8271 scsi_log(SD_DEVINFO(un), 8272 sd_label, CE_WARN, 8273 "disk has %llu blocks, which " 8274 "is too large for a 32-bit " 8275 "kernel", capacity); 8276 goto spinup_failed; 8277 #endif 8278 } 8279 /* 8280 * The following relies on 8281 * sd_send_scsi_READ_CAPACITY never 8282 * returning 0 for capacity and/or lbasize. 8283 */ 8284 sd_update_block_info(un, lbasize, capacity); 8285 8286 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8287 "sd_unit_attach: un:0x%p capacity = %ld " 8288 "blocks; lbasize= %ld.\n", un, 8289 un->un_blockcount, un->un_tgt_blocksize); 8290 8291 break; 8292 } 8293 case EACCES: 8294 /* 8295 * Should never get here if the spin-up 8296 * succeeded, but code it in anyway. 8297 * From here, just continue with the attach... 8298 */ 8299 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8300 "sd_unit_attach: un:0x%p " 8301 "sd_send_scsi_READ_CAPACITY " 8302 "returned reservation conflict\n", un); 8303 reservation_flag = SD_TARGET_IS_RESERVED; 8304 break; 8305 default: 8306 /* 8307 * Likewise, should never get here if the 8308 * spin-up succeeded. Just continue with 8309 * the attach... 8310 */ 8311 break; 8312 } 8313 break; 8314 case EACCES: 8315 /* 8316 * Device is reserved by another host. In this case 8317 * we could not spin it up or read the capacity, but 8318 * we continue with the attach anyway. 8319 */ 8320 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8321 "sd_unit_attach: un:0x%p spin-up reservation " 8322 "conflict.\n", un); 8323 reservation_flag = SD_TARGET_IS_RESERVED; 8324 break; 8325 default: 8326 /* Fail the attach if the spin-up failed. */ 8327 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8328 "sd_unit_attach: un:0x%p spin-up failed.", un); 8329 goto spinup_failed; 8330 } 8331 } 8332 8333 /* 8334 * Check to see if this is a MMC drive 8335 */ 8336 if (ISCD(un)) { 8337 sd_set_mmc_caps(un); 8338 } 8339 8340 /* 8341 * Create the minor nodes for the device. 
8342 * Note: If we want to support fdisk on both sparc and intel, this will
8343 * have to separate out the notion that VTOC8 is always sparc, and
8344 * VTOC16 is always intel (though these can be the defaults). The vtoc
8345 * type will have to be determined at run-time, and the fdisk
8346 * partitioning will have to have been read & set up before we
8347 * create the minor nodes. (any other inits (such as kstats) that
8348 * also ought to be done before creating the minor nodes?) (Doesn't
8349 * setting up the minor nodes kind of imply that we're ready to
8350 * handle an open from userland?)
8351 */
8352 if (sd_create_minor_nodes(un, devi) != DDI_SUCCESS) {
8353 goto create_minor_nodes_failed;
8354 }
8355 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8356 "sd_unit_attach: un:0x%p minor nodes created\n", un);
8357
8358 /*
8359 * Add a zero-length attribute to tell the world we support
8360 * kernel ioctls (for layered drivers)
8361 */
8362 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8363 DDI_KERNEL_IOCTL, NULL, 0);
8364
8365 /*
8366 * Add a boolean property to tell the world we support
8367 * the B_FAILFAST flag (for layered drivers)
8368 */
8369 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8370 "ddi-failfast-supported", NULL, 0);
8371
8372 /*
8373 * Initialize power management
8374 */
8375 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
8376 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
8377 sd_setup_pm(un, devi);
8378 if (un->un_f_pm_is_enabled == FALSE) {
8379 /*
8380 * For performance, point to a jump table that does
8381 * not include pm.
8382 * The direct and priority chains don't change with PM.
8383 *
8384 * Note: this is currently done based on individual device
8385 * capabilities. When an interface for determining system
8386 * power enabled state becomes available, or when additional
8387 * layers are added to the command chain, these values will
8388 * have to be re-evaluated for correctness.
8389 */
8390 if (ISREMOVABLE(un)) {
8391 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
8392 } else {
8393 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
8394 }
8395 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
8396 }
8397
8398 /*
8399 * This property is set to 0 by HA software to avoid retries
8400 * on a reserved disk. (The preferred property name is
8401 * "retry-on-reservation-conflict") (1189689)
8402 *
8403 * Note: The use of a global here can have unintended consequences. A
8404 * per instance variable is preferable to match the capabilities of
8405 * different underlying hba's (4402600)
8406 */
8407 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
8408 DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
8409 sd_retry_on_reservation_conflict);
8410 if (sd_retry_on_reservation_conflict != 0) {
8411 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
8412 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
8413 sd_retry_on_reservation_conflict);
8414 }
8415
8416 /* Set up options for QFULL handling. */
8417 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8418 "qfull-retries", -1)) != -1) {
8419 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
8420 rval, 1);
8421 }
8422 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8423 "qfull-retry-interval", -1)) != -1) {
8424 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
8425 rval, 1);
8426 }
8427
8428 /*
8429 * This just prints a message that announces the existence of the
8430 * device.
The message is always printed in the system logfile, but
8431 * only appears on the console if the system is booted with the
8432 * -v (verbose) argument.
8433 */
8434 ddi_report_dev(devi);
8435
8436 /*
8437 * The framework calls driver attach routines single-threaded
8438 * for a given instance. However we still acquire SD_MUTEX here
8439 * because it is required for calling the sd_validate_geometry()
8440 * and sd_register_devid() functions.
8441 */
8442 mutex_enter(SD_MUTEX(un));
8443 un->un_f_geometry_is_valid = FALSE;
8444 un->un_mediastate = DKIO_NONE;
8445 un->un_reserved = -1;
8446 if (!ISREMOVABLE(un)) {
8447 /*
8448 * Read and validate the device's geometry (i.e., disk label)
8449 * A new unformatted drive will not have a valid geometry, but
8450 * the driver needs to successfully attach to this device so
8451 * the drive can be formatted via ioctls.
8452 */
8453 if (((sd_validate_geometry(un, SD_PATH_DIRECT) ==
8454 ENOTSUP)) &&
8455 (un->un_blockcount < DK_MAX_BLOCKS)) {
8456 /*
8457 * We found a small disk with an EFI label on it;
8458 * we need to fix up the minor nodes accordingly.
8459 */
8460 ddi_remove_minor_node(devi, "h");
8461 ddi_remove_minor_node(devi, "h,raw");
8462 (void) ddi_create_minor_node(devi, "wd",
8463 S_IFBLK,
8464 (instance << SDUNIT_SHIFT) | WD_NODE,
8465 un->un_node_type, NULL);
8466 (void) ddi_create_minor_node(devi, "wd,raw",
8467 S_IFCHR,
8468 (instance << SDUNIT_SHIFT) | WD_NODE,
8469 un->un_node_type, NULL);
8470 }
8471 }
8472
8473 /*
8474 * Read and initialize the devid for the unit.
8475 */
8476 ASSERT(un->un_errstats != NULL);
8477 if (!ISREMOVABLE(un)) {
8478 sd_register_devid(un, devi, reservation_flag);
8479 }
8480 mutex_exit(SD_MUTEX(un));
8481
8482 #if (defined(__fibre))
8483 /*
8484 * Register callbacks for fibre only. You can't do this solely
8485 * on the basis of the devid_type because this is hba specific.
8486 * We need to query our hba capabilities to find out whether to
8487 * register or not.
8488 */
8489 if (un->un_f_is_fibre) {
8490 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
8491 sd_init_event_callbacks(un);
8492 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8493 "sd_unit_attach: un:0x%p event callbacks inserted", un);
8494 }
8495 }
8496 #endif
8497
8498 if (un->un_f_opt_disable_cache == TRUE) {
8499 if (sd_disable_caching(un) != 0) {
8500 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8501 "sd_unit_attach: un:0x%p Could not disable "
8502 "caching", un);
8503 goto devid_failed;
8504 }
8505 }
8506
8507 /*
8508 * NOTE: Since there is currently no mechanism to
8509 * change the state of the Write Cache Enable mode select,
8510 * this code just checks the value of the WCE bit
8511 * at device attach time. If a mechanism
8512 * is added to the driver to change WCE, un_f_write_cache_enabled
8513 * must be updated appropriately.
8514 */
8515 (void) sd_get_write_cache_enabled(un, &wc_enabled);
8516 mutex_enter(SD_MUTEX(un));
8517 un->un_f_write_cache_enabled = (wc_enabled != 0);
8518 mutex_exit(SD_MUTEX(un));
8519
8520 /*
8521 * Set the pstat and error stat values here, so data obtained during the
8522 * previous attach-time routines is available.
8523 *
8524 * Note: This is a critical sequence that needs to be maintained:
8525 * 1) Instantiate the kstats before any routines using the iopath
8526 * (i.e. sd_send_scsi_cmd).
8527 * 2) Initialize the error stats (sd_set_errstats) and partition
8528 * stats (sd_set_pstats) here, following sd_validate_geometry(),
8529 * sd_register_devid(), and sd_disable_caching().
8530 */ 8531 if (!ISREMOVABLE(un) && (un->un_f_pkstats_enabled == TRUE)) { 8532 sd_set_pstats(un); 8533 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8534 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8535 } 8536 8537 sd_set_errstats(un); 8538 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8539 "sd_unit_attach: un:0x%p errstats set\n", un); 8540 8541 /* 8542 * Find out what type of reservation this disk supports. 8543 */ 8544 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 8545 case 0: 8546 /* 8547 * SCSI-3 reservations are supported. 8548 */ 8549 un->un_reservation_type = SD_SCSI3_RESERVATION; 8550 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8551 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8552 break; 8553 case ENOTSUP: 8554 /* 8555 * The PERSISTENT RESERVE IN command would not be recognized by 8556 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8557 */ 8558 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8559 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8560 un->un_reservation_type = SD_SCSI2_RESERVATION; 8561 break; 8562 default: 8563 /* 8564 * default to SCSI-3 reservations 8565 */ 8566 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8567 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8568 un->un_reservation_type = SD_SCSI3_RESERVATION; 8569 break; 8570 } 8571 8572 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8573 "sd_unit_attach: un:0x%p exit success\n", un); 8574 8575 return (DDI_SUCCESS); 8576 8577 /* 8578 * An error occurred during the attach; clean up & return failure. 8579 */ 8580 8581 devid_failed: 8582 8583 setup_pm_failed: 8584 ddi_remove_minor_node(devi, NULL); 8585 8586 create_minor_nodes_failed: 8587 /* 8588 * Cleanup from the scsi_ifsetcap() calls (437868) 8589 */ 8590 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8591 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8592 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8593 8594 if (un->un_f_is_fibre == FALSE) { 8595 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8596 } 8597 8598 spinup_failed: 8599 8600 mutex_enter(SD_MUTEX(un)); 8601 8602 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 8603 if (un->un_direct_priority_timeid != NULL) { 8604 timeout_id_t temp_id = un->un_direct_priority_timeid; 8605 un->un_direct_priority_timeid = NULL; 8606 mutex_exit(SD_MUTEX(un)); 8607 (void) untimeout(temp_id); 8608 mutex_enter(SD_MUTEX(un)); 8609 } 8610 8611 /* Cancel any pending start/stop timeouts */ 8612 if (un->un_startstop_timeid != NULL) { 8613 timeout_id_t temp_id = un->un_startstop_timeid; 8614 un->un_startstop_timeid = NULL; 8615 mutex_exit(SD_MUTEX(un)); 8616 (void) untimeout(temp_id); 8617 mutex_enter(SD_MUTEX(un)); 8618 } 8619 8620 /* Cancel any pending reset-throttle timeouts */ 8621 if (un->un_reset_throttle_timeid != NULL) { 8622 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8623 un->un_reset_throttle_timeid = NULL; 8624 mutex_exit(SD_MUTEX(un)); 8625 (void) untimeout(temp_id); 8626 mutex_enter(SD_MUTEX(un)); 8627 } 8628 8629 /* Cancel any pending retry timeouts */ 8630 if (un->un_retry_timeid != NULL) { 8631 timeout_id_t temp_id = un->un_retry_timeid; 8632 un->un_retry_timeid = NULL; 8633 mutex_exit(SD_MUTEX(un)); 8634 (void) untimeout(temp_id); 8635 mutex_enter(SD_MUTEX(un)); 8636 } 8637 8638 /* Cancel any pending delayed cv broadcast timeouts */ 8639 if (un->un_dcvb_timeid != NULL) { 8640 timeout_id_t temp_id = un->un_dcvb_timeid; 8641 un->un_dcvb_timeid = NULL; 8642 mutex_exit(SD_MUTEX(un)); 8643 (void) untimeout(temp_id); 8644 mutex_enter(SD_MUTEX(un)); 8645 } 8646 8647 mutex_exit(SD_MUTEX(un)); 8648 8649 /* There should not be any in-progress I/O so ASSERT this check */ 8650 ASSERT(un->un_ncmds_in_transport == 0); 8651 ASSERT(un->un_ncmds_in_driver == 0); 8652 8653 /* Do not free the softstate if the callback routine is active */ 8654 sd_sync_with_callback(un); 8655 8656 /* 8657 * Partition stats apparently are not used with removables. These would 8658 * not have been created during attach, so no need to clean them up... 8659 */ 8660 if (un->un_stats != NULL) { 8661 kstat_delete(un->un_stats); 8662 un->un_stats = NULL; 8663 } 8664 if (un->un_errstats != NULL) { 8665 kstat_delete(un->un_errstats); 8666 un->un_errstats = NULL; 8667 } 8668 8669 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8670 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8671 8672 ddi_prop_remove_all(devi); 8673 sema_destroy(&un->un_semoclose); 8674 cv_destroy(&un->un_state_cv); 8675 8676 getrbuf_failed: 8677 8678 sd_free_rqs(un); 8679 8680 alloc_rqs_failed: 8681 8682 devp->sd_private = NULL; 8683 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8684 8685 get_softstate_failed: 8686 /* 8687 * Note: the man pages are unclear as to whether or not doing a 8688 * ddi_soft_state_free(sd_state, instance) is the right way to 8689 * clean up after the ddi_soft_state_zalloc() if the subsequent 8690 * ddi_get_soft_state() fails. The implication seems to be 8691 * that the get_soft_state cannot fail if the zalloc succeeds. 8692 */ 8693 ddi_soft_state_free(sd_state, instance); 8694 8695 probe_failed: 8696 scsi_unprobe(devp); 8697 #ifdef SDDEBUG 8698 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 8699 (sd_level_mask & SD_LOGMASK_TRACE)) { 8700 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 8701 (void *)un); 8702 } 8703 #endif 8704 return (DDI_FAILURE); 8705 } 8706 8707 8708 /* 8709 * Function: sd_unit_detach 8710 * 8711 * Description: Performs DDI_DETACH processing for sddetach(). 
8712 * 8713 * Return Code: DDI_SUCCESS 8714 * DDI_FAILURE 8715 * 8716 * Context: Kernel thread context 8717 */ 8718 8719 static int 8720 sd_unit_detach(dev_info_t *devi) 8721 { 8722 struct scsi_device *devp; 8723 struct sd_lun *un; 8724 int i; 8725 dev_t dev; 8726 #if !(defined(__i386) || defined(__amd64)) && !defined(__fibre) 8727 int reset_retval; 8728 #endif 8729 int instance = ddi_get_instance(devi); 8730 8731 mutex_enter(&sd_detach_mutex); 8732 8733 /* 8734 * Fail the detach for any of the following: 8735 * - Unable to get the sd_lun struct for the instance 8736 * - A layered driver has an outstanding open on the instance 8737 * - Another thread is already detaching this instance 8738 * - Another thread is currently performing an open 8739 */ 8740 devp = ddi_get_driver_private(devi); 8741 if ((devp == NULL) || 8742 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8743 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8744 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8745 mutex_exit(&sd_detach_mutex); 8746 return (DDI_FAILURE); 8747 } 8748 8749 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8750 8751 /* 8752 * Mark this instance as currently in a detach, to inhibit any 8753 * opens from a layered driver. 8754 */ 8755 un->un_detach_count++; 8756 mutex_exit(&sd_detach_mutex); 8757 8758 dev = sd_make_device(SD_DEVINFO(un)); 8759 8760 _NOTE(COMPETING_THREADS_NOW); 8761 8762 mutex_enter(SD_MUTEX(un)); 8763 8764 /* 8765 * Fail the detach if there are any outstanding layered 8766 * opens on this device. 8767 */ 8768 for (i = 0; i < NDKMAP; i++) { 8769 if (un->un_ocmap.lyropen[i] != 0) { 8770 goto err_notclosed; 8771 } 8772 } 8773 8774 /* 8775 * Verify there are NO outstanding commands issued to this device. 8776 * ie, un_ncmds_in_transport == 0. 8777 * It's possible to have outstanding commands through the physio 8778 * code path, even though everything's closed. 8779 */ 8780 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8781 (un->un_direct_priority_timeid != NULL) || 8782 (un->un_state == SD_STATE_RWAIT)) { 8783 mutex_exit(SD_MUTEX(un)); 8784 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8785 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8786 goto err_stillbusy; 8787 } 8788 8789 /* 8790 * If we have the device reserved, release the reservation. 8791 */ 8792 if ((un->un_resvd_status & SD_RESERVE) && 8793 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8794 mutex_exit(SD_MUTEX(un)); 8795 /* 8796 * Note: sd_reserve_release sends a command to the device 8797 * via the sd_ioctlcmd() path, and can sleep. 8798 */ 8799 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8800 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8801 "sd_dr_detach: Cannot release reservation \n"); 8802 } 8803 } else { 8804 mutex_exit(SD_MUTEX(un)); 8805 } 8806 8807 /* 8808 * Untimeout any reserve recover, throttle reset, restart unit 8809 * and delayed broadcast timeout threads. Protect the timeout pointer 8810 * from getting nulled by their callback functions. 
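*
* Each cancellation below follows the same sketch (un_foo_timeid
* is a stand-in for the actual field being handled):
*
*	if (un->un_foo_timeid != NULL) {
*		timeout_id_t temp_id = un->un_foo_timeid;
*		un->un_foo_timeid = NULL;
*		mutex_exit(SD_MUTEX(un));
*		(void) untimeout(temp_id);
*		mutex_enter(SD_MUTEX(un));
*	}
*
* SD_MUTEX is dropped around untimeout() because untimeout(9F) may
* wait for an in-progress handler to complete, and the handlers
* here acquire SD_MUTEX themselves.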
8811 */ 8812 mutex_enter(SD_MUTEX(un)); 8813 if (un->un_resvd_timeid != NULL) { 8814 timeout_id_t temp_id = un->un_resvd_timeid; 8815 un->un_resvd_timeid = NULL; 8816 mutex_exit(SD_MUTEX(un)); 8817 (void) untimeout(temp_id); 8818 mutex_enter(SD_MUTEX(un)); 8819 } 8820 8821 if (un->un_reset_throttle_timeid != NULL) { 8822 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8823 un->un_reset_throttle_timeid = NULL; 8824 mutex_exit(SD_MUTEX(un)); 8825 (void) untimeout(temp_id); 8826 mutex_enter(SD_MUTEX(un)); 8827 } 8828 8829 if (un->un_startstop_timeid != NULL) { 8830 timeout_id_t temp_id = un->un_startstop_timeid; 8831 un->un_startstop_timeid = NULL; 8832 mutex_exit(SD_MUTEX(un)); 8833 (void) untimeout(temp_id); 8834 mutex_enter(SD_MUTEX(un)); 8835 } 8836 8837 if (un->un_dcvb_timeid != NULL) { 8838 timeout_id_t temp_id = un->un_dcvb_timeid; 8839 un->un_dcvb_timeid = NULL; 8840 mutex_exit(SD_MUTEX(un)); 8841 (void) untimeout(temp_id); 8842 } else { 8843 mutex_exit(SD_MUTEX(un)); 8844 } 8845 8846 /* Remove any pending reservation reclaim requests for this device */ 8847 sd_rmv_resv_reclaim_req(dev); 8848 8849 mutex_enter(SD_MUTEX(un)); 8850 8851 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8852 if (un->un_direct_priority_timeid != NULL) { 8853 timeout_id_t temp_id = un->un_direct_priority_timeid; 8854 un->un_direct_priority_timeid = NULL; 8855 mutex_exit(SD_MUTEX(un)); 8856 (void) untimeout(temp_id); 8857 mutex_enter(SD_MUTEX(un)); 8858 } 8859 8860 /* Cancel any active multi-host disk watch thread requests */ 8861 if (un->un_mhd_token != NULL) { 8862 mutex_exit(SD_MUTEX(un)); 8863 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8864 if (scsi_watch_request_terminate(un->un_mhd_token, 8865 SCSI_WATCH_TERMINATE_NOWAIT)) { 8866 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8867 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8868 /* 8869 * Note: We are returning here after having removed 8870 * some driver timeouts above. This is consistent with 8871 * the legacy implementation but perhaps the watch 8872 * terminate call should be made with the wait flag set. 8873 */ 8874 goto err_stillbusy; 8875 } 8876 mutex_enter(SD_MUTEX(un)); 8877 un->un_mhd_token = NULL; 8878 } 8879 8880 if (un->un_swr_token != NULL) { 8881 mutex_exit(SD_MUTEX(un)); 8882 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8883 if (scsi_watch_request_terminate(un->un_swr_token, 8884 SCSI_WATCH_TERMINATE_NOWAIT)) { 8885 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8886 "sd_dr_detach: Cannot cancel swr watch request\n"); 8887 /* 8888 * Note: We are returning here after having removed 8889 * some driver timeouts above. This is consistent with 8890 * the legacy implementation but perhaps the watch 8891 * terminate call should be made with the wait flag set. 8892 */ 8893 goto err_stillbusy; 8894 } 8895 mutex_enter(SD_MUTEX(un)); 8896 un->un_swr_token = NULL; 8897 } 8898 8899 mutex_exit(SD_MUTEX(un)); 8900 8901 /* 8902 * Clear any scsi_reset_notifies. We clear the reset notifies 8903 * if we have not registered one. 8904 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8905 */ 8906 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8907 sd_mhd_reset_notify_cb, (caddr_t)un); 8908 8909 8910 8911 #if defined(__i386) || defined(__amd64) 8912 /* 8913 * Gratuitous bus resets sometimes cause an otherwise 8914 * okay ATA/ATAPI bus to hang. This is due the lack of 8915 * a clear spec of how resets should be implemented by ATA 8916 * disk drives. 
8917 */ 8918 #elif !defined(__fibre) /* "#else if" does NOT work! */ 8919 /* 8920 * Reset target/bus. 8921 * 8922 * Note: This is a legacy workaround for Elite III dual-port drives that 8923 * will not come online after an aborted detach and subsequent re-attach 8924 * It should be removed when the Elite III FW is fixed, or the drives 8925 * are no longer supported. 8926 */ 8927 if (un->un_f_cfg_is_atapi == FALSE) { 8928 reset_retval = 0; 8929 8930 /* If the device is in low power mode don't reset it */ 8931 8932 mutex_enter(&un->un_pm_mutex); 8933 if (!SD_DEVICE_IS_IN_LOW_POWER(un)) { 8934 /* 8935 * First try a LUN reset if we can, then move on to a 8936 * target reset if needed; swat the bus as a last 8937 * resort. 8938 */ 8939 mutex_exit(&un->un_pm_mutex); 8940 if (un->un_f_allow_bus_device_reset == TRUE) { 8941 if (un->un_f_lun_reset_enabled == TRUE) { 8942 reset_retval = 8943 scsi_reset(SD_ADDRESS(un), 8944 RESET_LUN); 8945 } 8946 if (reset_retval == 0) { 8947 reset_retval = 8948 scsi_reset(SD_ADDRESS(un), 8949 RESET_TARGET); 8950 } 8951 } 8952 if (reset_retval == 0) { 8953 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 8954 } 8955 } else { 8956 mutex_exit(&un->un_pm_mutex); 8957 } 8958 } 8959 #endif 8960 8961 /* 8962 * protect the timeout pointers from getting nulled by 8963 * their callback functions during the cancellation process. 8964 * In such a scenario untimeout can be invoked with a null value. 8965 */ 8966 _NOTE(NO_COMPETING_THREADS_NOW); 8967 8968 mutex_enter(&un->un_pm_mutex); 8969 if (un->un_pm_idle_timeid != NULL) { 8970 timeout_id_t temp_id = un->un_pm_idle_timeid; 8971 un->un_pm_idle_timeid = NULL; 8972 mutex_exit(&un->un_pm_mutex); 8973 8974 /* 8975 * Timeout is active; cancel it. 8976 * Note that it'll never be active on a device 8977 * that does not support PM therefore we don't 8978 * have to check before calling pm_idle_component. 8979 */ 8980 (void) untimeout(temp_id); 8981 (void) pm_idle_component(SD_DEVINFO(un), 0); 8982 mutex_enter(&un->un_pm_mutex); 8983 } 8984 8985 /* 8986 * Check whether there is already a timeout scheduled for power 8987 * management. If yes then don't lower the power here, that's. 8988 * the timeout handler's job. 8989 */ 8990 if (un->un_pm_timeid != NULL) { 8991 timeout_id_t temp_id = un->un_pm_timeid; 8992 un->un_pm_timeid = NULL; 8993 mutex_exit(&un->un_pm_mutex); 8994 /* 8995 * Timeout is active; cancel it. 8996 * Note that it'll never be active on a device 8997 * that does not support PM therefore we don't 8998 * have to check before calling pm_idle_component. 8999 */ 9000 (void) untimeout(temp_id); 9001 (void) pm_idle_component(SD_DEVINFO(un), 0); 9002 9003 } else { 9004 mutex_exit(&un->un_pm_mutex); 9005 if ((un->un_f_pm_is_enabled == TRUE) && 9006 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 9007 DDI_SUCCESS)) { 9008 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9009 "sd_dr_detach: Lower power request failed, ignoring.\n"); 9010 /* 9011 * Fix for bug: 4297749, item # 13 9012 * The above test now includes a check to see if PM is 9013 * supported by this device before call 9014 * pm_lower_power(). 9015 * Note, the following is not dead code. The call to 9016 * pm_lower_power above will generate a call back into 9017 * our sdpower routine which might result in a timeout 9018 * handler getting activated. Therefore the following 9019 * code is valid and necessary. 
9020 */ 9021 mutex_enter(&un->un_pm_mutex); 9022 if (un->un_pm_timeid != NULL) { 9023 timeout_id_t temp_id = un->un_pm_timeid; 9024 un->un_pm_timeid = NULL; 9025 mutex_exit(&un->un_pm_mutex); 9026 (void) untimeout(temp_id); 9027 (void) pm_idle_component(SD_DEVINFO(un), 0); 9028 } else { 9029 mutex_exit(&un->un_pm_mutex); 9030 } 9031 } 9032 } 9033 9034 /* 9035 * Cleanup from the scsi_ifsetcap() calls (437868) 9036 * Relocated here from above to be after the call to 9037 * pm_lower_power, which was getting errors. 9038 */ 9039 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 9040 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 9041 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 9042 9043 if (un->un_f_is_fibre == FALSE) { 9044 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 9045 } 9046 9047 /* 9048 * Remove any event callbacks, fibre only 9049 */ 9050 if (un->un_f_is_fibre == TRUE) { 9051 if ((un->un_insert_event != NULL) && 9052 (ddi_remove_event_handler(un->un_insert_cb_id) != 9053 DDI_SUCCESS)) { 9054 /* 9055 * Note: We are returning here after having done 9056 * substantial cleanup above. This is consistent 9057 * with the legacy implementation but this may not 9058 * be the right thing to do. 9059 */ 9060 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9061 "sd_dr_detach: Cannot cancel insert event\n"); 9062 goto err_remove_event; 9063 } 9064 un->un_insert_event = NULL; 9065 9066 if ((un->un_remove_event != NULL) && 9067 (ddi_remove_event_handler(un->un_remove_cb_id) != 9068 DDI_SUCCESS)) { 9069 /* 9070 * Note: We are returning here after having done 9071 * substantial cleanup above. This is consistent 9072 * with the legacy implementation but this may not 9073 * be the right thing to do. 9074 */ 9075 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9076 "sd_dr_detach: Cannot cancel remove event\n"); 9077 goto err_remove_event; 9078 } 9079 un->un_remove_event = NULL; 9080 } 9081 9082 /* Do not free the softstate if the callback routine is active */ 9083 sd_sync_with_callback(un); 9084 9085 /* 9086 * Hold the detach mutex here, to make sure that no other threads ever 9087 * can access a (partially) freed soft state structure. 9088 */ 9089 mutex_enter(&sd_detach_mutex); 9090 9091 /* 9092 * Clean up the soft state struct. 9093 * Cleanup is done in reverse order of allocs/inits. 9094 * At this point there should be no competing threads anymore. 9095 */ 9096 9097 /* Unregister and free device id. */ 9098 ddi_devid_unregister(devi); 9099 if (un->un_devid) { 9100 ddi_devid_free(un->un_devid); 9101 un->un_devid = NULL; 9102 } 9103 9104 /* 9105 * Destroy wmap cache if it exists. 9106 */ 9107 if (un->un_wm_cache != NULL) { 9108 kmem_cache_destroy(un->un_wm_cache); 9109 un->un_wm_cache = NULL; 9110 } 9111 9112 /* Remove minor nodes */ 9113 ddi_remove_minor_node(devi, NULL); 9114 9115 /* 9116 * kstat cleanup is done in detach for all device types (4363169). 9117 * We do not want to fail detach if the device kstats are not deleted 9118 * since there is a confusion about the devo_refcnt for the device. 9119 * We just delete the kstats and let detach complete successfully. 
9120 */ 9121 if (un->un_stats != NULL) { 9122 kstat_delete(un->un_stats); 9123 un->un_stats = NULL; 9124 } 9125 if (un->un_errstats != NULL) { 9126 kstat_delete(un->un_errstats); 9127 un->un_errstats = NULL; 9128 } 9129 9130 /* Remove partition stats (not created for removables) */ 9131 if (!ISREMOVABLE(un)) { 9132 for (i = 0; i < NSDMAP; i++) { 9133 if (un->un_pstats[i] != NULL) { 9134 kstat_delete(un->un_pstats[i]); 9135 un->un_pstats[i] = NULL; 9136 } 9137 } 9138 } 9139 9140 /* Remove xbuf registration */ 9141 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 9142 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 9143 9144 /* Remove driver properties */ 9145 ddi_prop_remove_all(devi); 9146 9147 mutex_destroy(&un->un_pm_mutex); 9148 cv_destroy(&un->un_pm_busy_cv); 9149 9150 /* Open/close semaphore */ 9151 sema_destroy(&un->un_semoclose); 9152 9153 /* Removable media condvar. */ 9154 cv_destroy(&un->un_state_cv); 9155 9156 /* Suspend/resume condvar. */ 9157 cv_destroy(&un->un_suspend_cv); 9158 cv_destroy(&un->un_disk_busy_cv); 9159 9160 sd_free_rqs(un); 9161 9162 /* Free up soft state */ 9163 devp->sd_private = NULL; 9164 bzero(un, sizeof (struct sd_lun)); 9165 ddi_soft_state_free(sd_state, instance); 9166 9167 mutex_exit(&sd_detach_mutex); 9168 9169 /* This frees up the INQUIRY data associated with the device. */ 9170 scsi_unprobe(devp); 9171 9172 return (DDI_SUCCESS); 9173 9174 err_notclosed: 9175 mutex_exit(SD_MUTEX(un)); 9176 9177 err_stillbusy: 9178 _NOTE(NO_COMPETING_THREADS_NOW); 9179 9180 err_remove_event: 9181 mutex_enter(&sd_detach_mutex); 9182 un->un_detach_count--; 9183 mutex_exit(&sd_detach_mutex); 9184 9185 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 9186 return (DDI_FAILURE); 9187 } 9188 9189 9190 /* 9191 * Driver minor node structure and data table 9192 */ 9193 struct driver_minor_data { 9194 char *name; 9195 minor_t minor; 9196 int type; 9197 }; 9198 9199 static struct driver_minor_data sd_minor_data[] = { 9200 {"a", 0, S_IFBLK}, 9201 {"b", 1, S_IFBLK}, 9202 {"c", 2, S_IFBLK}, 9203 {"d", 3, S_IFBLK}, 9204 {"e", 4, S_IFBLK}, 9205 {"f", 5, S_IFBLK}, 9206 {"g", 6, S_IFBLK}, 9207 {"h", 7, S_IFBLK}, 9208 #if defined(_SUNOS_VTOC_16) 9209 {"i", 8, S_IFBLK}, 9210 {"j", 9, S_IFBLK}, 9211 {"k", 10, S_IFBLK}, 9212 {"l", 11, S_IFBLK}, 9213 {"m", 12, S_IFBLK}, 9214 {"n", 13, S_IFBLK}, 9215 {"o", 14, S_IFBLK}, 9216 {"p", 15, S_IFBLK}, 9217 #endif /* defined(_SUNOS_VTOC_16) */ 9218 #if defined(_FIRMWARE_NEEDS_FDISK) 9219 {"q", 16, S_IFBLK}, 9220 {"r", 17, S_IFBLK}, 9221 {"s", 18, S_IFBLK}, 9222 {"t", 19, S_IFBLK}, 9223 {"u", 20, S_IFBLK}, 9224 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9225 {"a,raw", 0, S_IFCHR}, 9226 {"b,raw", 1, S_IFCHR}, 9227 {"c,raw", 2, S_IFCHR}, 9228 {"d,raw", 3, S_IFCHR}, 9229 {"e,raw", 4, S_IFCHR}, 9230 {"f,raw", 5, S_IFCHR}, 9231 {"g,raw", 6, S_IFCHR}, 9232 {"h,raw", 7, S_IFCHR}, 9233 #if defined(_SUNOS_VTOC_16) 9234 {"i,raw", 8, S_IFCHR}, 9235 {"j,raw", 9, S_IFCHR}, 9236 {"k,raw", 10, S_IFCHR}, 9237 {"l,raw", 11, S_IFCHR}, 9238 {"m,raw", 12, S_IFCHR}, 9239 {"n,raw", 13, S_IFCHR}, 9240 {"o,raw", 14, S_IFCHR}, 9241 {"p,raw", 15, S_IFCHR}, 9242 #endif /* defined(_SUNOS_VTOC_16) */ 9243 #if defined(_FIRMWARE_NEEDS_FDISK) 9244 {"q,raw", 16, S_IFCHR}, 9245 {"r,raw", 17, S_IFCHR}, 9246 {"s,raw", 18, S_IFCHR}, 9247 {"t,raw", 19, S_IFCHR}, 9248 {"u,raw", 20, S_IFCHR}, 9249 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9250 {0} 9251 }; 9252 9253 static struct driver_minor_data sd_minor_data_efi[] = { 9254 {"a", 0, S_IFBLK}, 9255 {"b", 1, 
S_IFBLK}, 9256 {"c", 2, S_IFBLK}, 9257 {"d", 3, S_IFBLK}, 9258 {"e", 4, S_IFBLK}, 9259 {"f", 5, S_IFBLK}, 9260 {"g", 6, S_IFBLK}, 9261 {"wd", 7, S_IFBLK}, 9262 #if defined(_FIRMWARE_NEEDS_FDISK) 9263 {"q", 16, S_IFBLK}, 9264 {"r", 17, S_IFBLK}, 9265 {"s", 18, S_IFBLK}, 9266 {"t", 19, S_IFBLK}, 9267 {"u", 20, S_IFBLK}, 9268 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9269 {"a,raw", 0, S_IFCHR}, 9270 {"b,raw", 1, S_IFCHR}, 9271 {"c,raw", 2, S_IFCHR}, 9272 {"d,raw", 3, S_IFCHR}, 9273 {"e,raw", 4, S_IFCHR}, 9274 {"f,raw", 5, S_IFCHR}, 9275 {"g,raw", 6, S_IFCHR}, 9276 {"wd,raw", 7, S_IFCHR}, 9277 #if defined(_FIRMWARE_NEEDS_FDISK) 9278 {"q,raw", 16, S_IFCHR}, 9279 {"r,raw", 17, S_IFCHR}, 9280 {"s,raw", 18, S_IFCHR}, 9281 {"t,raw", 19, S_IFCHR}, 9282 {"u,raw", 20, S_IFCHR}, 9283 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9284 {0} 9285 }; 9286 9287 9288 /* 9289 * Function: sd_create_minor_nodes 9290 * 9291 * Description: Create the minor device nodes for the instance. 9292 * 9293 * Arguments: un - driver soft state (unit) structure 9294 * devi - pointer to device info structure 9295 * 9296 * Return Code: DDI_SUCCESS 9297 * DDI_FAILURE 9298 * 9299 * Context: Kernel thread context 9300 */ 9301 9302 static int 9303 sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi) 9304 { 9305 struct driver_minor_data *dmdp; 9306 struct scsi_device *devp; 9307 int instance; 9308 char name[48]; 9309 9310 ASSERT(un != NULL); 9311 devp = ddi_get_driver_private(devi); 9312 instance = ddi_get_instance(devp->sd_dev); 9313 9314 /* 9315 * Create all the minor nodes for this target. 9316 */ 9317 if (un->un_blockcount > DK_MAX_BLOCKS) 9318 dmdp = sd_minor_data_efi; 9319 else 9320 dmdp = sd_minor_data; 9321 while (dmdp->name != NULL) { 9322 9323 (void) sprintf(name, "%s", dmdp->name); 9324 9325 if (ddi_create_minor_node(devi, name, dmdp->type, 9326 (instance << SDUNIT_SHIFT) | dmdp->minor, 9327 un->un_node_type, NULL) == DDI_FAILURE) { 9328 /* 9329 * Clean up any nodes that may have been created, in 9330 * case this fails in the middle of the loop. 9331 */ 9332 ddi_remove_minor_node(devi, NULL); 9333 return (DDI_FAILURE); 9334 } 9335 dmdp++; 9336 } 9337 9338 return (DDI_SUCCESS); 9339 } 9340 9341 9342 /* 9343 * Function: sd_create_errstats 9344 * 9345 * Description: This routine instantiates the device error stats. 9346 * 9347 * Note: During attach the stats are instantiated first so they are 9348 * available for attach-time routines that utilize the driver 9349 * iopath to send commands to the device. The stats are initialized 9350 * separately so data obtained during some attach-time routines is 9351 * available. 
(4362483) 9352 * 9353 * Arguments: un - driver soft state (unit) structure 9354 * instance - driver instance 9355 * 9356 * Context: Kernel thread context 9357 */ 9358 9359 static void 9360 sd_create_errstats(struct sd_lun *un, int instance) 9361 { 9362 struct sd_errstats *stp; 9363 char kstatmodule_err[KSTAT_STRLEN]; 9364 char kstatname[KSTAT_STRLEN]; 9365 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 9366 9367 ASSERT(un != NULL); 9368 9369 if (un->un_errstats != NULL) { 9370 return; 9371 } 9372 9373 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 9374 "%serr", sd_label); 9375 (void) snprintf(kstatname, sizeof (kstatname), 9376 "%s%d,err", sd_label, instance); 9377 9378 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 9379 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 9380 9381 if (un->un_errstats == NULL) { 9382 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9383 "sd_create_errstats: Failed kstat_create\n"); 9384 return; 9385 } 9386 9387 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9388 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 9389 KSTAT_DATA_UINT32); 9390 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 9391 KSTAT_DATA_UINT32); 9392 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 9393 KSTAT_DATA_UINT32); 9394 kstat_named_init(&stp->sd_vid, "Vendor", 9395 KSTAT_DATA_CHAR); 9396 kstat_named_init(&stp->sd_pid, "Product", 9397 KSTAT_DATA_CHAR); 9398 kstat_named_init(&stp->sd_revision, "Revision", 9399 KSTAT_DATA_CHAR); 9400 kstat_named_init(&stp->sd_serial, "Serial No", 9401 KSTAT_DATA_CHAR); 9402 kstat_named_init(&stp->sd_capacity, "Size", 9403 KSTAT_DATA_ULONGLONG); 9404 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 9405 KSTAT_DATA_UINT32); 9406 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 9407 KSTAT_DATA_UINT32); 9408 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 9409 KSTAT_DATA_UINT32); 9410 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 9411 KSTAT_DATA_UINT32); 9412 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 9413 KSTAT_DATA_UINT32); 9414 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 9415 KSTAT_DATA_UINT32); 9416 9417 un->un_errstats->ks_private = un; 9418 un->un_errstats->ks_update = nulldev; 9419 9420 kstat_install(un->un_errstats); 9421 } 9422 9423 9424 /* 9425 * Function: sd_set_errstats 9426 * 9427 * Description: This routine sets the value of the vendor id, product id, 9428 * revision, serial number, and capacity device error stats. 9429 * 9430 * Note: During attach the stats are instantiated first so they are 9431 * available for attach-time routines that utilize the driver 9432 * iopath to send commands to the device. The stats are initialized 9433 * separately so data obtained during some attach-time routines is 9434 * available. 
(4362483) 9435 * 9436 * Arguments: un - driver soft state (unit) structure 9437 * 9438 * Context: Kernel thread context 9439 */ 9440 9441 static void 9442 sd_set_errstats(struct sd_lun *un) 9443 { 9444 struct sd_errstats *stp; 9445 9446 ASSERT(un != NULL); 9447 ASSERT(un->un_errstats != NULL); 9448 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9449 ASSERT(stp != NULL); 9450 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 9451 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 9452 (void) strncpy(stp->sd_revision.value.c, 9453 un->un_sd->sd_inq->inq_revision, 4); 9454 9455 /* 9456 * Set the "Serial No" kstat for Sun qualified drives (indicated by 9457 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 9458 * (4376302)) 9459 */ 9460 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 9461 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 9462 sizeof (SD_INQUIRY(un)->inq_serial)); 9463 } 9464 9465 if (un->un_f_blockcount_is_valid != TRUE) { 9466 /* 9467 * Set capacity error stat to 0 for no media. This ensures 9468 * a valid capacity is displayed in response to 'iostat -E' 9469 * when no media is present in the device. 9470 */ 9471 stp->sd_capacity.value.ui64 = 0; 9472 } else { 9473 /* 9474 * Multiply un_blockcount by un->un_sys_blocksize to get 9475 * capacity. 9476 * 9477 * Note: for non-512 blocksize devices "un_blockcount" has been 9478 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 9479 * (un_tgt_blocksize / un->un_sys_blocksize). 9480 */ 9481 stp->sd_capacity.value.ui64 = (uint64_t) 9482 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 9483 } 9484 } 9485 9486 9487 /* 9488 * Function: sd_set_pstats 9489 * 9490 * Description: This routine instantiates and initializes the partition 9491 * stats for each partition with more than zero blocks. 9492 * (4363169) 9493 * 9494 * Arguments: un - driver soft state (unit) structure 9495 * 9496 * Context: Kernel thread context 9497 */ 9498 9499 static void 9500 sd_set_pstats(struct sd_lun *un) 9501 { 9502 char kstatname[KSTAT_STRLEN]; 9503 int instance; 9504 int i; 9505 9506 ASSERT(un != NULL); 9507 9508 instance = ddi_get_instance(SD_DEVINFO(un)); 9509 9510 /* Note:x86: is this a VTOC8/VTOC16 difference? */ 9511 for (i = 0; i < NSDMAP; i++) { 9512 if ((un->un_pstats[i] == NULL) && 9513 (un->un_map[i].dkl_nblk != 0)) { 9514 (void) snprintf(kstatname, sizeof (kstatname), 9515 "%s%d,%s", sd_label, instance, 9516 sd_minor_data[i].name); 9517 un->un_pstats[i] = kstat_create(sd_label, 9518 instance, kstatname, "partition", KSTAT_TYPE_IO, 9519 1, KSTAT_FLAG_PERSISTENT); 9520 if (un->un_pstats[i] != NULL) { 9521 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 9522 kstat_install(un->un_pstats[i]); 9523 } 9524 } 9525 } 9526 } 9527 9528 9529 #if (defined(__fibre)) 9530 /* 9531 * Function: sd_init_event_callbacks 9532 * 9533 * Description: This routine initializes the insertion and removal event 9534 * callbacks. 
(fibre only) 9535 * 9536 * Arguments: un - driver soft state (unit) structure 9537 * 9538 * Context: Kernel thread context 9539 */ 9540 9541 static void 9542 sd_init_event_callbacks(struct sd_lun *un) 9543 { 9544 ASSERT(un != NULL); 9545 9546 if ((un->un_insert_event == NULL) && 9547 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 9548 &un->un_insert_event) == DDI_SUCCESS)) { 9549 /* 9550 * Add the callback for an insertion event 9551 */ 9552 (void) ddi_add_event_handler(SD_DEVINFO(un), 9553 un->un_insert_event, sd_event_callback, (void *)un, 9554 &(un->un_insert_cb_id)); 9555 } 9556 9557 if ((un->un_remove_event == NULL) && 9558 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 9559 &un->un_remove_event) == DDI_SUCCESS)) { 9560 /* 9561 * Add the callback for a removal event 9562 */ 9563 (void) ddi_add_event_handler(SD_DEVINFO(un), 9564 un->un_remove_event, sd_event_callback, (void *)un, 9565 &(un->un_remove_cb_id)); 9566 } 9567 } 9568 9569 9570 /* 9571 * Function: sd_event_callback 9572 * 9573 * Description: This routine handles insert/remove events (photon). The 9574 * state is changed to OFFLINE, which can be used to suppress 9575 * error messages. (fibre only) 9576 * 9577 * Arguments: un - driver soft state (unit) structure 9578 * 9579 * Context: Callout thread context 9580 */ 9581 /* ARGSUSED */ 9582 static void 9583 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 9584 void *bus_impldata) 9585 { 9586 struct sd_lun *un = (struct sd_lun *)arg; 9587 9588 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 9589 if (event == un->un_insert_event) { 9590 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 9591 mutex_enter(SD_MUTEX(un)); 9592 if (un->un_state == SD_STATE_OFFLINE) { 9593 if (un->un_last_state != SD_STATE_SUSPENDED) { 9594 un->un_state = un->un_last_state; 9595 } else { 9596 /* 9597 * We have gone through SUSPEND/RESUME while 9598 * we were offline. Restore the last state. 9599 */ 9600 un->un_state = un->un_save_state; 9601 } 9602 } 9603 mutex_exit(SD_MUTEX(un)); 9604 9605 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 9606 } else if (event == un->un_remove_event) { 9607 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 9608 mutex_enter(SD_MUTEX(un)); 9609 /* 9610 * We need to handle an event callback that occurs during 9611 * the suspend operation, since we don't prevent it. 9612 */ 9613 if (un->un_state != SD_STATE_OFFLINE) { 9614 if (un->un_state != SD_STATE_SUSPENDED) { 9615 New_state(un, SD_STATE_OFFLINE); 9616 } else { 9617 un->un_last_state = SD_STATE_OFFLINE; 9618 } 9619 } 9620 mutex_exit(SD_MUTEX(un)); 9621 } else { 9622 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9623 "!Unknown event\n"); 9624 } 9625 9626 } 9627 #endif 9628 9629 9630 /* 9631 * Function: sd_disable_caching() 9632 * 9633 * Description: This routine is the driver entry point for disabling 9634 * read and write caching by modifying the WCE (write cache 9635 * enable) and RCD (read cache disable) bits of mode 9636 * page 8 (MODEPAGE_CACHING).
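 *
 * [Editorial sketch, not in the original source] For a non-ATAPI
 * disk that returns one block descriptor, the caching page retrieved
 * below starts at offset (hdrlen + bd_len) into the returned buffer,
 * and disabling both caches amounts to:
 *
 *	pg = (struct mode_caching *)(header + hdrlen + bd_len);
 *	pg->wce = 0;		-- turn write caching off
 *	pg->rcd = 1;		-- set "read cache disable"
 *
 * followed by a MODE SELECT of the modified buffer, as the body of
 * this routine does.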
9637 * 9638 * Arguments: un - driver soft state (unit) structure 9639 * 9640 * Return Code: EIO 9641 * code returned by sd_send_scsi_MODE_SENSE and 9642 * sd_send_scsi_MODE_SELECT 9643 * 9644 * Context: Kernel Thread 9645 */ 9646 9647 static int 9648 sd_disable_caching(struct sd_lun *un) 9649 { 9650 struct mode_caching *mode_caching_page; 9651 uchar_t *header; 9652 size_t buflen; 9653 int hdrlen; 9654 int bd_len; 9655 int rval = 0; 9656 9657 ASSERT(un != NULL); 9658 9659 /* 9660 * Do a test unit ready, otherwise a mode sense may not work if this 9661 * is the first command sent to the device after boot. 9662 */ 9663 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9664 9665 if (un->un_f_cfg_is_atapi == TRUE) { 9666 hdrlen = MODE_HEADER_LENGTH_GRP2; 9667 } else { 9668 hdrlen = MODE_HEADER_LENGTH; 9669 } 9670 9671 /* 9672 * Allocate memory for the retrieved mode page and its headers. Set 9673 * a pointer to the page itself. 9674 */ 9675 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9676 header = kmem_zalloc(buflen, KM_SLEEP); 9677 9678 /* Get the information from the device. */ 9679 if (un->un_f_cfg_is_atapi == TRUE) { 9680 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 9681 MODEPAGE_CACHING, SD_PATH_DIRECT); 9682 } else { 9683 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 9684 MODEPAGE_CACHING, SD_PATH_DIRECT); 9685 } 9686 if (rval != 0) { 9687 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9688 "sd_disable_caching: Mode Sense Failed\n"); 9689 kmem_free(header, buflen); 9690 return (rval); 9691 } 9692 9693 /* 9694 * Determine size of Block Descriptors in order to locate 9695 * the mode page data. ATAPI devices return 0, SCSI devices 9696 * should return MODE_BLK_DESC_LENGTH. 9697 */ 9698 if (un->un_f_cfg_is_atapi == TRUE) { 9699 struct mode_header_grp2 *mhp; 9700 mhp = (struct mode_header_grp2 *)header; 9701 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9702 } else { 9703 bd_len = ((struct mode_header *)header)->bdesc_length; 9704 } 9705 9706 if (bd_len > MODE_BLK_DESC_LENGTH) { 9707 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9708 "sd_disable_caching: Mode Sense returned invalid " 9709 "block descriptor length\n"); 9710 kmem_free(header, buflen); 9711 return (EIO); 9712 } 9713 9714 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9715 9716 /* Check the relevant bits on successful mode sense. */ 9717 if ((mode_caching_page->wce) || !(mode_caching_page->rcd)) { 9718 /* 9719 * Read or write caching is enabled. Disable both of them. 9720 */ 9721 mode_caching_page->wce = 0; 9722 mode_caching_page->rcd = 1; 9723 9724 /* Clear reserved bits before mode select. */ 9725 mode_caching_page->mode_page.ps = 0; 9726 9727 /* 9728 * Clear out mode header for mode select. 9729 * The rest of the retrieved page will be reused. 9730 */ 9731 bzero(header, hdrlen); 9732 9733 /* Change the cache page to disable all caching. */ 9734 if (un->un_f_cfg_is_atapi == TRUE) { 9735 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 9736 buflen, SD_SAVE_PAGE, SD_PATH_DIRECT); 9737 } else { 9738 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 9739 buflen, SD_SAVE_PAGE, SD_PATH_DIRECT); 9740 } 9741 } 9742 9743 kmem_free(header, buflen); 9744 return (rval); 9745 } 9746 9747 9748 /* 9749 * Function: sd_get_write_cache_enabled() 9750 * 9751 * Description: This routine is the driver entry point for determining if 9752 * write caching is enabled. It examines the WCE (write cache 9753 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
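 *
 * [Editorial note, not in the original source] As in
 * sd_disable_caching() above, the block descriptor length must be
 * recovered before the page data can be located; ATAPI devices use
 * the group-2 mode header, which splits the length across two bytes,
 * reassembled below as
 *
 *	bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
 *
 * while SCSI disks use the group-0 header, whose bdesc_length is a
 * single byte.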
9754 * 9755 * Arguments: un - driver soft state (unit) structure 9756 * is_enabled - pointer to int where write cache enabled state 9757 * is returned (non-zero -> write cache enabled) 9758 * 9759 * 9760 * Return Code: EIO 9761 * code returned by sd_send_scsi_MODE_SENSE 9762 * 9763 * Context: Kernel Thread 9764 * 9765 * NOTE: If ioctl is added to disable write cache, this sequence should 9766 * be followed so that no locking is required for accesses to 9767 * un->un_f_write_cache_enabled: 9768 * do mode select to clear wce 9769 * do synchronize cache to flush cache 9770 * set un->un_f_write_cache_enabled = FALSE 9771 * 9772 * Conversely, an ioctl to enable the write cache should be done 9773 * in this order: 9774 * set un->un_f_write_cache_enabled = TRUE 9775 * do mode select to set wce 9776 */ 9777 9778 static int 9779 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 9780 { 9781 struct mode_caching *mode_caching_page; 9782 uchar_t *header; 9783 size_t buflen; 9784 int hdrlen; 9785 int bd_len; 9786 int rval = 0; 9787 9788 ASSERT(un != NULL); 9789 ASSERT(is_enabled != NULL); 9790 9791 /* in case of error, flag as enabled */ 9792 *is_enabled = TRUE; 9793 9794 /* 9795 * Do a test unit ready, otherwise a mode sense may not work if this 9796 * is the first command sent to the device after boot. 9797 */ 9798 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9799 9800 if (un->un_f_cfg_is_atapi == TRUE) { 9801 hdrlen = MODE_HEADER_LENGTH_GRP2; 9802 } else { 9803 hdrlen = MODE_HEADER_LENGTH; 9804 } 9805 9806 /* 9807 * Allocate memory for the retrieved mode page and its headers. Set 9808 * a pointer to the page itself. 9809 */ 9810 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9811 header = kmem_zalloc(buflen, KM_SLEEP); 9812 9813 /* Get the information from the device. */ 9814 if (un->un_f_cfg_is_atapi == TRUE) { 9815 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 9816 MODEPAGE_CACHING, SD_PATH_DIRECT); 9817 } else { 9818 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 9819 MODEPAGE_CACHING, SD_PATH_DIRECT); 9820 } 9821 if (rval != 0) { 9822 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9823 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9824 kmem_free(header, buflen); 9825 return (rval); 9826 } 9827 9828 /* 9829 * Determine size of Block Descriptors in order to locate 9830 * the mode page data. ATAPI devices return 0, SCSI devices 9831 * should return MODE_BLK_DESC_LENGTH. 9832 */ 9833 if (un->un_f_cfg_is_atapi == TRUE) { 9834 struct mode_header_grp2 *mhp; 9835 mhp = (struct mode_header_grp2 *)header; 9836 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9837 } else { 9838 bd_len = ((struct mode_header *)header)->bdesc_length; 9839 } 9840 9841 if (bd_len > MODE_BLK_DESC_LENGTH) { 9842 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9843 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9844 "block descriptor length\n"); 9845 kmem_free(header, buflen); 9846 return (EIO); 9847 } 9848 9849 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9850 *is_enabled = mode_caching_page->wce; 9851 9852 kmem_free(header, buflen); 9853 return (0); 9854 } 9855 9856 9857 /* 9858 * Function: sd_make_device 9859 * 9860 * Description: Utility routine to return the Solaris device number from 9861 * the data in the device's dev_info structure. 
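 *
 * [Editorial example, not in the original source] The minor number
 * encodes the instance above the SDUNIT_SHIFT bit position, with the
 * partition in the low bits, so for a hypothetical instance 2 this
 * routine produces the partition-0 device:
 *
 *	dev = makedevice(ddi_name_to_major(ddi_get_name(devi)),
 *	    (2 << SDUNIT_SHIFT) | 0);
 *
 * The SDUNIT() and SDPART() macros used elsewhere in this driver
 * recover the two fields from such a dev_t.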
9862 * 9863 * Return Code: The Solaris device number 9864 * 9865 * Context: Any 9866 */ 9867 9868 static dev_t 9869 sd_make_device(dev_info_t *devi) 9870 { 9871 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 9872 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9873 } 9874 9875 9876 /* 9877 * Function: sd_pm_entry 9878 * 9879 * Description: Called at the start of a new command to manage power 9880 * and busy status of a device. This includes determining whether 9881 * the current power state of the device is sufficient for 9882 * performing the command or whether it must be changed. 9883 * The PM framework is notified appropriately. 9884 * Only with a return status of DDI_SUCCESS will the 9885 * component be marked busy to the framework. 9886 * 9887 * All callers of sd_pm_entry must check the return status 9888 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 9889 * of DDI_FAILURE indicates the device failed to power up. 9890 * In this case un_pm_count has been adjusted so the result 9891 * on exit is still powered down, i.e. count is less than 0. 9892 * Calling sd_pm_exit with this count value hits an ASSERT. 9893 * 9894 * Return Code: DDI_SUCCESS or DDI_FAILURE 9895 * 9896 * Context: Kernel thread context. 9897 */ 9898 9899 static int 9900 sd_pm_entry(struct sd_lun *un) 9901 { 9902 int return_status = DDI_SUCCESS; 9903 9904 ASSERT(!mutex_owned(SD_MUTEX(un))); 9905 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9906 9907 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9908 9909 if (un->un_f_pm_is_enabled == FALSE) { 9910 SD_TRACE(SD_LOG_IO_PM, un, 9911 "sd_pm_entry: exiting, PM not enabled\n"); 9912 return (return_status); 9913 } 9914 9915 /* 9916 * Just increment a counter if PM is enabled. On the transition from 9917 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9918 * the count with each IO and mark the device as idle when the count 9919 * hits 0. 9920 * 9921 * If the count is less than 0 the device is powered down. If a powered 9922 * down device is successfully powered up then the count must be 9923 * incremented to reflect the power up. Note that it'll get incremented 9924 * a second time to become busy. 9925 * 9926 * Because the following has the potential to change the device state 9927 * and must release the un_pm_mutex to do so, only one thread can be 9928 * allowed through at a time. 9929 */ 9930 9931 mutex_enter(&un->un_pm_mutex); 9932 while (un->un_pm_busy == TRUE) { 9933 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9934 } 9935 un->un_pm_busy = TRUE; 9936 9937 if (un->un_pm_count < 1) { 9938 9939 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9940 9941 /* 9942 * Indicate we are now busy so the framework won't attempt to 9943 * power down the device. This call will only fail if either 9944 * we passed a bad component number or the device has no 9945 * components. Neither of these should ever happen. 9946 */ 9947 mutex_exit(&un->un_pm_mutex); 9948 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9949 ASSERT(return_status == DDI_SUCCESS); 9950 9951 mutex_enter(&un->un_pm_mutex); 9952 9953 if (un->un_pm_count < 0) { 9954 mutex_exit(&un->un_pm_mutex); 9955 9956 SD_TRACE(SD_LOG_IO_PM, un, 9957 "sd_pm_entry: power up component\n"); 9958 9959 /* 9960 * pm_raise_power will cause sdpower to be called 9961 * which brings the device power level to the 9962 * desired state, ON in this case. If successful, 9963 * un_pm_count and un_power_level will be updated 9964 * appropriately.
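 *
 * [Editorial sketch, not in the original source] The count
 * transitions described above, in the successful case:
 *
 *	-1 --(pm_raise_power via sdpower)--> 0   power-up adjustment
 *	 0 --(sd_pm_entry completes)-------> 1   command marked busy
 *	 1 --(sd_pm_exit at iodone)--------> 0   device marked idle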
9965 */ 9966 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9967 SD_SPINDLE_ON); 9968 9969 mutex_enter(&un->un_pm_mutex); 9970 9971 if (return_status != DDI_SUCCESS) { 9972 /* 9973 * Power up failed. 9974 * Idle the device and adjust the count 9975 * so the result on exit is that we're 9976 * still powered down, ie. count is less than 0. 9977 */ 9978 SD_TRACE(SD_LOG_IO_PM, un, 9979 "sd_pm_entry: power up failed," 9980 " idle the component\n"); 9981 9982 (void) pm_idle_component(SD_DEVINFO(un), 0); 9983 un->un_pm_count--; 9984 } else { 9985 /* 9986 * Device is powered up, verify the 9987 * count is non-negative. 9988 * This is debug only. 9989 */ 9990 ASSERT(un->un_pm_count == 0); 9991 } 9992 } 9993 9994 if (return_status == DDI_SUCCESS) { 9995 /* 9996 * For performance, now that the device has been tagged 9997 * as busy, and it's known to be powered up, update the 9998 * chain types to use jump tables that do not include 9999 * pm. This significantly lowers the overhead and 10000 * therefore improves performance. 10001 */ 10002 10003 mutex_exit(&un->un_pm_mutex); 10004 mutex_enter(SD_MUTEX(un)); 10005 SD_TRACE(SD_LOG_IO_PM, un, 10006 "sd_pm_entry: changing uscsi_chain_type from %d\n", 10007 un->un_uscsi_chain_type); 10008 10009 if (ISREMOVABLE(un)) { 10010 un->un_buf_chain_type = 10011 SD_CHAIN_INFO_RMMEDIA_NO_PM; 10012 } else { 10013 un->un_buf_chain_type = 10014 SD_CHAIN_INFO_DISK_NO_PM; 10015 } 10016 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 10017 10018 SD_TRACE(SD_LOG_IO_PM, un, 10019 " changed uscsi_chain_type to %d\n", 10020 un->un_uscsi_chain_type); 10021 mutex_exit(SD_MUTEX(un)); 10022 mutex_enter(&un->un_pm_mutex); 10023 10024 if (un->un_pm_idle_timeid == NULL) { 10025 /* 300 ms. */ 10026 un->un_pm_idle_timeid = 10027 timeout(sd_pm_idletimeout_handler, un, 10028 (drv_usectohz((clock_t)300000))); 10029 /* 10030 * Include an extra call to busy which keeps the 10031 * device busy with-respect-to the PM layer 10032 * until the timer fires, at which time it'll 10033 * get the extra idle call. 10034 */ 10035 (void) pm_busy_component(SD_DEVINFO(un), 0); 10036 } 10037 } 10038 } 10039 un->un_pm_busy = FALSE; 10040 /* Next... */ 10041 cv_signal(&un->un_pm_busy_cv); 10042 10043 un->un_pm_count++; 10044 10045 SD_TRACE(SD_LOG_IO_PM, un, 10046 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 10047 10048 mutex_exit(&un->un_pm_mutex); 10049 10050 return (return_status); 10051 } 10052 10053 10054 /* 10055 * Function: sd_pm_exit 10056 * 10057 * Description: Called at the completion of a command to manage busy 10058 * status for the device. If the device becomes idle the 10059 * PM framework is notified. 10060 * 10061 * Context: Kernel thread context 10062 */ 10063 10064 static void 10065 sd_pm_exit(struct sd_lun *un) 10066 { 10067 ASSERT(!mutex_owned(SD_MUTEX(un))); 10068 ASSERT(!mutex_owned(&un->un_pm_mutex)); 10069 10070 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 10071 10072 /* 10073 * After attach the following flag is only read, so don't 10074 * take the penalty of acquiring a mutex for it. 
10075 */ 10076 if (un->un_f_pm_is_enabled == TRUE) { 10077 10078 mutex_enter(&un->un_pm_mutex); 10079 un->un_pm_count--; 10080 10081 SD_TRACE(SD_LOG_IO_PM, un, 10082 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 10083 10084 ASSERT(un->un_pm_count >= 0); 10085 if (un->un_pm_count == 0) { 10086 mutex_exit(&un->un_pm_mutex); 10087 10088 SD_TRACE(SD_LOG_IO_PM, un, 10089 "sd_pm_exit: idle component\n"); 10090 10091 (void) pm_idle_component(SD_DEVINFO(un), 0); 10092 10093 } else { 10094 mutex_exit(&un->un_pm_mutex); 10095 } 10096 } 10097 10098 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 10099 } 10100 10101 10102 /* 10103 * Function: sdopen 10104 * 10105 * Description: Driver's open(9e) entry point function. 10106 * 10107 * Arguments: dev_i - pointer to device number 10108 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 10109 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10110 * cred_p - user credential pointer 10111 * 10112 * Return Code: EINVAL 10113 * ENXIO 10114 * EIO 10115 * EROFS 10116 * EBUSY 10117 * 10118 * Context: Kernel thread context 10119 */ 10120 /* ARGSUSED */ 10121 static int 10122 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 10123 { 10124 struct sd_lun *un; 10125 int nodelay; 10126 int part; 10127 uint64_t partmask; 10128 int instance; 10129 dev_t dev; 10130 int rval = EIO; 10131 10132 /* Validate the open type */ 10133 if (otyp >= OTYPCNT) { 10134 return (EINVAL); 10135 } 10136 10137 dev = *dev_p; 10138 instance = SDUNIT(dev); 10139 mutex_enter(&sd_detach_mutex); 10140 10141 /* 10142 * Fail the open if there is no softstate for the instance, or 10143 * if another thread somewhere is trying to detach the instance. 10144 */ 10145 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 10146 (un->un_detach_count != 0)) { 10147 mutex_exit(&sd_detach_mutex); 10148 /* 10149 * The probe cache only needs to be cleared when open (9e) fails 10150 * with ENXIO (4238046). 10151 */ 10152 /* 10153 * un-conditionally clearing probe cache is ok with 10154 * separate sd/ssd binaries 10155 * x86 platform can be an issue with both parallel 10156 * and fibre in 1 binary 10157 */ 10158 sd_scsi_clear_probe_cache(); 10159 return (ENXIO); 10160 } 10161 10162 /* 10163 * The un_layer_count is to prevent another thread in specfs from 10164 * trying to detach the instance, which can happen when we are 10165 * called from a higher-layer driver instead of thru specfs. 10166 * This will not be needed when DDI provides a layered driver 10167 * interface that allows specfs to know that an instance is in 10168 * use by a layered driver & should not be detached. 10169 * 10170 * Note: the semantics for layered driver opens are exactly one 10171 * close for every open. 10172 */ 10173 if (otyp == OTYP_LYR) { 10174 un->un_layer_count++; 10175 } 10176 10177 /* 10178 * Keep a count of the current # of opens in progress. This is because 10179 * some layered drivers try to call us as a regular open. This can 10180 * cause problems that we cannot prevent, however by keeping this count 10181 * we can at least keep our open and detach routines from racing against 10182 * each other under such conditions. 10183 */ 10184 un->un_opens_in_progress++; 10185 mutex_exit(&sd_detach_mutex); 10186 10187 nodelay = (flag & (FNDELAY | FNONBLOCK)); 10188 part = SDPART(dev); 10189 partmask = 1 << part; 10190 10191 /* 10192 * We use a semaphore here in order to serialize 10193 * open and close requests on the device. 
10194 */ 10195 sema_p(&un->un_semoclose); 10196 10197 mutex_enter(SD_MUTEX(un)); 10198 10199 /* 10200 * All device accesses go thru sdstrategy() where we check 10201 * on suspend status but there could be a scsi_poll command, 10202 * which bypasses sdstrategy(), so we need to check pm 10203 * status. 10204 */ 10205 10206 if (!nodelay) { 10207 while ((un->un_state == SD_STATE_SUSPENDED) || 10208 (un->un_state == SD_STATE_PM_CHANGING)) { 10209 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10210 } 10211 10212 mutex_exit(SD_MUTEX(un)); 10213 if (sd_pm_entry(un) != DDI_SUCCESS) { 10214 rval = EIO; 10215 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 10216 "sdopen: sd_pm_entry failed\n"); 10217 goto open_failed_with_pm; 10218 } 10219 mutex_enter(SD_MUTEX(un)); 10220 } 10221 10222 /* check for previous exclusive open */ 10223 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 10224 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10225 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 10226 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 10227 10228 if (un->un_exclopen & (partmask)) { 10229 goto excl_open_fail; 10230 } 10231 10232 if (flag & FEXCL) { 10233 int i; 10234 if (un->un_ocmap.lyropen[part]) { 10235 goto excl_open_fail; 10236 } 10237 for (i = 0; i < (OTYPCNT - 1); i++) { 10238 if (un->un_ocmap.regopen[i] & (partmask)) { 10239 goto excl_open_fail; 10240 } 10241 } 10242 } 10243 10244 /* 10245 * Check the write permission if this is a removable media device, 10246 * NDELAY has not been set, and writable permission is requested. 10247 * 10248 * Note: If NDELAY was set and this is write-protected media the WRITE 10249 * attempt will fail with EIO as part of the I/O processing. This is a 10250 * more permissive implementation that allows the open to succeed and 10251 * WRITE attempts to fail when appropriate. 10252 */ 10253 if (ISREMOVABLE(un)) { 10254 if ((flag & FWRITE) && (!nodelay)) { 10255 mutex_exit(SD_MUTEX(un)); 10256 /* 10257 * Defer the check for write permission on writable 10258 * DVD drive till sdstrategy and will not fail open even 10259 * if FWRITE is set as the device can be writable 10260 * depending upon the media and the media can change 10261 * after the call to open(). 10262 */ 10263 if (un->un_f_dvdram_writable_device == FALSE) { 10264 if (ISCD(un) || sr_check_wp(dev)) { 10265 rval = EROFS; 10266 mutex_enter(SD_MUTEX(un)); 10267 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10268 "write to cd or write protected media\n"); 10269 goto open_fail; 10270 } 10271 } 10272 mutex_enter(SD_MUTEX(un)); 10273 } 10274 } 10275 10276 /* 10277 * If opening in NDELAY/NONBLOCK mode, just return. 10278 * Check if disk is ready and has a valid geometry later. 10279 */ 10280 if (!nodelay) { 10281 mutex_exit(SD_MUTEX(un)); 10282 rval = sd_ready_and_valid(un); 10283 mutex_enter(SD_MUTEX(un)); 10284 /* 10285 * Fail if device is not ready or if the number of disk 10286 * blocks is zero or negative for non CD devices. 10287 */ 10288 if ((rval != SD_READY_VALID) || 10289 (!ISCD(un) && un->un_map[part].dkl_nblk <= 0)) { 10290 if (ISREMOVABLE(un)) { 10291 rval = ENXIO; 10292 } else { 10293 rval = EIO; 10294 } 10295 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10296 "device not ready or invalid disk block value\n"); 10297 goto open_fail; 10298 } 10299 #if defined(__i386) || defined(__amd64) 10300 } else { 10301 uchar_t *cp; 10302 /* 10303 * x86 requires special nodelay handling, so that p0 is 10304 * always defined and accessible. 10305 * Invalidate geometry only if device is not already open. 
10306 */ 10307 cp = &un->un_ocmap.chkd[0]; 10308 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10309 if (*cp != (uchar_t)0) { 10310 break; 10311 } 10312 cp++; 10313 } 10314 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10315 un->un_f_geometry_is_valid = FALSE; 10316 } 10317 10318 #endif 10319 } 10320 10321 if (otyp == OTYP_LYR) { 10322 un->un_ocmap.lyropen[part]++; 10323 } else { 10324 un->un_ocmap.regopen[otyp] |= partmask; 10325 } 10326 10327 /* Set up open and exclusive open flags */ 10328 if (flag & FEXCL) { 10329 un->un_exclopen |= (partmask); 10330 } 10331 10332 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10333 "open of part %d type %d\n", part, otyp); 10334 10335 mutex_exit(SD_MUTEX(un)); 10336 if (!nodelay) { 10337 sd_pm_exit(un); 10338 } 10339 10340 sema_v(&un->un_semoclose); 10341 10342 mutex_enter(&sd_detach_mutex); 10343 un->un_opens_in_progress--; 10344 mutex_exit(&sd_detach_mutex); 10345 10346 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10347 return (DDI_SUCCESS); 10348 10349 excl_open_fail: 10350 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10351 rval = EBUSY; 10352 10353 open_fail: 10354 mutex_exit(SD_MUTEX(un)); 10355 10356 /* 10357 * On a failed open we must exit the pm management. 10358 */ 10359 if (!nodelay) { 10360 sd_pm_exit(un); 10361 } 10362 open_failed_with_pm: 10363 sema_v(&un->un_semoclose); 10364 10365 mutex_enter(&sd_detach_mutex); 10366 un->un_opens_in_progress--; 10367 if (otyp == OTYP_LYR) { 10368 un->un_layer_count--; 10369 } 10370 mutex_exit(&sd_detach_mutex); 10371 10372 return (rval); 10373 } 10374 10375 10376 /* 10377 * Function: sdclose 10378 * 10379 * Description: Driver's close(9e) entry point function. 10380 * 10381 * Arguments: dev - device number 10382 * flag - file status flag, informational only 10383 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10384 * cred_p - user credential pointer 10385 * 10386 * Return Code: ENXIO 10387 * 10388 * Context: Kernel thread context 10389 */ 10390 /* ARGSUSED */ 10391 static int 10392 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 10393 { 10394 struct sd_lun *un; 10395 uchar_t *cp; 10396 int part; 10397 int nodelay; 10398 int rval = 0; 10399 10400 /* Validate the open type */ 10401 if (otyp >= OTYPCNT) { 10402 return (ENXIO); 10403 } 10404 10405 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10406 return (ENXIO); 10407 } 10408 10409 part = SDPART(dev); 10410 nodelay = flag & (FNDELAY | FNONBLOCK); 10411 10412 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10413 "sdclose: close of part %d type %d\n", part, otyp); 10414 10415 /* 10416 * We use a semaphore here in order to serialize 10417 * open and close requests on the device. 10418 */ 10419 sema_p(&un->un_semoclose); 10420 10421 mutex_enter(SD_MUTEX(un)); 10422 10423 /* Don't proceed if power is being changed. 
*/ 10424 while (un->un_state == SD_STATE_PM_CHANGING) { 10425 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10426 } 10427 10428 if (un->un_exclopen & (1 << part)) { 10429 un->un_exclopen &= ~(1 << part); 10430 } 10431 10432 /* Update the open partition map */ 10433 if (otyp == OTYP_LYR) { 10434 un->un_ocmap.lyropen[part] -= 1; 10435 } else { 10436 un->un_ocmap.regopen[otyp] &= ~(1 << part); 10437 } 10438 10439 cp = &un->un_ocmap.chkd[0]; 10440 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10441 if (*cp != (uchar_t)0) { 10442 break; 10443 } 10444 cp++; 10445 } 10446 10447 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10448 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 10449 10450 /* 10451 * We avoid persistence upon the last close, and set 10452 * the throttle back to the maximum. 10453 */ 10454 un->un_throttle = un->un_saved_throttle; 10455 10456 if (un->un_state == SD_STATE_OFFLINE) { 10457 if (un->un_f_is_fibre == FALSE) { 10458 scsi_log(SD_DEVINFO(un), sd_label, 10459 CE_WARN, "offline\n"); 10460 } 10461 un->un_f_geometry_is_valid = FALSE; 10462 10463 } else { 10464 /* 10465 * Flush any outstanding writes in NVRAM cache. 10466 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 10467 * command; it may not work for non-Pluto devices. 10468 * SYNCHRONIZE CACHE is not required for removables, 10469 * except DVD-RAM drives. 10470 * 10471 * Also note: because SYNCHRONIZE CACHE is currently 10472 * the only command issued here that requires the 10473 * drive be powered up, only do the power up before 10474 * sending the Sync Cache command. If additional 10475 * commands are added which require a powered up 10476 * drive, the following sequence may have to change. 10477 * 10478 * And finally, note that parallel SCSI on SPARC 10479 * only issues a Sync Cache to DVD-RAM, a newly 10480 * supported device. 10481 */ 10482 #if defined(__i386) || defined(__amd64) 10483 if (!ISREMOVABLE(un) || 10484 un->un_f_dvdram_writable_device == TRUE) { 10485 #else 10486 if (un->un_f_dvdram_writable_device == TRUE) { 10487 #endif 10488 mutex_exit(SD_MUTEX(un)); 10489 if (sd_pm_entry(un) == DDI_SUCCESS) { 10490 rval = 10491 sd_send_scsi_SYNCHRONIZE_CACHE(un, 10492 NULL); 10493 /* ignore error if not supported */ 10494 if (rval == ENOTSUP) { 10495 rval = 0; 10496 } else if (rval != 0) { 10497 rval = EIO; 10498 } 10499 sd_pm_exit(un); 10500 } else { 10501 rval = EIO; 10502 } 10503 mutex_enter(SD_MUTEX(un)); 10504 } 10505 10506 /* 10507 * For removable media devices, send an ALLOW MEDIA 10508 * REMOVAL command, but don't get upset if it fails. 10509 * Also invalidate the geometry. We need to raise 10510 * the power of the drive before we can call 10511 * sd_send_scsi_DOORLOCK() 10512 */ 10513 if (ISREMOVABLE(un)) { 10514 mutex_exit(SD_MUTEX(un)); 10515 if (sd_pm_entry(un) == DDI_SUCCESS) { 10516 rval = sd_send_scsi_DOORLOCK(un, 10517 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10518 10519 sd_pm_exit(un); 10520 if (ISCD(un) && (rval != 0) && 10521 (nodelay != 0)) { 10522 rval = ENXIO; 10523 } 10524 } else { 10525 rval = EIO; 10526 } 10527 mutex_enter(SD_MUTEX(un)); 10528 10529 sr_ejected(un); 10530 /* 10531 * Destroy the cache (if it exists) which was 10532 * allocated for the write maps since this is 10533 * the last close for this media. 10534 */ 10535 if (un->un_wm_cache) { 10536 /* 10537 * Check if there are pending commands; 10538 * if there are, give a warning and 10539 * do not destroy the cache.
10540 */ 10541 if (un->un_ncmds_in_driver > 0) { 10542 scsi_log(SD_DEVINFO(un), 10543 sd_label, CE_WARN, 10544 "Unable to clean up memory " 10545 "because of pending I/O\n"); 10546 } else { 10547 kmem_cache_destroy( 10548 un->un_wm_cache); 10549 un->un_wm_cache = NULL; 10550 } 10551 } 10552 } 10553 } 10554 } 10555 10556 mutex_exit(SD_MUTEX(un)); 10557 sema_v(&un->un_semoclose); 10558 10559 if (otyp == OTYP_LYR) { 10560 mutex_enter(&sd_detach_mutex); 10561 /* 10562 * The detach routine may run when the layer count 10563 * drops to zero. 10564 */ 10565 un->un_layer_count--; 10566 mutex_exit(&sd_detach_mutex); 10567 } 10568 10569 return (rval); 10570 } 10571 10572 10573 /* 10574 * Function: sd_ready_and_valid 10575 * 10576 * Description: Test if device is ready and has a valid geometry. 10577 * 10578 * Arguments: dev - device number 10579 * un - driver soft state (unit) structure 10580 * 10581 * Return Code: SD_READY_VALID ready and valid label 10582 * SD_READY_NOT_VALID ready, geom ops never applicable 10583 * SD_NOT_READY_VALID not ready, no label 10584 * 10585 * Context: Never called at interrupt context. 10586 */ 10587 10588 static int 10589 sd_ready_and_valid(struct sd_lun *un) 10590 { 10591 struct sd_errstats *stp; 10592 uint64_t capacity; 10593 uint_t lbasize; 10594 int rval = SD_READY_VALID; 10595 char name_str[48]; 10596 10597 ASSERT(un != NULL); 10598 ASSERT(!mutex_owned(SD_MUTEX(un))); 10599 10600 mutex_enter(SD_MUTEX(un)); 10601 if (ISREMOVABLE(un)) { 10602 mutex_exit(SD_MUTEX(un)); 10603 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 10604 rval = SD_NOT_READY_VALID; 10605 mutex_enter(SD_MUTEX(un)); 10606 goto done; 10607 } 10608 10609 mutex_enter(SD_MUTEX(un)); 10610 if ((un->un_f_geometry_is_valid == FALSE) || 10611 (un->un_f_blockcount_is_valid == FALSE) || 10612 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10613 10614 /* capacity has to be read every open. */ 10615 mutex_exit(SD_MUTEX(un)); 10616 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 10617 &lbasize, SD_PATH_DIRECT) != 0) { 10618 mutex_enter(SD_MUTEX(un)); 10619 un->un_f_geometry_is_valid = FALSE; 10620 rval = SD_NOT_READY_VALID; 10621 goto done; 10622 } else { 10623 mutex_enter(SD_MUTEX(un)); 10624 sd_update_block_info(un, lbasize, capacity); 10625 } 10626 } 10627 10628 /* 10629 * If this is a non 512 block device, allocate space for 10630 * the wmap cache. This is being done here since every time 10631 * a media is changed this routine will be called and the 10632 * block size is a function of media rather than device. 10633 */ 10634 if (NOT_DEVBSIZE(un)) { 10635 if (!(un->un_wm_cache)) { 10636 (void) snprintf(name_str, sizeof (name_str), 10637 "%s%d_cache", 10638 ddi_driver_name(SD_DEVINFO(un)), 10639 ddi_get_instance(SD_DEVINFO(un))); 10640 un->un_wm_cache = kmem_cache_create( 10641 name_str, sizeof (struct sd_w_map), 10642 8, sd_wm_cache_constructor, 10643 sd_wm_cache_destructor, NULL, 10644 (void *)un, NULL, 0); 10645 if (!(un->un_wm_cache)) { 10646 rval = ENOMEM; 10647 goto done; 10648 } 10649 } 10650 } 10651 10652 /* 10653 * Check if the media in the device is writable or not. 10654 */ 10655 if ((un->un_f_geometry_is_valid == FALSE) && ISCD(un)) { 10656 sd_check_for_writable_cd(un); 10657 } 10658 10659 } else { 10660 /* 10661 * Do a test unit ready to clear any unit attention from non-cd 10662 * devices. 
10663 */ 10664 mutex_exit(SD_MUTEX(un)); 10665 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 10666 mutex_enter(SD_MUTEX(un)); 10667 } 10668 10669 10670 if (un->un_state == SD_STATE_NORMAL) { 10671 /* 10672 * If the target is not yet ready here (defined by a TUR 10673 * failure), invalidate the geometry and print an 'offline' 10674 * message. This is a legacy message, as the state of the 10675 * target is not actually changed to SD_STATE_OFFLINE. 10676 * 10677 * If the TUR fails for EACCES (Reservation Conflict), it 10678 * means there actually is nothing wrong with the target that 10679 * would require invalidating the geometry, so continue in 10680 * that case as if the TUR was successful. 10681 */ 10682 int err; 10683 10684 mutex_exit(SD_MUTEX(un)); 10685 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 10686 mutex_enter(SD_MUTEX(un)); 10687 10688 if ((err != 0) && (err != EACCES)) { 10689 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10690 "offline\n"); 10691 un->un_f_geometry_is_valid = FALSE; 10692 rval = SD_NOT_READY_VALID; 10693 goto done; 10694 } 10695 } 10696 10697 if (un->un_f_format_in_progress == FALSE) { 10698 /* 10699 * Note: sd_validate_geometry may return TRUE, but that does 10700 * not necessarily mean un_f_geometry_is_valid == TRUE! 10701 */ 10702 rval = sd_validate_geometry(un, SD_PATH_DIRECT); 10703 if (rval == ENOTSUP) { 10704 if (un->un_f_geometry_is_valid == TRUE) 10705 rval = 0; 10706 else { 10707 rval = SD_READY_NOT_VALID; 10708 goto done; 10709 } 10710 } 10711 if (rval != 0) { 10712 /* 10713 * We don't check the validity of geometry for 10714 * CDROMs. Also we assume we have a good label 10715 * even if sd_validate_geometry returned ENOMEM. 10716 */ 10717 if (!ISCD(un) && rval != ENOMEM) { 10718 rval = SD_NOT_READY_VALID; 10719 goto done; 10720 } 10721 } 10722 } 10723 10724 #ifdef DOESNTWORK /* on eliteII, see 1118607 */ 10725 /* 10726 * check to see if this disk is write protected, if it is and we have 10727 * not set read-only, then fail 10728 */ 10729 if ((flag & FWRITE) && (sr_check_wp(dev))) { 10730 New_state(un, SD_STATE_CLOSED); 10731 goto done; 10732 } 10733 #endif 10734 10735 /* 10736 * If this is a removable media device, try and send 10737 * a PREVENT MEDIA REMOVAL command, but don't get upset 10738 * if it fails. For a CD, however, it is an error 10739 */ 10740 if (ISREMOVABLE(un)) { 10741 mutex_exit(SD_MUTEX(un)); 10742 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 10743 SD_PATH_DIRECT) != 0) && ISCD(un)) { 10744 rval = SD_NOT_READY_VALID; 10745 mutex_enter(SD_MUTEX(un)); 10746 goto done; 10747 } 10748 mutex_enter(SD_MUTEX(un)); 10749 } 10750 10751 /* The state has changed, inform the media watch routines */ 10752 un->un_mediastate = DKIO_INSERTED; 10753 cv_broadcast(&un->un_state_cv); 10754 rval = SD_READY_VALID; 10755 10756 done: 10757 10758 /* 10759 * Initialize the capacity kstat value, if no media previously 10760 * (capacity kstat is 0) and a media has been inserted 10761 * (un_blockcount > 0). 10762 * This is a more generic way then checking for ISREMOVABLE. 
10763 */ 10764 if (un->un_errstats != NULL) { 10765 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10766 if ((stp->sd_capacity.value.ui64 == 0) && 10767 (un->un_f_blockcount_is_valid == TRUE)) { 10768 stp->sd_capacity.value.ui64 = 10769 (uint64_t)((uint64_t)un->un_blockcount * 10770 un->un_sys_blocksize); 10771 } 10772 } 10773 10774 mutex_exit(SD_MUTEX(un)); 10775 return (rval); 10776 } 10777 10778 10779 /* 10780 * Function: sdmin 10781 * 10782 * Description: Routine to limit the size of a data transfer. Used in 10783 * conjunction with physio(9F). 10784 * 10785 * Arguments: bp - pointer to the indicated buf(9S) struct. 10786 * 10787 * Context: Kernel thread context. 10788 */ 10789 10790 static void 10791 sdmin(struct buf *bp) 10792 { 10793 struct sd_lun *un; 10794 int instance; 10795 10796 instance = SDUNIT(bp->b_edev); 10797 10798 un = ddi_get_soft_state(sd_state, instance); 10799 ASSERT(un != NULL); 10800 10801 if (bp->b_bcount > un->un_max_xfer_size) { 10802 bp->b_bcount = un->un_max_xfer_size; 10803 } 10804 } 10805 10806 10807 /* 10808 * Function: sdread 10809 * 10810 * Description: Driver's read(9e) entry point function. 10811 * 10812 * Arguments: dev - device number 10813 * uio - structure pointer describing where data is to be stored 10814 * in user's space 10815 * cred_p - user credential pointer 10816 * 10817 * Return Code: ENXIO 10818 * EIO 10819 * EINVAL 10820 * value returned by physio 10821 * 10822 * Context: Kernel thread context. 10823 */ 10824 /* ARGSUSED */ 10825 static int 10826 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10827 { 10828 struct sd_lun *un = NULL; 10829 int secmask; 10830 int err; 10831 10832 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10833 return (ENXIO); 10834 } 10835 10836 ASSERT(!mutex_owned(SD_MUTEX(un))); 10837 10838 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10839 mutex_enter(SD_MUTEX(un)); 10840 /* 10841 * Because the call to sd_ready_and_valid will issue I/O, we 10842 * must wait here if either the device is suspended or 10843 * if its power level is changing. 10844 */ 10845 while ((un->un_state == SD_STATE_SUSPENDED) || 10846 (un->un_state == SD_STATE_PM_CHANGING)) { 10847 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10848 } 10849 un->un_ncmds_in_driver++; 10850 mutex_exit(SD_MUTEX(un)); 10851 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10852 mutex_enter(SD_MUTEX(un)); 10853 un->un_ncmds_in_driver--; 10854 ASSERT(un->un_ncmds_in_driver >= 0); 10855 mutex_exit(SD_MUTEX(un)); 10856 return (EIO); 10857 } 10858 mutex_enter(SD_MUTEX(un)); 10859 un->un_ncmds_in_driver--; 10860 ASSERT(un->un_ncmds_in_driver >= 0); 10861 mutex_exit(SD_MUTEX(un)); 10862 } 10863 10864 /* 10865 * Read requests are restricted to multiples of the system block size. 10866 */ 10867 secmask = un->un_sys_blocksize - 1; 10868 10869 if (uio->uio_loffset & ((offset_t)(secmask))) { 10870 SD_ERROR(SD_LOG_READ_WRITE, un, 10871 "sdread: file offset not modulo %d\n", 10872 un->un_sys_blocksize); 10873 err = EINVAL; 10874 } else if (uio->uio_iov->iov_len & (secmask)) { 10875 SD_ERROR(SD_LOG_READ_WRITE, un, 10876 "sdread: transfer length not modulo %d\n", 10877 un->un_sys_blocksize); 10878 err = EINVAL; 10879 } else { 10880 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10881 } 10882 return (err); 10883 } 10884 10885 10886 /* 10887 * Function: sdwrite 10888 * 10889 * Description: Driver's write(9e) entry point function.
10890 * 10891 * Arguments: dev - device number 10892 * uio - structure pointer describing where data is stored in 10893 * user's space 10894 * cred_p - user credential pointer 10895 * 10896 * Return Code: ENXIO 10897 * EIO 10898 * EINVAL 10899 * value returned by physio 10900 * 10901 * Context: Kernel thread context. 10902 */ 10903 /* ARGSUSED */ 10904 static int 10905 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10906 { 10907 struct sd_lun *un = NULL; 10908 int secmask; 10909 int err; 10910 10911 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10912 return (ENXIO); 10913 } 10914 10915 ASSERT(!mutex_owned(SD_MUTEX(un))); 10916 10917 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10918 mutex_enter(SD_MUTEX(un)); 10919 /* 10920 * Because the call to sd_ready_and_valid will issue I/O, we 10921 * must wait here if either the device is suspended or 10922 * if its power level is changing. 10923 */ 10924 while ((un->un_state == SD_STATE_SUSPENDED) || 10925 (un->un_state == SD_STATE_PM_CHANGING)) { 10926 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10927 } 10928 un->un_ncmds_in_driver++; 10929 mutex_exit(SD_MUTEX(un)); 10930 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10931 mutex_enter(SD_MUTEX(un)); 10932 un->un_ncmds_in_driver--; 10933 ASSERT(un->un_ncmds_in_driver >= 0); 10934 mutex_exit(SD_MUTEX(un)); 10935 return (EIO); 10936 } 10937 mutex_enter(SD_MUTEX(un)); 10938 un->un_ncmds_in_driver--; 10939 ASSERT(un->un_ncmds_in_driver >= 0); 10940 mutex_exit(SD_MUTEX(un)); 10941 } 10942 10943 /* 10944 * Write requests are restricted to multiples of the system block size. 10945 */ 10946 secmask = un->un_sys_blocksize - 1; 10947 10948 if (uio->uio_loffset & ((offset_t)(secmask))) { 10949 SD_ERROR(SD_LOG_READ_WRITE, un, 10950 "sdwrite: file offset not modulo %d\n", 10951 un->un_sys_blocksize); 10952 err = EINVAL; 10953 } else if (uio->uio_iov->iov_len & (secmask)) { 10954 SD_ERROR(SD_LOG_READ_WRITE, un, 10955 "sdwrite: transfer length not modulo %d\n", 10956 un->un_sys_blocksize); 10957 err = EINVAL; 10958 } else { 10959 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10960 } 10961 return (err); 10962 } 10963 10964 10965 /* 10966 * Function: sdaread 10967 * 10968 * Description: Driver's aread(9e) entry point function. 10969 * 10970 * Arguments: dev - device number 10971 * aio - structure pointer describing where data is to be stored 10972 * cred_p - user credential pointer 10973 * 10974 * Return Code: ENXIO 10975 * EIO 10976 * EINVAL 10977 * value returned by aphysio 10978 * 10979 * Context: Kernel thread context. 10980 */ 10981 /* ARGSUSED */ 10982 static int 10983 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10984 { 10985 struct sd_lun *un = NULL; 10986 struct uio *uio = aio->aio_uio; 10987 int secmask; 10988 int err; 10989 10990 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10991 return (ENXIO); 10992 } 10993 10994 ASSERT(!mutex_owned(SD_MUTEX(un))); 10995 10996 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10997 mutex_enter(SD_MUTEX(un)); 10998 /* 10999 * Because the call to sd_ready_and_valid will issue I/O, we 11000 * must wait here if either the device is suspended or 11001 * if its power level is changing.
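 * (The wait below is a loop rather than a single cv_wait(9F) because the state must be re-checked after every wakeup: the device may have been suspended again, or the power transition may still be in progress, by the time this thread actually runs.)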
11002 */ 11003 while ((un->un_state == SD_STATE_SUSPENDED) || 11004 (un->un_state == SD_STATE_PM_CHANGING)) { 11005 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11006 } 11007 un->un_ncmds_in_driver++; 11008 mutex_exit(SD_MUTEX(un)); 11009 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 11010 mutex_enter(SD_MUTEX(un)); 11011 un->un_ncmds_in_driver--; 11012 ASSERT(un->un_ncmds_in_driver >= 0); 11013 mutex_exit(SD_MUTEX(un)); 11014 return (EIO); 11015 } 11016 mutex_enter(SD_MUTEX(un)); 11017 un->un_ncmds_in_driver--; 11018 ASSERT(un->un_ncmds_in_driver >= 0); 11019 mutex_exit(SD_MUTEX(un)); 11020 } 11021 11022 /* 11023 * Read requests are restricted to multiples of the system block size. 11024 */ 11025 secmask = un->un_sys_blocksize - 1; 11026 11027 if (uio->uio_loffset & ((offset_t)(secmask))) { 11028 SD_ERROR(SD_LOG_READ_WRITE, un, 11029 "sdaread: file offset not modulo %d\n", 11030 un->un_sys_blocksize); 11031 err = EINVAL; 11032 } else if (uio->uio_iov->iov_len & (secmask)) { 11033 SD_ERROR(SD_LOG_READ_WRITE, un, 11034 "sdaread: transfer length not modulo %d\n", 11035 un->un_sys_blocksize); 11036 err = EINVAL; 11037 } else { 11038 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 11039 } 11040 return (err); 11041 } 11042 11043 11044 /* 11045 * Function: sdawrite 11046 * 11047 * Description: Driver's awrite(9e) entry point function. 11048 * 11049 * Arguments: dev - device number 11050 * aio - structure pointer describing where data is stored 11051 * cred_p - user credential pointer 11052 * 11053 * Return Code: ENXIO 11054 * EIO 11055 * EINVAL 11056 * value returned by aphysio 11057 * 11058 * Context: Kernel thread context. 11059 */ 11060 /* ARGSUSED */ 11061 static int 11062 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 11063 { 11064 struct sd_lun *un = NULL; 11065 struct uio *uio = aio->aio_uio; 11066 int secmask; 11067 int err; 11068 11069 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 11070 return (ENXIO); 11071 } 11072 11073 ASSERT(!mutex_owned(SD_MUTEX(un))); 11074 11075 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 11076 mutex_enter(SD_MUTEX(un)); 11077 /* 11078 * Because the call to sd_ready_and_valid will issue I/O, we 11079 * must wait here if either the device is suspended or 11080 * if its power level is changing. 11081 */ 11082 while ((un->un_state == SD_STATE_SUSPENDED) || 11083 (un->un_state == SD_STATE_PM_CHANGING)) { 11084 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11085 } 11086 un->un_ncmds_in_driver++; 11087 mutex_exit(SD_MUTEX(un)); 11088 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 11089 mutex_enter(SD_MUTEX(un)); 11090 un->un_ncmds_in_driver--; 11091 ASSERT(un->un_ncmds_in_driver >= 0); 11092 mutex_exit(SD_MUTEX(un)); 11093 return (EIO); 11094 } 11095 mutex_enter(SD_MUTEX(un)); 11096 un->un_ncmds_in_driver--; 11097 ASSERT(un->un_ncmds_in_driver >= 0); 11098 mutex_exit(SD_MUTEX(un)); 11099 } 11100 11101 /* 11102 * Write requests are restricted to multiples of the system block size.
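 * For example (illustrative numbers): with a 512-byte system block size, secmask below is 0x1ff, and any file offset or iovec length with one of those low nine bits set is rejected with EINVAL.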
11103 */ 11104 secmask = un->un_sys_blocksize - 1; 11105 11106 if (uio->uio_loffset & ((offset_t)(secmask))) { 11107 SD_ERROR(SD_LOG_READ_WRITE, un, 11108 "sdawrite: file offset not modulo %d\n", 11109 un->un_sys_blocksize); 11110 err = EINVAL; 11111 } else if (uio->uio_iov->iov_len & (secmask)) { 11112 SD_ERROR(SD_LOG_READ_WRITE, un, 11113 "sdawrite: transfer length not modulo %d\n", 11114 un->un_sys_blocksize); 11115 err = EINVAL; 11116 } else { 11117 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11118 } 11119 return (err); 11120 } 11121 11122 11123 11124 11125 11126 /* 11127 * Driver IO processing follows this sequence: 11128 * 11129 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11130 * | | ^ 11131 * v v | 11132 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11133 * | | | | 11134 * v | | | 11135 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11136 * | | ^ ^ 11137 * v v | | 11138 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11139 * | | | | 11140 * +---+ | +------------+ +-------+ 11141 * | | | | 11142 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11143 * | v | | 11144 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11145 * | | ^ | 11146 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11147 * | v | | 11148 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11149 * | | ^ | 11150 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11151 * | v | | 11152 * | sd_checksum_iostart() sd_checksum_iodone() | 11153 * | | ^ | 11154 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11155 * | v | | 11156 * | sd_pm_iostart() sd_pm_iodone() | 11157 * | | ^ | 11158 * | | | | 11159 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11160 * | ^ 11161 * v | 11162 * sd_core_iostart() | 11163 * | | 11164 * | +------>(*destroypkt)() 11165 * +-> sd_start_cmds() <-+ | | 11166 * | | | v 11167 * | | | scsi_destroy_pkt(9F) 11168 * | | | 11169 * +->(*initpkt)() +- sdintr() 11170 * | | | | 11171 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11172 * | +-> scsi_setup_cdb(9F) | 11173 * | | 11174 * +--> scsi_transport(9F) | 11175 * | | 11176 * +----> SCSA ---->+ 11177 * 11178 * 11179 * This code is based upon the following presumptions: 11180 * 11181 * - iostart and iodone functions operate on buf(9S) structures. These 11182 * functions perform the necessary operations on the buf(9S) and pass 11183 * them along to the next function in the chain by using the macros 11184 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11185 * (for iodone side functions). 11186 * 11187 * - The iostart side functions may sleep. The iodone side functions 11188 * are called under interrupt context and may NOT sleep. Therefore 11189 * iodone side functions also may not call iostart side functions. 11190 * (NOTE: iostart side functions should NOT sleep for memory, as 11191 * this could result in deadlock.) 11192 * 11193 * - An iostart side function may call its corresponding iodone side 11194 * function directly (if necessary). 11195 * 11196 * - In the event of an error, an iostart side function can return a buf(9S) 11197 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11198 * b_error in the usual way, of course). 11199 * 11200 * - The taskq mechanism may be used by the iodone side functions to dispatch 11201 * requests to the iostart side functions. The iostart side functions in 11202 * this case would be called under the context of a taskq thread, so it's 11203 * OK for them to block/sleep/spin in this case.
11204 * 11205 * - iostart side functions may allocate "shadow" buf(9S) structs and 11206 * pass them along to the next function in the chain. The corresponding 11207 * iodone side functions must coalesce the "shadow" bufs and return 11208 * the "original" buf to the next higher layer. 11209 * 11210 * - The b_private field of the buf(9S) struct holds a pointer to 11211 * an sd_xbuf struct, which contains information needed to 11212 * construct the scsi_pkt for the command. 11213 * 11214 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 11215 * layer must acquire & release the SD_MUTEX(un) as needed. 11216 */ 11217 11218 11219 /* 11220 * Create taskq for all targets in the system. This is created at 11221 * _init(9E) and destroyed at _fini(9E). 11222 * 11223 * Note: here we set the minalloc to a reasonably high number to ensure that 11224 * we will have an adequate supply of task entries available at interrupt time. 11225 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 11226 * sd_taskq_create(). Since we do not want to sleep for allocations at 11227 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 11228 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 11229 * requests at any one instant in time. 11230 */ 11231 #define SD_TASKQ_NUMTHREADS 8 11232 #define SD_TASKQ_MINALLOC 256 11233 #define SD_TASKQ_MAXALLOC 256 11234 11235 static taskq_t *sd_tq = NULL; 11236 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 11237 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 11238 11239 /* 11240 * The following task queue is being created for the write part of 11241 * read-modify-write of non-512 block size devices. 11242 * Limit the number of threads to 1 for now. This number has been chosen 11243 * considering the fact that it currently applies only to DVD-RAM and MO 11244 * drives, for which performance is not the main criterion at this stage. 11245 * Note: It remains to be explored whether a single taskq can be used in future 11246 */ 11247 #define SD_WMR_TASKQ_NUMTHREADS 1 11248 static taskq_t *sd_wmr_tq = NULL; 11249 11250 /* 11251 * Function: sd_taskq_create 11252 * 11253 * Description: Create taskq thread(s) and preallocate task entries 11254 * 11255 * Return Code: None; the created taskqs are stored in sd_tq and sd_wmr_tq. 11256 * 11257 * Context: Can sleep. Requires blockable context. 11258 * 11259 * Notes: - The taskq() facility currently is NOT part of the DDI. 11260 * (definitely NOT recommended for 3rd-party drivers!) :-) 11261 * - taskq_create() will block for memory; also, it will panic 11262 * if it cannot create the requested number of threads. 11263 * - Currently taskq_create() creates threads that cannot be 11264 * swapped.
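 * - A sketch of the intended usage (illustrative; not a complete list of call sites): a dispatch from iodone context looks like taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, KM_NOSLEEP), as in sd_mapblocksize_iodone() below; a return of 0 means no task entry was available and the caller must fail or retry the request itself.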
11265 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11266 * supply of taskq entries at interrupt time (i.e., so that we 11267 * do not have to sleep for memory) 11268 */ 11269 11270 static void 11271 sd_taskq_create(void) 11272 { 11273 char taskq_name[TASKQ_NAMELEN]; 11274 11275 ASSERT(sd_tq == NULL); 11276 ASSERT(sd_wmr_tq == NULL); 11277 11278 (void) snprintf(taskq_name, sizeof (taskq_name), 11279 "%s_drv_taskq", sd_label); 11280 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11281 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11282 TASKQ_PREPOPULATE)); 11283 11284 (void) snprintf(taskq_name, sizeof (taskq_name), 11285 "%s_rmw_taskq", sd_label); 11286 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11287 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11288 TASKQ_PREPOPULATE)); 11289 } 11290 11291 11292 /* 11293 * Function: sd_taskq_delete 11294 * 11295 * Description: Complementary cleanup routine for sd_taskq_create(). 11296 * 11297 * Context: Kernel thread context. 11298 */ 11299 11300 static void 11301 sd_taskq_delete(void) 11302 { 11303 ASSERT(sd_tq != NULL); 11304 ASSERT(sd_wmr_tq != NULL); 11305 taskq_destroy(sd_tq); 11306 taskq_destroy(sd_wmr_tq); 11307 sd_tq = NULL; 11308 sd_wmr_tq = NULL; 11309 } 11310 11311 11312 /* 11313 * Function: sdstrategy 11314 * 11315 * Description: Driver's strategy (9E) entry point function. 11316 * 11317 * Arguments: bp - pointer to buf(9S) 11318 * 11319 * Return Code: Always returns zero 11320 * 11321 * Context: Kernel thread context. 11322 */ 11323 11324 static int 11325 sdstrategy(struct buf *bp) 11326 { 11327 struct sd_lun *un; 11328 11329 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11330 if (un == NULL) { 11331 bioerror(bp, EIO); 11332 bp->b_resid = bp->b_bcount; 11333 biodone(bp); 11334 return (0); 11335 } 11336 /* As was done in the past, fail new commands if the state is dumping. */ 11337 if (un->un_state == SD_STATE_DUMPING) { 11338 bioerror(bp, ENXIO); 11339 bp->b_resid = bp->b_bcount; 11340 biodone(bp); 11341 return (0); 11342 } 11343 11344 ASSERT(!mutex_owned(SD_MUTEX(un))); 11345 11346 /* 11347 * Commands may sneak in while we release the mutex in 11348 * DDI_SUSPEND; we should block new commands. However, old 11349 * commands that are still in the driver at this point should 11350 * still be allowed to drain. 11351 */ 11352 mutex_enter(SD_MUTEX(un)); 11353 /* 11354 * Must wait here if either the device is suspended or 11355 * if its power level is changing. 11356 */ 11357 while ((un->un_state == SD_STATE_SUSPENDED) || 11358 (un->un_state == SD_STATE_PM_CHANGING)) { 11359 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11360 } 11361 11362 un->un_ncmds_in_driver++; 11363 11364 /* 11365 * atapi: Since we are running the CD for now in PIO mode we need to 11366 * call bp_mapin here to avoid bp_mapin being called in interrupt 11367 * context under the HBA's init_pkt routine. 11368 */ 11369 if (un->un_f_cfg_is_atapi == TRUE) { 11370 mutex_exit(SD_MUTEX(un)); 11371 bp_mapin(bp); 11372 mutex_enter(SD_MUTEX(un)); 11373 } 11374 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11375 un->un_ncmds_in_driver); 11376 11377 mutex_exit(SD_MUTEX(un)); 11378 11379 /* 11380 * This will (eventually) allocate the sd_xbuf area and 11381 * call sd_xbuf_strategy(). We just want to return the 11382 * result of ddi_xbuf_qstrategy so that we have an 11383 * optimized tail call which saves us a stack frame.
11384 */ 11385 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11386 } 11387 11388 11389 /* 11390 * Function: sd_xbuf_strategy 11391 * 11392 * Description: Function for initiating IO operations via the 11393 * ddi_xbuf_qstrategy() mechanism. 11394 * 11395 * Context: Kernel thread context. 11396 */ 11397 11398 static void 11399 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11400 { 11401 struct sd_lun *un = arg; 11402 11403 ASSERT(bp != NULL); 11404 ASSERT(xp != NULL); 11405 ASSERT(un != NULL); 11406 ASSERT(!mutex_owned(SD_MUTEX(un))); 11407 11408 /* 11409 * Initialize the fields in the xbuf and save a pointer to the 11410 * xbuf in bp->b_private. 11411 */ 11412 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11413 11414 /* Send the buf down the iostart chain */ 11415 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11416 } 11417 11418 11419 /* 11420 * Function: sd_xbuf_init 11421 * 11422 * Description: Prepare the given sd_xbuf struct for use. 11423 * 11424 * Arguments: un - ptr to softstate 11425 * bp - ptr to associated buf(9S) 11426 * xp - ptr to associated sd_xbuf 11427 * chain_type - IO chain type to use: 11428 * SD_CHAIN_NULL 11429 * SD_CHAIN_BUFIO 11430 * SD_CHAIN_USCSI 11431 * SD_CHAIN_DIRECT 11432 * SD_CHAIN_DIRECT_PRIORITY 11433 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11434 * initialization; may be NULL if none. 11435 * 11436 * Context: Kernel thread context 11437 */ 11438 11439 static void 11440 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11441 uchar_t chain_type, void *pktinfop) 11442 { 11443 int index; 11444 11445 ASSERT(un != NULL); 11446 ASSERT(bp != NULL); 11447 ASSERT(xp != NULL); 11448 11449 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11450 bp, chain_type); 11451 11452 xp->xb_un = un; 11453 xp->xb_pktp = NULL; 11454 xp->xb_pktinfo = pktinfop; 11455 xp->xb_private = bp->b_private; 11456 xp->xb_blkno = (daddr_t)bp->b_blkno; 11457 11458 /* 11459 * Set up the iostart and iodone chain indexes in the xbuf, based 11460 * upon the specified chain type to use. 11461 */ 11462 switch (chain_type) { 11463 case SD_CHAIN_NULL: 11464 /* 11465 * Fall thru to just use the values for the buf type, even 11466 * though for the NULL chain these values will never be used. 11467 */ 11468 /* FALLTHRU */ 11469 case SD_CHAIN_BUFIO: 11470 index = un->un_buf_chain_type; 11471 break; 11472 case SD_CHAIN_USCSI: 11473 index = un->un_uscsi_chain_type; 11474 break; 11475 case SD_CHAIN_DIRECT: 11476 index = un->un_direct_chain_type; 11477 break; 11478 case SD_CHAIN_DIRECT_PRIORITY: 11479 index = un->un_priority_chain_type; 11480 break; 11481 default: 11482 /* We're really broken if we ever get here... */ 11483 panic("sd_xbuf_init: illegal chain type!"); 11484 /*NOTREACHED*/ 11485 } 11486 11487 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11488 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11489 11490 /* 11491 * It might be a bit easier to simply bzero the entire xbuf above, 11492 * but it turns out that since we init a fair number of members anyway, 11493 * we save a fair number of cycles by doing explicit assignment of zero.
11494 */ 11495 xp->xb_pkt_flags = 0; 11496 xp->xb_dma_resid = 0; 11497 xp->xb_retry_count = 0; 11498 xp->xb_victim_retry_count = 0; 11499 xp->xb_ua_retry_count = 0; 11500 xp->xb_sense_bp = NULL; 11501 xp->xb_sense_status = 0; 11502 xp->xb_sense_state = 0; 11503 xp->xb_sense_resid = 0; 11504 11505 bp->b_private = xp; 11506 bp->b_flags &= ~(B_DONE | B_ERROR); 11507 bp->b_resid = 0; 11508 bp->av_forw = NULL; 11509 bp->av_back = NULL; 11510 bioerror(bp, 0); 11511 11512 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11513 } 11514 11515 11516 /* 11517 * Function: sd_uscsi_strategy 11518 * 11519 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11520 * 11521 * Arguments: bp - buf struct ptr 11522 * 11523 * Return Code: Always returns 0 11524 * 11525 * Context: Kernel thread context 11526 */ 11527 11528 static int 11529 sd_uscsi_strategy(struct buf *bp) 11530 { 11531 struct sd_lun *un; 11532 struct sd_uscsi_info *uip; 11533 struct sd_xbuf *xp; 11534 uchar_t chain_type; 11535 11536 ASSERT(bp != NULL); 11537 11538 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11539 if (un == NULL) { 11540 bioerror(bp, EIO); 11541 bp->b_resid = bp->b_bcount; 11542 biodone(bp); 11543 return (0); 11544 } 11545 11546 ASSERT(!mutex_owned(SD_MUTEX(un))); 11547 11548 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11549 11550 mutex_enter(SD_MUTEX(un)); 11551 /* 11552 * atapi: Since we are running the CD for now in PIO mode we need to 11553 * call bp_mapin here to avoid bp_mapin being called in interrupt 11554 * context under the HBA's init_pkt routine. 11555 */ 11556 if (un->un_f_cfg_is_atapi == TRUE) { 11557 mutex_exit(SD_MUTEX(un)); 11558 bp_mapin(bp); 11559 mutex_enter(SD_MUTEX(un)); 11560 } 11561 un->un_ncmds_in_driver++; 11562 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11563 un->un_ncmds_in_driver); 11564 mutex_exit(SD_MUTEX(un)); 11565 11566 /* 11567 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11568 */ 11569 ASSERT(bp->b_private != NULL); 11570 uip = (struct sd_uscsi_info *)bp->b_private; 11571 11572 switch (uip->ui_flags) { 11573 case SD_PATH_DIRECT: 11574 chain_type = SD_CHAIN_DIRECT; 11575 break; 11576 case SD_PATH_DIRECT_PRIORITY: 11577 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11578 break; 11579 default: 11580 chain_type = SD_CHAIN_USCSI; 11581 break; 11582 } 11583 11584 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 11585 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11586 11587 /* Use the index obtained within xbuf_init */ 11588 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11589 11590 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11591 11592 return (0); 11593 } 11594 11595 11596 /* 11597 * These routines perform raw i/o operations. 11598 */ 11599 /*ARGSUSED*/ 11600 static void 11601 sduscsimin(struct buf *bp) 11602 { 11603 /* 11604 * Do not break up the transfer, because the CDB count would then 11605 * be incorrect and data underruns would result (incomplete 11606 * reads/writes, which would be retried and then fail); see 11607 * sdintr().
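 * (sduscsimin() is the minphys-style callback that sd_send_scsi_cmd() below hands to physio(9F) and aphysio(9F); leaving it empty deliberately opts out of the transfer splitting that sdmin() performs for regular reads and writes.)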
11608 */ 11609 } 11610 11611 11612 11613 /* 11614 * Function: sd_send_scsi_cmd 11615 * 11616 * Description: Runs a USCSI command for user (when called thru sdioctl), 11617 * or for the driver 11618 * 11619 * Arguments: dev - the dev_t for the device 11620 * incmd - ptr to a valid uscsi_cmd struct 11621 * cdbspace - UIO_USERSPACE or UIO_SYSSPACE 11622 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11623 * rqbufspace - UIO_USERSPACE or UIO_SYSSPACE 11624 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11625 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11626 * to use the USCSI "direct" chain and bypass the normal 11627 * command waitq. 11628 * 11629 * Return Code: 0 - successful completion of the given command 11630 * EIO - scsi_reset() failed, or see biowait()/physio() codes. 11631 * ENXIO - soft state not found for specified dev 11632 * EINVAL 11633 * EFAULT - copyin/copyout error 11634 * return code of biowait(9F) or physio(9F): 11635 * EIO - IO error, caller may check incmd->uscsi_status 11636 * ENXIO 11637 * EACCES - reservation conflict 11638 * 11639 * Context: Waits for command to complete. Can sleep. 11640 */ 11641 11642 static int 11643 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, 11644 enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace, 11645 int path_flag) 11646 { 11647 struct sd_uscsi_info *uip; 11648 struct uscsi_cmd *uscmd; 11649 struct sd_lun *un; 11650 struct buf *bp; 11651 int rval; 11652 int flags; 11653 11654 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11655 if (un == NULL) { 11656 return (ENXIO); 11657 } 11658 11659 ASSERT(!mutex_owned(SD_MUTEX(un))); 11660 11661 #ifdef SDDEBUG 11662 switch (dataspace) { 11663 case UIO_USERSPACE: 11664 SD_TRACE(SD_LOG_IO, un, 11665 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 11666 break; 11667 case UIO_SYSSPACE: 11668 SD_TRACE(SD_LOG_IO, un, 11669 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 11670 break; 11671 default: 11672 SD_TRACE(SD_LOG_IO, un, 11673 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 11674 break; 11675 } 11676 #endif 11677 11678 /* 11679 * Perform resets directly; no need to generate a command to do it. 11680 */ 11681 if (incmd->uscsi_flags & (USCSI_RESET | USCSI_RESET_ALL)) { 11682 flags = ((incmd->uscsi_flags & USCSI_RESET_ALL) != 0) ? 11683 RESET_ALL : RESET_TARGET; 11684 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: Issuing reset\n"); 11685 if (scsi_reset(SD_ADDRESS(un), flags) == 0) { 11686 /* Reset attempt was unsuccessful */ 11687 SD_TRACE(SD_LOG_IO, un, 11688 "sd_send_scsi_cmd: reset: failure\n"); 11689 return (EIO); 11690 } 11691 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: reset: success\n"); 11692 return (0); 11693 } 11694 11695 /* Perfunctory sanity check... */ 11696 if (incmd->uscsi_cdblen <= 0) { 11697 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11698 "invalid uscsi_cdblen, returning EINVAL\n"); 11699 return (EINVAL); 11700 } 11701 11702 /* 11703 * In order to not worry about where the uscsi structure came from 11704 * (or where the cdb it points to came from) we're going to make 11705 * kmem_alloc'd copies of them here. This will also allow reference 11706 * to the data they contain long after this process has gone to 11707 * sleep and its kernel stack has been unmapped, etc. 11708 * 11709 * First get some memory for the uscsi_cmd struct and copy the 11710 * contents of the given uscsi_cmd struct into it. 
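 * (The same copy path serves both user and kernel callers: as the code below shows, when cdbspace is UIO_SYSSPACE the FKIOCTL flag tells ddi_copyin(9F) that the source address is a kernel address.)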
11711 */ 11712 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 11713 bcopy(incmd, uscmd, sizeof (struct uscsi_cmd)); 11714 11715 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: uscsi_cmd", 11716 (uchar_t *)uscmd, sizeof (struct uscsi_cmd), SD_LOG_HEX); 11717 11718 /* 11719 * Now get some space for the CDB, and copy the given CDB into 11720 * it. Use ddi_copyin() in case the data is in user space. 11721 */ 11722 uscmd->uscsi_cdb = kmem_zalloc((size_t)incmd->uscsi_cdblen, KM_SLEEP); 11723 flags = (cdbspace == UIO_SYSSPACE) ? FKIOCTL : 0; 11724 if (ddi_copyin(incmd->uscsi_cdb, uscmd->uscsi_cdb, 11725 (uint_t)incmd->uscsi_cdblen, flags) != 0) { 11726 kmem_free(uscmd->uscsi_cdb, (size_t)incmd->uscsi_cdblen); 11727 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11728 return (EFAULT); 11729 } 11730 11731 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: CDB", 11732 (uchar_t *)uscmd->uscsi_cdb, incmd->uscsi_cdblen, SD_LOG_HEX); 11733 11734 bp = getrbuf(KM_SLEEP); 11735 11736 /* 11737 * Allocate an sd_uscsi_info struct and fill it with the info 11738 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11739 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11740 * since we allocate the buf here in this function, we do not 11741 * need to preserve the prior contents of b_private. 11742 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11743 */ 11744 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11745 uip->ui_flags = path_flag; 11746 uip->ui_cmdp = uscmd; 11747 bp->b_private = uip; 11748 11749 /* 11750 * Initialize Request Sense buffering, if requested. 11751 */ 11752 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11753 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11754 /* 11755 * Here uscmd->uscsi_rqbuf currently points to the caller's 11756 * buffer, but we replace this with a kernel buffer that 11757 * we allocate to use with the sense data. The sense data 11758 * (if present) gets copied into this new buffer before the 11759 * command is completed. Then we copy the sense data from 11760 * our allocated buf into the caller's buffer below. Note 11761 * that incmd->uscsi_rqbuf and incmd->uscsi_rqlen are used 11762 * below to perform the copy back to the caller's buf. 11763 */ 11764 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 11765 if (rqbufspace == UIO_USERSPACE) { 11766 uscmd->uscsi_rqlen = SENSE_LENGTH; 11767 uscmd->uscsi_rqresid = SENSE_LENGTH; 11768 } else { 11769 uchar_t rlen = min(SENSE_LENGTH, uscmd->uscsi_rqlen); 11770 uscmd->uscsi_rqlen = rlen; 11771 uscmd->uscsi_rqresid = rlen; 11772 } 11773 } else { 11774 uscmd->uscsi_rqbuf = NULL; 11775 uscmd->uscsi_rqlen = 0; 11776 uscmd->uscsi_rqresid = 0; 11777 } 11778 11779 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: rqbuf:0x%p rqlen:%d\n", 11780 uscmd->uscsi_rqbuf, uscmd->uscsi_rqlen); 11781 11782 if (un->un_f_is_fibre == FALSE) { 11783 /* 11784 * Force asynchronous mode, if necessary. Doing this here 11785 * has the unfortunate effect of running other queued 11786 * commands async also, but since the main purpose of this 11787 * capability is downloading new drive firmware, we can 11788 * probably live with it. 
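 * (The pattern below first queries the current setting with scsi_ifgetcap(9F) on the "synchronous" capability and only calls scsi_ifsetcap(9F) when a change is actually needed; a return value of 1 from the set indicates success.)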
11789 */ 11790 if ((uscmd->uscsi_flags & USCSI_ASYNC) != 0) { 11791 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11792 == 1) { 11793 if (scsi_ifsetcap(SD_ADDRESS(un), 11794 "synchronous", 0, 1) == 1) { 11795 SD_TRACE(SD_LOG_IO, un, 11796 "sd_send_scsi_cmd: forced async ok\n"); 11797 } else { 11798 SD_TRACE(SD_LOG_IO, un, 11799 "sd_send_scsi_cmd:\ 11800 forced async failed\n"); 11801 rval = EINVAL; 11802 goto done; 11803 } 11804 } 11805 } 11806 11807 /* 11808 * Re-enable synchronous mode, if requested 11809 */ 11810 if (uscmd->uscsi_flags & USCSI_SYNC) { 11811 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11812 == 0) { 11813 int i = scsi_ifsetcap(SD_ADDRESS(un), 11814 "synchronous", 1, 1); 11815 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11816 "re-enabled sync %s\n", 11817 (i == 1) ? "ok" : "failed"); 11818 } 11819 } 11820 } 11821 11822 /* 11823 * Commands sent with priority are intended for error recovery 11824 * situations, and do not have retries performed. 11825 */ 11826 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11827 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11828 } 11829 11830 /* 11831 * If we're going to do actual I/O, let physio do all the right things 11832 */ 11833 if (uscmd->uscsi_buflen != 0) { 11834 struct iovec aiov; 11835 struct uio auio; 11836 struct uio *uio = &auio; 11837 11838 bzero(&auio, sizeof (struct uio)); 11839 bzero(&aiov, sizeof (struct iovec)); 11840 aiov.iov_base = uscmd->uscsi_bufaddr; 11841 aiov.iov_len = uscmd->uscsi_buflen; 11842 uio->uio_iov = &aiov; 11843 11844 uio->uio_iovcnt = 1; 11845 uio->uio_resid = uscmd->uscsi_buflen; 11846 uio->uio_segflg = dataspace; 11847 11848 /* 11849 * physio() will block here until the command completes.... 11850 */ 11851 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling physio.\n"); 11852 11853 rval = physio(sd_uscsi_strategy, bp, dev, 11854 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE), 11855 sduscsimin, uio); 11856 11857 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11858 "returned from physio with 0x%x\n", rval); 11859 11860 } else { 11861 /* 11862 * We have to mimic what physio would do here! Argh! 11863 */ 11864 bp->b_flags = B_BUSY | 11865 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE); 11866 bp->b_edev = dev; 11867 bp->b_dev = cmpdev(dev); /* maybe unnecessary? */ 11868 bp->b_bcount = 0; 11869 bp->b_blkno = 0; 11870 11871 SD_TRACE(SD_LOG_IO, un, 11872 "sd_send_scsi_cmd: calling sd_uscsi_strategy...\n"); 11873 11874 (void) sd_uscsi_strategy(bp); 11875 11876 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling biowait\n"); 11877 11878 rval = biowait(bp); 11879 11880 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11881 "returned from biowait with 0x%x\n", rval); 11882 } 11883 11884 done: 11885 11886 #ifdef SDDEBUG 11887 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11888 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11889 uscmd->uscsi_status, uscmd->uscsi_resid); 11890 if (uscmd->uscsi_bufaddr != NULL) { 11891 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11892 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11893 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11894 if (dataspace == UIO_SYSSPACE) { 11895 SD_DUMP_MEMORY(un, SD_LOG_IO, 11896 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11897 uscmd->uscsi_buflen, SD_LOG_HEX); 11898 } 11899 } 11900 #endif 11901 11902 /* 11903 * Get the status and residual to return to the caller. 
11904 */ 11905 incmd->uscsi_status = uscmd->uscsi_status; 11906 incmd->uscsi_resid = uscmd->uscsi_resid; 11907 11908 /* 11909 * If the caller wants sense data, copy back whatever sense data 11910 * we may have gotten, and update the relevant rqsense info. 11911 */ 11912 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11913 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11914 11915 int rqlen = uscmd->uscsi_rqlen - uscmd->uscsi_rqresid; 11916 rqlen = min(((int)incmd->uscsi_rqlen), rqlen); 11917 11918 /* Update the Request Sense status and resid */ 11919 incmd->uscsi_rqresid = incmd->uscsi_rqlen - rqlen; 11920 incmd->uscsi_rqstatus = uscmd->uscsi_rqstatus; 11921 11922 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11923 "uscsi_rqstatus: 0x%02x uscsi_rqresid:0x%x\n", 11924 incmd->uscsi_rqstatus, incmd->uscsi_rqresid); 11925 11926 /* Copy out the sense data for user processes */ 11927 if ((incmd->uscsi_rqbuf != NULL) && (rqlen != 0)) { 11928 int flags = 11929 (rqbufspace == UIO_USERSPACE) ? 0 : FKIOCTL; 11930 if (ddi_copyout(uscmd->uscsi_rqbuf, incmd->uscsi_rqbuf, 11931 rqlen, flags) != 0) { 11932 rval = EFAULT; 11933 } 11934 /* 11935 * Note: Can't touch incmd->uscsi_rqbuf so use 11936 * uscmd->uscsi_rqbuf instead. They're the same. 11937 */ 11938 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11939 "incmd->uscsi_rqbuf: 0x%p rqlen:%d\n", 11940 incmd->uscsi_rqbuf, rqlen); 11941 SD_DUMP_MEMORY(un, SD_LOG_IO, "rq", 11942 (uchar_t *)uscmd->uscsi_rqbuf, rqlen, SD_LOG_HEX); 11943 } 11944 } 11945 11946 /* 11947 * Free allocated resources and return; mapout the buf in case it was 11948 * mapped in by a lower layer. 11949 */ 11950 bp_mapout(bp); 11951 freerbuf(bp); 11952 kmem_free(uip, sizeof (struct sd_uscsi_info)); 11953 if (uscmd->uscsi_rqbuf != NULL) { 11954 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 11955 } 11956 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 11957 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11958 11959 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: exit\n"); 11960 11961 return (rval); 11962 } 11963 11964 11965 /* 11966 * Function: sd_buf_iodone 11967 * 11968 * Description: Frees the sd_xbuf & returns the buf to its originator. 11969 * 11970 * Context: May be called from interrupt context. 11971 */ 11972 /* ARGSUSED */ 11973 static void 11974 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11975 { 11976 struct sd_xbuf *xp; 11977 11978 ASSERT(un != NULL); 11979 ASSERT(bp != NULL); 11980 ASSERT(!mutex_owned(SD_MUTEX(un))); 11981 11982 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11983 11984 xp = SD_GET_XBUF(bp); 11985 ASSERT(xp != NULL); 11986 11987 mutex_enter(SD_MUTEX(un)); 11988 11989 /* 11990 * Grab time when the cmd completed. 11991 * This is used for determining if the system has been 11992 * idle long enough to make it idle to the PM framework. 11993 * This is for lowering the overhead, and therefore improving 11994 * performance per I/O operation. 
11995 */ 11996 un->un_pm_idle_time = ddi_get_time(); 11997 11998 un->un_ncmds_in_driver--; 11999 ASSERT(un->un_ncmds_in_driver >= 0); 12000 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 12001 un->un_ncmds_in_driver); 12002 12003 mutex_exit(SD_MUTEX(un)); 12004 12005 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 12006 biodone(bp); /* bp is gone after this */ 12007 12008 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 12009 } 12010 12011 12012 /* 12013 * Function: sd_uscsi_iodone 12014 * 12015 * Description: Frees the sd_xbuf & returns the buf to its originator. 12016 * 12017 * Context: May be called from interrupt context. 12018 */ 12019 /* ARGSUSED */ 12020 static void 12021 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12022 { 12023 struct sd_xbuf *xp; 12024 12025 ASSERT(un != NULL); 12026 ASSERT(bp != NULL); 12027 12028 xp = SD_GET_XBUF(bp); 12029 ASSERT(xp != NULL); 12030 ASSERT(!mutex_owned(SD_MUTEX(un))); 12031 12032 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 12033 12034 bp->b_private = xp->xb_private; 12035 12036 mutex_enter(SD_MUTEX(un)); 12037 12038 /* 12039 * Grab time when the cmd completed. 12040 * This is used for determining if the system has been 12041 * idle long enough to make it idle to the PM framework. 12042 * This is for lowering the overhead, and therefore improving 12043 * performance per I/O operation. 12044 */ 12045 un->un_pm_idle_time = ddi_get_time(); 12046 12047 un->un_ncmds_in_driver--; 12048 ASSERT(un->un_ncmds_in_driver >= 0); 12049 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 12050 un->un_ncmds_in_driver); 12051 12052 mutex_exit(SD_MUTEX(un)); 12053 12054 kmem_free(xp, sizeof (struct sd_xbuf)); 12055 biodone(bp); 12056 12057 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 12058 } 12059 12060 12061 /* 12062 * Function: sd_mapblockaddr_iostart 12063 * 12064 * Description: Verify request lies within the partition limits for 12065 * the indicated minor device. Issue "overrun" buf if 12066 * request would exceed partition range. Convert 12067 * partition-relative block address to absolute. 12068 * 12069 * Context: Can sleep 12070 * 12071 * Issues: This follows what the old code did, in terms of accessing 12072 * some of the partition info in the unit struct without holding 12073 * the mutex. This is a general issue: if the partition info 12074 * can be altered while IO is in progress... as soon as we send 12075 * a buf, its partitioning can be invalid before it gets to the 12076 * device. Probably the right fix is to move partitioning out 12077 * of the driver entirely. 12078 */ 12079 12080 static void 12081 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 12082 { 12083 daddr_t nblocks; /* #blocks in the given partition */ 12084 daddr_t blocknum; /* Block number specified by the buf */ 12085 size_t requested_nblocks; 12086 size_t available_nblocks; 12087 int partition; 12088 diskaddr_t partition_offset; 12089 struct sd_xbuf *xp; 12090 12091 12092 ASSERT(un != NULL); 12093 ASSERT(bp != NULL); 12094 ASSERT(!mutex_owned(SD_MUTEX(un))); 12095 12096 SD_TRACE(SD_LOG_IO_PARTITION, un, 12097 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 12098 12099 xp = SD_GET_XBUF(bp); 12100 ASSERT(xp != NULL); 12101 12102 /* 12103 * If the geometry is not indicated as valid, attempt to access 12104 * the unit & verify the geometry/label. This can be the case for 12105 * removable-media devices, or if the device was opened in 12106 * NDELAY/NONBLOCK mode.
12107 */ 12108 if ((un->un_f_geometry_is_valid != TRUE) && 12109 (sd_ready_and_valid(un) != SD_READY_VALID)) { 12110 /* 12111 * For removable devices it is possible to start an I/O 12112 * without media by opening the device in nodelay mode. 12113 * Also for writable CDs there can be many scenarios where 12114 * there is no geometry yet but the volume manager is trying to 12115 * issue a read() just because it can see the TOC on the CD. So 12116 * do not print a message for removables. 12117 */ 12118 if (!ISREMOVABLE(un)) { 12119 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12120 "i/o to invalid geometry\n"); 12121 } 12122 bioerror(bp, EIO); 12123 bp->b_resid = bp->b_bcount; 12124 SD_BEGIN_IODONE(index, un, bp); 12125 return; 12126 } 12127 12128 partition = SDPART(bp->b_edev); 12129 12130 /* #blocks in partition */ 12131 nblocks = un->un_map[partition].dkl_nblk; 12132 12133 /* Use of a local variable potentially improves performance slightly */ 12134 partition_offset = un->un_offset[partition]; 12135 12136 /* 12137 * blocknum is the starting block number of the request. At this 12138 * point it is still relative to the start of the minor device. 12139 */ 12140 blocknum = xp->xb_blkno; 12141 12142 /* 12143 * Legacy: If the starting block number is one past the last block 12144 * in the partition, do not set B_ERROR in the buf. 12145 */ 12146 if (blocknum == nblocks) { 12147 goto error_exit; 12148 } 12149 12150 /* 12151 * Confirm that the first block of the request lies within the 12152 * partition limits. Also the requested number of bytes must be 12153 * a multiple of the system block size. 12154 */ 12155 if ((blocknum < 0) || (blocknum >= nblocks) || 12156 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 12157 bp->b_flags |= B_ERROR; 12158 goto error_exit; 12159 } 12160 12161 /* 12162 * If the requested # blocks exceeds the available # blocks, that 12163 * is an overrun of the partition. 12164 */ 12165 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 12166 available_nblocks = (size_t)(nblocks - blocknum); 12167 ASSERT(nblocks >= blocknum); 12168 12169 if (requested_nblocks > available_nblocks) { 12170 /* 12171 * Allocate an "overrun" buf to allow the request to proceed 12172 * for the amount of space available in the partition. The 12173 * amount not transferred will be added into the b_resid 12174 * when the operation is complete. The overrun buf 12175 * replaces the original buf here, and the original buf 12176 * is saved inside the overrun buf, for later use. 12177 */ 12178 size_t resid = SD_SYSBLOCKS2BYTES(un, 12179 (offset_t)(requested_nblocks - available_nblocks)); 12180 size_t count = bp->b_bcount - resid; 12181 /* 12182 * Note: count is an unsigned entity, thus it can NEVER 12183 * be less than 0, so ASSERT that the original values are 12184 * correct. 12185 */ 12186 ASSERT(bp->b_bcount >= resid); 12187 12188 bp = sd_bioclone_alloc(bp, count, blocknum, 12189 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12190 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12191 ASSERT(xp != NULL); 12192 } 12193 12194 /* At this point there should be no residual for this buf. */ 12195 ASSERT(bp->b_resid == 0); 12196 12197 /* Convert the block number to an absolute address.
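 * For example (illustrative numbers): if this partition starts at un_offset[partition] == 2048 and the buf targets partition-relative block 100, xb_blkno becomes 2148.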
*/ 12198 xp->xb_blkno += partition_offset; 12199 12200 SD_NEXT_IOSTART(index, un, bp); 12201 12202 SD_TRACE(SD_LOG_IO_PARTITION, un, 12203 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12204 12205 return; 12206 12207 error_exit: 12208 bp->b_resid = bp->b_bcount; 12209 SD_BEGIN_IODONE(index, un, bp); 12210 SD_TRACE(SD_LOG_IO_PARTITION, un, 12211 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12212 } 12213 12214 12215 /* 12216 * Function: sd_mapblockaddr_iodone 12217 * 12218 * Description: Completion-side processing for partition management. 12219 * 12220 * Context: May be called under interrupt context 12221 */ 12222 12223 static void 12224 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12225 { 12226 /* int partition; */ /* Not used, see below. */ 12227 ASSERT(un != NULL); 12228 ASSERT(bp != NULL); 12229 ASSERT(!mutex_owned(SD_MUTEX(un))); 12230 12231 SD_TRACE(SD_LOG_IO_PARTITION, un, 12232 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12233 12234 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12235 /* 12236 * We have an "overrun" buf to deal with... 12237 */ 12238 struct sd_xbuf *xp; 12239 struct buf *obp; /* ptr to the original buf */ 12240 12241 xp = SD_GET_XBUF(bp); 12242 ASSERT(xp != NULL); 12243 12244 /* Retrieve the pointer to the original buf */ 12245 obp = (struct buf *)xp->xb_private; 12246 ASSERT(obp != NULL); 12247 12248 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12249 bioerror(obp, bp->b_error); 12250 12251 sd_bioclone_free(bp); 12252 12253 /* 12254 * Get back the original buf. 12255 * Note that since the restoration of xb_blkno below 12256 * was removed, the sd_xbuf is not needed. 12257 */ 12258 bp = obp; 12259 /* 12260 * xp = SD_GET_XBUF(bp); 12261 * ASSERT(xp != NULL); 12262 */ 12263 } 12264 12265 /* 12266 * Convert xp->xb_blkno back to a minor-device relative value. 12267 * Note: this has been commented out, as it is not needed in the 12268 * current implementation of the driver (i.e., since this function 12269 * is at the top of the layering chains, the info would be 12270 * discarded) and it is in the "hot" IO path. 12271 * 12272 * partition = getminor(bp->b_edev) & SDPART_MASK; 12273 * xp->xb_blkno -= un->un_offset[partition]; 12274 */ 12275 12276 SD_NEXT_IODONE(index, un, bp); 12277 12278 SD_TRACE(SD_LOG_IO_PARTITION, un, 12279 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12280 } 12281 12282 12283 /* 12284 * Function: sd_mapblocksize_iostart 12285 * 12286 * Description: Convert between system block size (un->un_sys_blocksize) 12287 * and target block size (un->un_tgt_blocksize). 12288 * 12289 * Context: Can sleep to allocate resources. 12290 * 12291 * Assumptions: A higher layer has already performed any partition validation, 12292 * and converted the xp->xb_blkno to an absolute value relative 12293 * to the start of the device. 12294 * 12295 * It is also assumed that the higher layer has implemented 12296 * an "overrun" mechanism for the case where the request would 12297 * read/write beyond the end of a partition. In this case we 12298 * assume (and ASSERT) that bp->b_resid == 0. 12299 * 12300 * Note: The implementation for this routine assumes the target 12301 * block size remains constant between allocation and transport.
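 * (A typical case, for illustration: a CD-ROM exposing 2048-byte target blocks while un_sys_blocksize is 512, so four system blocks map onto each target block and unaligned requests need the shadow-buf handling below.)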
12302 */ 12303 12304 static void 12305 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12306 { 12307 struct sd_mapblocksize_info *bsp; 12308 struct sd_xbuf *xp; 12309 offset_t first_byte; 12310 daddr_t start_block, end_block; 12311 daddr_t request_bytes; 12312 ushort_t is_aligned = FALSE; 12313 12314 ASSERT(un != NULL); 12315 ASSERT(bp != NULL); 12316 ASSERT(!mutex_owned(SD_MUTEX(un))); 12317 ASSERT(bp->b_resid == 0); 12318 12319 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12320 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12321 12322 /* 12323 * For a non-writable CD, a write request is an error 12324 */ 12325 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12326 (un->un_f_mmc_writable_media == FALSE)) { 12327 bioerror(bp, EIO); 12328 bp->b_resid = bp->b_bcount; 12329 SD_BEGIN_IODONE(index, un, bp); 12330 return; 12331 } 12332 12333 /* 12334 * We do not need a shadow buf if the device is using 12335 * un->un_sys_blocksize as its block size or if bcount == 0. 12336 * In this case there is no layer-private data block allocated. 12337 */ 12338 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12339 (bp->b_bcount == 0)) { 12340 goto done; 12341 } 12342 12343 #if defined(__i386) || defined(__amd64) 12344 /* We do not support non-block-aligned transfers for ROD devices */ 12345 ASSERT(!ISROD(un)); 12346 #endif 12347 12348 xp = SD_GET_XBUF(bp); 12349 ASSERT(xp != NULL); 12350 12351 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12352 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12353 un->un_tgt_blocksize, un->un_sys_blocksize); 12354 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12355 "request start block:0x%x\n", xp->xb_blkno); 12356 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12357 "request len:0x%x\n", bp->b_bcount); 12358 12359 /* 12360 * Allocate the layer-private data area for the mapblocksize layer. 12361 * Layers are allowed to use the xb_private member of the sd_xbuf 12362 * struct to store the pointer to their layer-private data block, but 12363 * each layer also has the responsibility of restoring the prior 12364 * contents of xb_private before returning the buf/xbuf to the 12365 * higher layer that sent it. 12366 * 12367 * Here we save the prior contents of xp->xb_private into the 12368 * bsp->mbs_oprivate field of our layer-private data area. This value 12369 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12370 * the layer-private area and returning the buf/xbuf to the layer 12371 * that sent it. 12372 * 12373 * Note that here we use kmem_zalloc for the allocation as there are 12374 * parts of the mapblocksize code that expect certain fields to be 12375 * zero unless explicitly set to a required value. 12376 */ 12377 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12378 bsp->mbs_oprivate = xp->xb_private; 12379 xp->xb_private = bsp; 12380 12381 /* 12382 * This treats the data on the disk (target) as an array of bytes. 12383 * first_byte is the byte offset, from the beginning of the device, 12384 * to the location of the request. This is converted from a 12385 * un->un_sys_blocksize block address to a byte offset, and then back 12386 * to a block address based upon a un->un_tgt_blocksize block size. 12387 * 12388 * xp->xb_blkno should be absolute upon entry into this function, 12389 * but it is based upon partitions that use the "system" 12390 * block size. It must be adjusted to reflect the block size of 12391 * the target.
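 * Worked example (illustrative numbers, 512-byte system blocks, 2048-byte target blocks): for xb_blkno == 3 and b_bcount == 1024, first_byte is 1536, start_block becomes 0, and end_block is (1536 + 1024 + 2047) / 2048 == 2, so request_bytes below is 4096.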
12392 * 12393 * Note that end_block is actually the block that follows the last 12394 * block of the request, but that's what is needed for the computation. 12395 */ 12396 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12397 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12398 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12399 un->un_tgt_blocksize; 12400 12401 /* request_bytes is rounded up to a multiple of the target block size */ 12402 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12403 12404 /* 12405 * See if the starting address of the request and the request 12406 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12407 * then we do not need to allocate a shadow buf to handle the request. 12408 */ 12409 if (((first_byte % un->un_tgt_blocksize) == 0) && 12410 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12411 is_aligned = TRUE; 12412 } 12413 12414 if ((bp->b_flags & B_READ) == 0) { 12415 /* 12416 * Lock the range for a write operation. An aligned request is 12417 * considered a simple write; otherwise the request must be a 12418 * read-modify-write. 12419 */ 12420 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12421 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12422 } 12423 12424 /* 12425 * Alloc a shadow buf if the request is not aligned. Also, this is 12426 * where the READ command is generated for a read-modify-write. (The 12427 * write phase is deferred until after the read completes.) 12428 */ 12429 if (is_aligned == FALSE) { 12430 12431 struct sd_mapblocksize_info *shadow_bsp; 12432 struct sd_xbuf *shadow_xp; 12433 struct buf *shadow_bp; 12434 12435 /* 12436 * Allocate the shadow buf and it associated xbuf. Note that 12437 * after this call the xb_blkno value in both the original 12438 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12439 * same: absolute relative to the start of the device, and 12440 * adjusted for the target block size. The b_blkno in the 12441 * shadow buf will also be set to this value. We should never 12442 * change b_blkno in the original bp however. 12443 * 12444 * Note also that the shadow buf will always need to be a 12445 * READ command, regardless of whether the incoming command 12446 * is a READ or a WRITE. 12447 */ 12448 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12449 xp->xb_blkno, 12450 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12451 12452 shadow_xp = SD_GET_XBUF(shadow_bp); 12453 12454 /* 12455 * Allocate the layer-private data for the shadow buf. 12456 * (No need to preserve xb_private in the shadow xbuf.) 12457 */ 12458 shadow_xp->xb_private = shadow_bsp = 12459 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12460 12461 /* 12462 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12463 * to figure out where the start of the user data is (based upon 12464 * the system block size) in the data returned by the READ 12465 * command (which will be based upon the target blocksize). Note 12466 * that this is only really used if the request is unaligned. 
12467 */ 12468 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12469 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12470 ASSERT((bsp->mbs_copy_offset >= 0) && 12471 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12472 12473 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12474 12475 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12476 12477 /* Transfer the wmap (if any) to the shadow buf */ 12478 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12479 bsp->mbs_wmp = NULL; 12480 12481 /* 12482 * The shadow buf goes on from here in place of the 12483 * original buf. 12484 */ 12485 shadow_bsp->mbs_orig_bp = bp; 12486 bp = shadow_bp; 12487 } 12488 12489 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12490 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12491 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12492 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12493 request_bytes); 12494 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12495 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12496 12497 done: 12498 SD_NEXT_IOSTART(index, un, bp); 12499 12500 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12501 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12502 } 12503 12504 12505 /* 12506 * Function: sd_mapblocksize_iodone 12507 * 12508 * Description: Completion side processing for block-size mapping. 12509 * 12510 * Context: May be called under interrupt context 12511 */ 12512 12513 static void 12514 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12515 { 12516 struct sd_mapblocksize_info *bsp; 12517 struct sd_xbuf *xp; 12518 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12519 struct buf *orig_bp; /* ptr to the original buf */ 12520 offset_t shadow_end; 12521 offset_t request_end; 12522 offset_t shadow_start; 12523 ssize_t copy_offset; 12524 size_t copy_length; 12525 size_t shortfall; 12526 uint_t is_write; /* TRUE if this bp is a WRITE */ 12527 uint_t has_wmap; /* TRUE if this bp has a wmap */ 12528 12529 ASSERT(un != NULL); 12530 ASSERT(bp != NULL); 12531 12532 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12533 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12534 12535 /* 12536 * There is no shadow buf or layer-private data if the target is 12537 * using un->un_sys_blocksize as its block size or if bcount == 0. 12538 */ 12539 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12540 (bp->b_bcount == 0)) { 12541 goto exit; 12542 } 12543 12544 xp = SD_GET_XBUF(bp); 12545 ASSERT(xp != NULL); 12546 12547 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12548 bsp = xp->xb_private; 12549 12550 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12551 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12552 12553 if (is_write) { 12554 /* 12555 * For a WRITE request we must free up the block range that 12556 * we have locked up. This holds regardless of whether this is 12557 * an aligned write request or a read-modify-write request. 12558 */ 12559 sd_range_unlock(un, bsp->mbs_wmp); 12560 bsp->mbs_wmp = NULL; 12561 } 12562 12563 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12564 /* 12565 * An aligned read or write command will have no shadow buf; 12566 * there is not much else to do with it. 12567 */ 12568 goto done; 12569 } 12570 12571 orig_bp = bsp->mbs_orig_bp; 12572 ASSERT(orig_bp != NULL); 12573 orig_xp = SD_GET_XBUF(orig_bp); 12574 ASSERT(orig_xp != NULL); 12575 ASSERT(!mutex_owned(SD_MUTEX(un))); 12576 12577 if (!is_write && has_wmap) { 12578 /* 12579 * A READ with a wmap means this is the READ phase of a 12580 * read-modify-write.
12581 		 * we do not proceed with the WRITE phase or copy any data.
12582 		 * Just release the write maps and return with an error.
12583 		 */
12584 		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
12585 			orig_bp->b_resid = orig_bp->b_bcount;
12586 			bioerror(orig_bp, bp->b_error);
12587 			sd_range_unlock(un, bsp->mbs_wmp);
12588 			goto freebuf_done;
12589 		}
12590 	}
12591 
12592 	/*
12593 	 * Here is where we set up to copy the data from the shadow buf
12594 	 * into the space associated with the original buf.
12595 	 *
12596 	 * To deal with the conversion between block sizes, these
12597 	 * computations treat the data as an array of bytes, with the
12598 	 * first byte (byte 0) corresponding to the first byte in the
12599 	 * first block on the disk.
12600 	 */
12601 
12602 	/*
12603 	 * shadow_start and shadow_len indicate the location and size of
12604 	 * the data returned with the shadow IO request.
12605 	 */
12606 	shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
12607 	shadow_end   = shadow_start + bp->b_bcount - bp->b_resid;
12608 
12609 	/*
12610 	 * copy_offset gives the offset (in bytes) from the start of the first
12611 	 * block of the READ request to the beginning of the data. We retrieve
12612 	 * this value from the layer-private data of the shadow xbuf
12613 	 * (mbs_copy_offset), where it was saved by sd_mapblocksize_iostart().
12614 	 * copy_length gives the amount of data to be copied (in bytes).
12615 	 */
12616 	copy_offset  = bsp->mbs_copy_offset;
12617 	ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
12618 	copy_length  = orig_bp->b_bcount;
12619 	request_end  = shadow_start + copy_offset + orig_bp->b_bcount;
12620 
12621 	/*
12622 	 * Set up the resid and error fields of orig_bp as appropriate.
12623 	 */
12624 	if (shadow_end >= request_end) {
12625 		/* We got all the requested data; set resid to zero */
12626 		orig_bp->b_resid = 0;
12627 	} else {
12628 		/*
12629 		 * We failed to get enough data to fully satisfy the original
12630 		 * request. Just copy back whatever data we got and set
12631 		 * up the residual and error code as required.
12632 		 *
12633 		 * 'shortfall' is the amount by which the data received with the
12634 		 * shadow buf has "fallen short" of the requested amount.
12635 		 */
12636 		shortfall = (size_t)(request_end - shadow_end);
12637 
12638 		if (shortfall > orig_bp->b_bcount) {
12639 			/*
12640 			 * We did not get enough data to even partially
12641 			 * fulfill the original request. The residual is
12642 			 * equal to the amount requested.
12643 			 */
12644 			orig_bp->b_resid = orig_bp->b_bcount;
12645 		} else {
12646 			/*
12647 			 * We did not get all the data that we requested
12648 			 * from the device, but we will try to return what
12649 			 * portion we did get.
12650 			 */
12651 			orig_bp->b_resid = shortfall;
12652 		}
12653 		ASSERT(copy_length >= orig_bp->b_resid);
12654 		copy_length  -= orig_bp->b_resid;
12655 	}
12656 
12657 	/* Propagate the error code from the shadow buf to the original buf */
12658 	bioerror(orig_bp, bp->b_error);
12659 
12660 	if (is_write) {
12661 		goto freebuf_done;	/* No data copying for a WRITE */
12662 	}
12663 
12664 	if (has_wmap) {
12665 		/*
12666 		 * This is a READ command from the READ phase of a
12667 		 * read-modify-write request. We have to copy the data given
12668 		 * by the user OVER the data returned by the READ command,
12669 		 * then convert the command from a READ to a WRITE and send
12670 		 * it back to the target.
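		 *
		 * Sketch of the sequence (sizes assumed for illustration
		 * only): to overwrite bytes 512-1023 of a 2048-byte target
		 * block, the driver
		 *	1) READs the full 2048-byte block via the shadow buf;
		 *	2) bcopy()s the 512 bytes of user data over the
		 *	   returned data at mbs_copy_offset 512;
		 *	3) converts the shadow buf from a READ to a WRITE and
		 *	   dispatches it to the taskq for transport.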
12671 */ 12672 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12673 copy_length); 12674 12675 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12676 12677 /* 12678 * Dispatch the WRITE command to the taskq thread, which 12679 * will in turn send the command to the target. When the 12680 * WRITE command completes, we (sd_mapblocksize_iodone()) 12681 * will get called again as part of the iodone chain 12682 * processing for it. Note that we will still be dealing 12683 * with the shadow buf at that point. 12684 */ 12685 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12686 KM_NOSLEEP) != 0) { 12687 /* 12688 * Dispatch was successful so we are done. Return 12689 * without going any higher up the iodone chain. Do 12690 * not free up any layer-private data until after the 12691 * WRITE completes. 12692 */ 12693 return; 12694 } 12695 12696 /* 12697 * Dispatch of the WRITE command failed; set up the error 12698 * condition and send this IO back up the iodone chain. 12699 */ 12700 bioerror(orig_bp, EIO); 12701 orig_bp->b_resid = orig_bp->b_bcount; 12702 12703 } else { 12704 /* 12705 * This is a regular READ request (ie, not a RMW). Copy the 12706 * data from the shadow buf into the original buf. The 12707 * copy_offset compensates for any "misalignment" between the 12708 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12709 * original buf (with its un->un_sys_blocksize blocks). 12710 */ 12711 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12712 copy_length); 12713 } 12714 12715 freebuf_done: 12716 12717 /* 12718 * At this point we still have both the shadow buf AND the original 12719 * buf to deal with, as well as the layer-private data area in each. 12720 * Local variables are as follows: 12721 * 12722 * bp -- points to shadow buf 12723 * xp -- points to xbuf of shadow buf 12724 * bsp -- points to layer-private data area of shadow buf 12725 * orig_bp -- points to original buf 12726 * 12727 * First free the shadow buf and its associated xbuf, then free the 12728 * layer-private data area from the shadow buf. There is no need to 12729 * restore xb_private in the shadow xbuf. 12730 */ 12731 sd_shadow_buf_free(bp); 12732 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12733 12734 /* 12735 * Now update the local variables to point to the original buf, xbuf, 12736 * and layer-private area. 12737 */ 12738 bp = orig_bp; 12739 xp = SD_GET_XBUF(bp); 12740 ASSERT(xp != NULL); 12741 ASSERT(xp == orig_xp); 12742 bsp = xp->xb_private; 12743 ASSERT(bsp != NULL); 12744 12745 done: 12746 /* 12747 * Restore xb_private to whatever it was set to by the next higher 12748 * layer in the chain, then free the layer-private data area. 12749 */ 12750 xp->xb_private = bsp->mbs_oprivate; 12751 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12752 12753 exit: 12754 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12755 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12756 12757 SD_NEXT_IODONE(index, un, bp); 12758 } 12759 12760 12761 /* 12762 * Function: sd_checksum_iostart 12763 * 12764 * Description: A stub function for a layer that's currently not used. 12765 * For now just a placeholder. 
12766  *
12767  * Context: Kernel thread context
12768  */
12769 
12770 static void
12771 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
12772 {
12773 	ASSERT(un != NULL);
12774 	ASSERT(bp != NULL);
12775 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12776 	SD_NEXT_IOSTART(index, un, bp);
12777 }
12778 
12779 
12780 /*
12781  * Function: sd_checksum_iodone
12782  *
12783  * Description: A stub function for a layer that's currently not used.
12784  *		For now just a placeholder.
12785  *
12786  * Context: May be called under interrupt context
12787  */
12788 
12789 static void
12790 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
12791 {
12792 	ASSERT(un != NULL);
12793 	ASSERT(bp != NULL);
12794 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12795 	SD_NEXT_IODONE(index, un, bp);
12796 }
12797 
12798 
12799 /*
12800  * Function: sd_checksum_uscsi_iostart
12801  *
12802  * Description: A stub function for a layer that's currently not used.
12803  *		For now just a placeholder.
12804  *
12805  * Context: Kernel thread context
12806  */
12807 
12808 static void
12809 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
12810 {
12811 	ASSERT(un != NULL);
12812 	ASSERT(bp != NULL);
12813 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12814 	SD_NEXT_IOSTART(index, un, bp);
12815 }
12816 
12817 
12818 /*
12819  * Function: sd_checksum_uscsi_iodone
12820  *
12821  * Description: A stub function for a layer that's currently not used.
12822  *		For now just a placeholder.
12823  *
12824  * Context: May be called under interrupt context
12825  */
12826 
12827 static void
12828 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12829 {
12830 	ASSERT(un != NULL);
12831 	ASSERT(bp != NULL);
12832 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12833 	SD_NEXT_IODONE(index, un, bp);
12834 }
12835 
12836 
12837 /*
12838  * Function: sd_pm_iostart
12839  *
12840  * Description: iostart-side routine for power management.
12841  *
12842  * Context: Kernel thread context
12843  */
12844 
12845 static void
12846 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
12847 {
12848 	ASSERT(un != NULL);
12849 	ASSERT(bp != NULL);
12850 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12851 	ASSERT(!mutex_owned(&un->un_pm_mutex));
12852 
12853 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
12854 
12855 	if (sd_pm_entry(un) != DDI_SUCCESS) {
12856 		/*
12857 		 * Set up to return the failed buf back up the 'iodone'
12858 		 * side of the calling chain.
12859 		 */
12860 		bioerror(bp, EIO);
12861 		bp->b_resid = bp->b_bcount;
12862 
12863 		SD_BEGIN_IODONE(index, un, bp);
12864 
12865 		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
12866 		return;
12867 	}
12868 
12869 	SD_NEXT_IOSTART(index, un, bp);
12870 
12871 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
12872 }
12873 
12874 
12875 /*
12876  * Function: sd_pm_iodone
12877  *
12878  * Description: iodone-side routine for power management.
12879  *
12880  * Context: May be called from interrupt context
12881  */
12882 
12883 static void
12884 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
12885 {
12886 	ASSERT(un != NULL);
12887 	ASSERT(bp != NULL);
12888 	ASSERT(!mutex_owned(&un->un_pm_mutex));
12889 
12890 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
12891 
12892 	/*
12893 	 * After attach the following flag is only read, so don't
12894 	 * take the penalty of acquiring a mutex for it.
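	 * Note that the sd_pm_exit() call below is the counterpart of the
	 * sd_pm_entry() call made in sd_pm_iostart() on the way down the
	 * chain, so each command that was transported releases whatever
	 * power-management hold sd_pm_entry() took for it.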
12895 */ 12896 if (un->un_f_pm_is_enabled == TRUE) { 12897 sd_pm_exit(un); 12898 } 12899 12900 SD_NEXT_IODONE(index, un, bp); 12901 12902 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 12903 } 12904 12905 12906 /* 12907 * Function: sd_core_iostart 12908 * 12909 * Description: Primary driver function for enqueuing buf(9S) structs from 12910 * the system and initiating IO to the target device 12911 * 12912 * Context: Kernel thread context. Can sleep. 12913 * 12914 * Assumptions: - The given xp->xb_blkno is absolute 12915 * (ie, relative to the start of the device). 12916 * - The IO is to be done using the native blocksize of 12917 * the device, as specified in un->un_tgt_blocksize. 12918 */ 12919 /* ARGSUSED */ 12920 static void 12921 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 12922 { 12923 struct sd_xbuf *xp; 12924 12925 ASSERT(un != NULL); 12926 ASSERT(bp != NULL); 12927 ASSERT(!mutex_owned(SD_MUTEX(un))); 12928 ASSERT(bp->b_resid == 0); 12929 12930 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 12931 12932 xp = SD_GET_XBUF(bp); 12933 ASSERT(xp != NULL); 12934 12935 mutex_enter(SD_MUTEX(un)); 12936 12937 /* 12938 * If we are currently in the failfast state, fail any new IO 12939 * that has B_FAILFAST set, then return. 12940 */ 12941 if ((bp->b_flags & B_FAILFAST) && 12942 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 12943 mutex_exit(SD_MUTEX(un)); 12944 bioerror(bp, EIO); 12945 bp->b_resid = bp->b_bcount; 12946 SD_BEGIN_IODONE(index, un, bp); 12947 return; 12948 } 12949 12950 if (SD_IS_DIRECT_PRIORITY(xp)) { 12951 /* 12952 * Priority command -- transport it immediately. 12953 * 12954 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12955 * because all direct priority commands should be associated 12956 * with error recovery actions which we don't want to retry. 12957 */ 12958 sd_start_cmds(un, bp); 12959 } else { 12960 /* 12961 * Normal command -- add it to the wait queue, then start 12962 * transporting commands from the wait queue. 12963 */ 12964 sd_add_buf_to_waitq(un, bp); 12965 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12966 sd_start_cmds(un, NULL); 12967 } 12968 12969 mutex_exit(SD_MUTEX(un)); 12970 12971 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12972 } 12973 12974 12975 /* 12976 * Function: sd_init_cdb_limits 12977 * 12978 * Description: This is to handle scsi_pkt initialization differences 12979 * between the driver platforms. 12980 * 12981 * Legacy behaviors: 12982 * 12983 * If the block number or the sector count exceeds the 12984 * capabilities of a Group 0 command, shift over to a 12985 * Group 1 command. We don't blindly use Group 1 12986 * commands because a) some drives (CDC Wren IVs) get a 12987 * bit confused, and b) there is probably a fair amount 12988 * of speed difference for a target to receive and decode 12989 * a 10 byte command instead of a 6 byte command. 12990 * 12991 * The xfer time difference of 6 vs 10 byte CDBs is 12992 * still significant so this code is still worthwhile. 12993 * 10 byte CDBs are very inefficient with the fas HBA driver 12994 * and older disks. Each CDB byte took 1 usec with some 12995 * popular disks. 12996 * 12997 * Context: Must be called at attach time 12998 */ 12999 13000 static void 13001 sd_init_cdb_limits(struct sd_lun *un) 13002 { 13003 /* 13004 * Use CDB_GROUP1 commands for most devices except for 13005 * parallel SCSI fixed drives in which case we get better 13006 * performance using CDB_GROUP0 commands (where applicable). 
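	 *
	 * For reference, the standard SCSI READ/WRITE CDB limits (per the
	 * SCSI spec, not taken from sd_cdbtab itself) are:
	 *
	 *	CDB_GROUP0	 6-byte CDB	21-bit LBA	 8-bit count
	 *	CDB_GROUP1	10-byte CDB	32-bit LBA	16-bit count
	 *	CDB_GROUP5	12-byte CDB	32-bit LBA	32-bit count
	 *	CDB_GROUP4	16-byte CDB	64-bit LBA	32-bit count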
13007  */
13008 	un->un_mincdb = SD_CDB_GROUP1;
13009 #if !defined(__fibre)
13010 	if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
13011 	    !ISREMOVABLE(un)) {
13012 		un->un_mincdb = SD_CDB_GROUP0;
13013 	}
13014 #endif
13015 
13016 	/*
13017 	 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
13018 	 * commands for fixed disks unless we are building for a 32 bit
13019 	 * kernel.
13020 	 */
13021 #ifdef _LP64
13022 	un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP4;
13023 #else
13024 	un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP1;
13025 #endif
13026 
13027 	/*
13028 	 * x86 systems require the PKT_DMA_PARTIAL flag
13029 	 */
13030 #if defined(__x86)
13031 	un->un_pkt_flags = PKT_DMA_PARTIAL;
13032 #else
13033 	un->un_pkt_flags = 0;
13034 #endif
13035 
13036 	un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
13037 	    ? sizeof (struct scsi_arq_status) : 1);
13038 	un->un_cmd_timeout = (ushort_t)sd_io_time;
13039 	un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
13040 }
13041 
13042 
13043 /*
13044  * Function: sd_initpkt_for_buf
13045  *
13046  * Description: Allocate and initialize for transport a scsi_pkt struct,
13047  *		based upon the info specified in the given buf struct.
13048  *
13049  *		Assumes the xb_blkno in the request is absolute (ie,
13050  *		relative to the start of the device, NOT the partition!).
13051  *		Also assumes that the request is using the native block
13052  *		size of the device (as returned by the READ CAPACITY
13053  *		command).
13054  *
13055  * Return Code: SD_PKT_ALLOC_SUCCESS
13056  *		SD_PKT_ALLOC_FAILURE
13057  *		SD_PKT_ALLOC_FAILURE_NO_DMA
13058  *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13059  *
13060  * Context: Kernel thread and may be called from software interrupt context
13061  *		as part of a sdrunout callback. This function may not block or
13062  *		call routines that block.
13063  */
13064 
13065 static int
13066 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13067 {
13068 	struct sd_xbuf	*xp;
13069 	struct scsi_pkt *pktp = NULL;
13070 	struct sd_lun	*un;
13071 	size_t		blockcount;
13072 	daddr_t		startblock;
13073 	int		rval;
13074 	int		cmd_flags;
13075 
13076 	ASSERT(bp != NULL);
13077 	ASSERT(pktpp != NULL);
13078 	xp = SD_GET_XBUF(bp);
13079 	ASSERT(xp != NULL);
13080 	un = SD_GET_UN(bp);
13081 	ASSERT(un != NULL);
13082 	ASSERT(mutex_owned(SD_MUTEX(un)));
13083 	ASSERT(bp->b_resid == 0);
13084 
13085 	SD_TRACE(SD_LOG_IO_CORE, un,
13086 	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13087 
13088 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
13089 	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13090 		/*
13091 		 * Already have a scsi_pkt -- just need DMA resources.
13092 		 * We must recompute the CDB in case the mapping returns
13093 		 * a nonzero pkt_resid.
13094 		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13095 		 * that is being retried, the unmap/remap of the DMA resources
13096 		 * will result in the entire transfer starting over again
13097 		 * from the very first block.
13098 		 */
13099 		ASSERT(xp->xb_pktp != NULL);
13100 		pktp = xp->xb_pktp;
13101 	} else {
13102 		pktp = NULL;
13103 	}
13104 #endif /* __i386 || __amd64 */
13105 
13106 	startblock = xp->xb_blkno;	/* Absolute block num. */
13107 	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13108 
13109 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
13110 
13111 	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13112 
13113 #else
13114 
13115 	cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags;
13116 
13117 #endif
13118 
13119 	/*
13120 	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13121 	 * call scsi_init_pkt, and build the CDB.
13122 	 */
13123 	rval = sd_setup_rw_pkt(un, &pktp, bp,
13124 	    cmd_flags, sdrunout, (caddr_t)un,
13125 	    startblock, blockcount);
13126 
13127 	if (rval == 0) {
13128 		/*
13129 		 * Success.
13130 		 *
13131 		 * If partial DMA is being used and required for this transfer,
13132 		 * set it up here.
13133 		 */
13134 		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13135 		    (pktp->pkt_resid != 0)) {
13136 
13137 			/*
13138 			 * Save the CDB length and pkt_resid for the
13139 			 * next xfer
13140 			 */
13141 			xp->xb_dma_resid = pktp->pkt_resid;
13142 
13143 			/* rezero resid */
13144 			pktp->pkt_resid = 0;
13145 
13146 		} else {
13147 			xp->xb_dma_resid = 0;
13148 		}
13149 
13150 		pktp->pkt_flags = un->un_tagflags;
13151 		pktp->pkt_time  = un->un_cmd_timeout;
13152 		pktp->pkt_comp  = sdintr;
13153 
13154 		pktp->pkt_private = bp;
13155 		*pktpp = pktp;
13156 
13157 		SD_TRACE(SD_LOG_IO_CORE, un,
13158 		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13159 
13160 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
13161 		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13162 #endif
13163 
13164 		return (SD_PKT_ALLOC_SUCCESS);
13165 
13166 	}
13167 
13168 	/*
13169 	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13170 	 * from sd_setup_rw_pkt.
13171 	 */
13172 	ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13173 
13174 	if (rval == SD_PKT_ALLOC_FAILURE) {
13175 		*pktpp = NULL;
13176 		/*
13177 		 * Set the driver state to RWAIT to indicate the driver
13178 		 * is waiting on resource allocations. The driver will not
13179 		 * suspend, pm_suspend, or detach while the state is RWAIT.
13180 		 */
13181 		New_state(un, SD_STATE_RWAIT);
13182 
13183 		SD_ERROR(SD_LOG_IO_CORE, un,
13184 		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
13185 
13186 		if ((bp->b_flags & B_ERROR) != 0) {
13187 			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13188 		}
13189 		return (SD_PKT_ALLOC_FAILURE);
13190 	} else {
13191 		/*
13192 		 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13193 		 *
13194 		 * This should never happen. Maybe someone messed with the
13195 		 * kernel's minphys?
13196 		 */
13197 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13198 		    "Request rejected: too large for CDB: "
13199 		    "lba:0x%08lx  len:0x%08lx\n", startblock, blockcount);
13200 		SD_ERROR(SD_LOG_IO_CORE, un,
13201 		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13202 		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13203 
13204 	}
13205 }
13206 
13207 
13208 /*
13209  * Function: sd_destroypkt_for_buf
13210  *
13211  * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13212  *
13213  * Context: Kernel thread or interrupt context
13214  */
13215 
13216 static void
13217 sd_destroypkt_for_buf(struct buf *bp)
13218 {
13219 	ASSERT(bp != NULL);
13220 	ASSERT(SD_GET_UN(bp) != NULL);
13221 
13222 	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13223 	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13224 
13225 	ASSERT(SD_GET_PKTP(bp) != NULL);
13226 	scsi_destroy_pkt(SD_GET_PKTP(bp));
13227 
13228 	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13229 	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13230 }
13231 
13232 /*
13233  * Function: sd_setup_rw_pkt
13234  *
13235  * Description: Determines appropriate CDB group for the requested LBA
13236  *		and transfer length, calls scsi_init_pkt, and builds
13237  *		the CDB. Do not use for partial DMA transfers except
13238  *		for the initial transfer since the CDB size must
13239  *		remain constant.
13240  *
13241  * Context: Kernel thread and may be called from software interrupt
13242  *		context as part of a sdrunout callback. This function may not
13243  *		block or call routines that block.
13244  */
13245 
13246 
13247 int
13248 sd_setup_rw_pkt(struct sd_lun *un,
13249     struct scsi_pkt **pktpp, struct buf *bp, int flags,
13250     int (*callback)(caddr_t), caddr_t callback_arg,
13251     diskaddr_t lba, uint32_t blockcount)
13252 {
13253 	struct scsi_pkt *return_pktp;
13254 	union scsi_cdb *cdbp;
13255 	struct sd_cdbinfo *cp = NULL;
13256 	int i;
13257 
13258 	/*
13259 	 * See which size CDB to use, based upon the request.
13260 	 */
13261 	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13262 
13263 		/*
13264 		 * Check lba and block count against sd_cdbtab limits.
13265 		 * In the partial DMA case, we have to use the same size
13266 		 * CDB for all the transfers. Check lba + blockcount
13267 		 * against the max LBA so we know that segment of the
13268 		 * transfer can use the CDB we select.
13269 		 */
13270 		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13271 		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {
13272 
13273 			/*
13274 			 * The command will fit into the CDB type
13275 			 * specified by sd_cdbtab[i].
13276 			 */
13277 			cp = sd_cdbtab + i;
13278 
13279 			/*
13280 			 * Call scsi_init_pkt so we can fill in the
13281 			 * CDB.
13282 			 */
13283 			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13284 			    bp, cp->sc_grpcode, un->un_status_len, 0,
13285 			    flags, callback, callback_arg);
13286 
13287 			if (return_pktp != NULL) {
13288 
13289 				/*
13290 				 * Return new value of pkt
13291 				 */
13292 				*pktpp = return_pktp;
13293 
13294 				/*
13295 				 * To be safe, zero the CDB, ensuring there is
13296 				 * no leftover data from a previous command.
13297 				 */
13298 				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13299 
13300 				/*
13301 				 * Handle partial DMA mapping
13302 				 */
13303 				if (return_pktp->pkt_resid != 0) {
13304 
13305 					/*
13306 					 * Not going to xfer as many blocks as
13307 					 * originally expected
13308 					 */
13309 					blockcount -=
13310 					    SD_BYTES2TGTBLOCKS(un,
13311 					    return_pktp->pkt_resid);
13312 				}
13313 
13314 				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13315 
13316 				/*
13317 				 * Set command byte based on the CDB
13318 				 * type we matched.
13319 				 */
13320 				cdbp->scc_cmd = cp->sc_grpmask |
13321 				    ((bp->b_flags & B_READ) ?
13322 SCMD_READ : SCMD_WRITE); 13323 13324 SD_FILL_SCSI1_LUN(un, return_pktp); 13325 13326 /* 13327 * Fill in LBA and length 13328 */ 13329 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13330 (cp->sc_grpcode == CDB_GROUP4) || 13331 (cp->sc_grpcode == CDB_GROUP0) || 13332 (cp->sc_grpcode == CDB_GROUP5)); 13333 13334 if (cp->sc_grpcode == CDB_GROUP1) { 13335 FORMG1ADDR(cdbp, lba); 13336 FORMG1COUNT(cdbp, blockcount); 13337 return (0); 13338 } else if (cp->sc_grpcode == CDB_GROUP4) { 13339 FORMG4LONGADDR(cdbp, lba); 13340 FORMG4COUNT(cdbp, blockcount); 13341 return (0); 13342 } else if (cp->sc_grpcode == CDB_GROUP0) { 13343 FORMG0ADDR(cdbp, lba); 13344 FORMG0COUNT(cdbp, blockcount); 13345 return (0); 13346 } else if (cp->sc_grpcode == CDB_GROUP5) { 13347 FORMG5ADDR(cdbp, lba); 13348 FORMG5COUNT(cdbp, blockcount); 13349 return (0); 13350 } 13351 13352 /* 13353 * It should be impossible to not match one 13354 * of the CDB types above, so we should never 13355 * reach this point. Set the CDB command byte 13356 * to test-unit-ready to avoid writing 13357 * to somewhere we don't intend. 13358 */ 13359 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13360 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13361 } else { 13362 /* 13363 * Couldn't get scsi_pkt 13364 */ 13365 return (SD_PKT_ALLOC_FAILURE); 13366 } 13367 } 13368 } 13369 13370 /* 13371 * None of the available CDB types were suitable. This really 13372 * should never happen: on a 64 bit system we support 13373 * READ16/WRITE16 which will hold an entire 64 bit disk address 13374 * and on a 32 bit system we will refuse to bind to a device 13375 * larger than 2TB so addresses will never be larger than 32 bits. 13376 */ 13377 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13378 } 13379 13380 #if defined(__i386) || defined(__amd64) 13381 /* 13382 * Function: sd_setup_next_rw_pkt 13383 * 13384 * Description: Setup packet for partial DMA transfers, except for the 13385 * initial transfer. sd_setup_rw_pkt should be used for 13386 * the initial transfer. 13387 * 13388 * Context: Kernel thread and may be called from interrupt context. 13389 */ 13390 13391 int 13392 sd_setup_next_rw_pkt(struct sd_lun *un, 13393 struct scsi_pkt *pktp, struct buf *bp, 13394 diskaddr_t lba, uint32_t blockcount) 13395 { 13396 uchar_t com; 13397 union scsi_cdb *cdbp; 13398 uchar_t cdb_group_id; 13399 13400 ASSERT(pktp != NULL); 13401 ASSERT(pktp->pkt_cdbp != NULL); 13402 13403 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13404 com = cdbp->scc_cmd; 13405 cdb_group_id = CDB_GROUPID(com); 13406 13407 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13408 (cdb_group_id == CDB_GROUPID_1) || 13409 (cdb_group_id == CDB_GROUPID_4) || 13410 (cdb_group_id == CDB_GROUPID_5)); 13411 13412 /* 13413 * Move pkt to the next portion of the xfer. 13414 * func is NULL_FUNC so we do not have to release 13415 * the disk mutex here. 13416 */ 13417 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13418 NULL_FUNC, NULL) == pktp) { 13419 /* Success. 
Handle partial DMA */
13420 		if (pktp->pkt_resid != 0) {
13421 			blockcount -=
13422 			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
13423 		}
13424 
13425 		cdbp->scc_cmd = com;
13426 		SD_FILL_SCSI1_LUN(un, pktp);
13427 		if (cdb_group_id == CDB_GROUPID_1) {
13428 			FORMG1ADDR(cdbp, lba);
13429 			FORMG1COUNT(cdbp, blockcount);
13430 			return (0);
13431 		} else if (cdb_group_id == CDB_GROUPID_4) {
13432 			FORMG4LONGADDR(cdbp, lba);
13433 			FORMG4COUNT(cdbp, blockcount);
13434 			return (0);
13435 		} else if (cdb_group_id == CDB_GROUPID_0) {
13436 			FORMG0ADDR(cdbp, lba);
13437 			FORMG0COUNT(cdbp, blockcount);
13438 			return (0);
13439 		} else if (cdb_group_id == CDB_GROUPID_5) {
13440 			FORMG5ADDR(cdbp, lba);
13441 			FORMG5COUNT(cdbp, blockcount);
13442 			return (0);
13443 		}
13444 
13445 		/* Unreachable */
13446 		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13447 	}
13448 
13449 	/*
13450 	 * Error setting up next portion of cmd transfer.
13451 	 * Something is definitely very wrong and this
13452 	 * should not happen.
13453 	 */
13454 	return (SD_PKT_ALLOC_FAILURE);
13455 }
13456 #endif /* defined(__i386) || defined(__amd64) */
13457 
13458 /*
13459  * Function: sd_initpkt_for_uscsi
13460  *
13461  * Description: Allocate and initialize for transport a scsi_pkt struct,
13462  *		based upon the info specified in the given uscsi_cmd struct.
13463  *
13464  * Return Code: SD_PKT_ALLOC_SUCCESS
13465  *		SD_PKT_ALLOC_FAILURE
13466  *		SD_PKT_ALLOC_FAILURE_NO_DMA
13467  *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13468  *
13469  * Context: Kernel thread and may be called from software interrupt context
13470  *		as part of a sdrunout callback. This function may not block or
13471  *		call routines that block.
13472  */
13473 
13474 static int
13475 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
13476 {
13477 	struct uscsi_cmd *uscmd;
13478 	struct sd_xbuf	*xp;
13479 	struct scsi_pkt	*pktp;
13480 	struct sd_lun	*un;
13481 	uint32_t	flags = 0;
13482 
13483 	ASSERT(bp != NULL);
13484 	ASSERT(pktpp != NULL);
13485 	xp = SD_GET_XBUF(bp);
13486 	ASSERT(xp != NULL);
13487 	un = SD_GET_UN(bp);
13488 	ASSERT(un != NULL);
13489 	ASSERT(mutex_owned(SD_MUTEX(un)));
13490 
13491 	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
13492 	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
13493 	ASSERT(uscmd != NULL);
13494 
13495 	SD_TRACE(SD_LOG_IO_CORE, un,
13496 	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
13497 
13498 	/*
13499 	 * Allocate the scsi_pkt for the command.
13500 	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
13501 	 *	 during scsi_init_pkt time and will continue to use the
13502 	 *	 same path as long as the same scsi_pkt is used without
13503 	 *	 intervening scsi_dmafree(). Since a uscsi command does
13504 	 *	 not call scsi_dmafree() before retrying a failed command,
13505 	 *	 it is necessary to make sure the PKT_DMA_PARTIAL flag is
13506 	 *	 NOT set, so that scsi_vhci can use another available path
13507 	 *	 for the retry. Besides, a uscsi command does not allow DMA
13508 	 *	 breakup, so there is no need to set PKT_DMA_PARTIAL.
13509 	 */
13510 	pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
13511 	    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
13512 	    sizeof (struct scsi_arq_status), 0,
13513 	    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
13514 	    sdrunout, (caddr_t)un);
13515 
13516 	if (pktp == NULL) {
13517 		*pktpp = NULL;
13518 		/*
13519 		 * Set the driver state to RWAIT to indicate the driver
13520 		 * is waiting on resource allocations. The driver will not
13521 		 * suspend, pm_suspend, or detach while the state is RWAIT.
13522 		 */
13523 		New_state(un, SD_STATE_RWAIT);
13524 
13525 		SD_ERROR(SD_LOG_IO_CORE, un,
13526 		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
13527 
13528 		if ((bp->b_flags & B_ERROR) != 0) {
13529 			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13530 		}
13531 		return (SD_PKT_ALLOC_FAILURE);
13532 	}
13533 
13534 	/*
13535 	 * We do not do DMA breakup for USCSI commands, so return failure
13536 	 * here if all the needed DMA resources were not allocated.
13537 	 */
13538 	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
13539 	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
13540 		scsi_destroy_pkt(pktp);
13541 		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
13542 		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
13543 		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
13544 	}
13545 
13546 	/* Init the cdb from the given uscsi struct */
13547 	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
13548 	    uscmd->uscsi_cdb[0], 0, 0, 0);
13549 
13550 	SD_FILL_SCSI1_LUN(un, pktp);
13551 
13552 	/*
13553 	 * Set up the optional USCSI flags. See the uscsi (7I) man page
13554 	 * for listing of the supported flags.
13555 	 */
13556 
13557 	if (uscmd->uscsi_flags & USCSI_SILENT) {
13558 		flags |= FLAG_SILENT;
13559 	}
13560 
13561 	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
13562 		flags |= FLAG_DIAGNOSE;
13563 	}
13564 
13565 	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
13566 		flags |= FLAG_ISOLATE;
13567 	}
13568 
13569 	if (un->un_f_is_fibre == FALSE) {
13570 		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
13571 			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
13572 		}
13573 	}
13574 
13575 	/*
13576 	 * Set the pkt flags here so we save time later.
13577 	 * Note: These flags are NOT in the uscsi man page!!!
13578 	 */
13579 	if (uscmd->uscsi_flags & USCSI_HEAD) {
13580 		flags |= FLAG_HEAD;
13581 	}
13582 
13583 	if (uscmd->uscsi_flags & USCSI_NOINTR) {
13584 		flags |= FLAG_NOINTR;
13585 	}
13586 
13587 	/*
13588 	 * For tagged queueing, things get a bit complicated.
13589 	 * Check first for head of queue and last for ordered queue.
13590 	 * If neither head nor order, use the default driver tag flags.
13591 	 */
13592 	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
13593 		if (uscmd->uscsi_flags & USCSI_HTAG) {
13594 			flags |= FLAG_HTAG;
13595 		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
13596 			flags |= FLAG_OTAG;
13597 		} else {
13598 			flags |= un->un_tagflags & FLAG_TAGMASK;
13599 		}
13600 	}
13601 
13602 	if (uscmd->uscsi_flags & USCSI_NODISCON) {
13603 		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
13604 	}
13605 
13606 	pktp->pkt_flags = flags;
13607 
13608 	/* Copy the caller's CDB into the pkt... */
13609 	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
13610 
13611 	if (uscmd->uscsi_timeout == 0) {
13612 		pktp->pkt_time = un->un_uscsi_timeout;
13613 	} else {
13614 		pktp->pkt_time = uscmd->uscsi_timeout;
13615 	}
13616 
13617 	/* need it later to identify USCSI request in sdintr */
13618 	xp->xb_pkt_flags |= SD_XB_USCSICMD;
13619 
13620 	xp->xb_sense_resid = uscmd->uscsi_rqresid;
13621 
13622 	pktp->pkt_private = bp;
13623 	pktp->pkt_comp = sdintr;
13624 	*pktpp = pktp;
13625 
13626 	SD_TRACE(SD_LOG_IO_CORE, un,
13627 	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
13628 
13629 	return (SD_PKT_ALLOC_SUCCESS);
13630 }
13631 
13632 
13633 /*
13634  * Function: sd_destroypkt_for_uscsi
13635  *
13636  * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
13637  *		IOs. Also saves relevant info into the associated uscsi_cmd
13638  *		struct.
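 *		For example, for a command that completed with a CHECK
 *		CONDITION status, the uscsi caller will see the SCSI status
 *		byte in uscsi_status and, if USCSI_RQENABLE was set, up to
 *		SENSE_LENGTH bytes of sense data copied into uscsi_rqbuf
 *		(see the code below).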
13639  *
13640  * Context: May be called under interrupt context
13641  */
13642 
13643 static void
13644 sd_destroypkt_for_uscsi(struct buf *bp)
13645 {
13646 	struct uscsi_cmd *uscmd;
13647 	struct sd_xbuf	*xp;
13648 	struct scsi_pkt	*pktp;
13649 	struct sd_lun	*un;
13650 
13651 	ASSERT(bp != NULL);
13652 	xp = SD_GET_XBUF(bp);
13653 	ASSERT(xp != NULL);
13654 	un = SD_GET_UN(bp);
13655 	ASSERT(un != NULL);
13656 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13657 	pktp = SD_GET_PKTP(bp);
13658 	ASSERT(pktp != NULL);
13659 
13660 	SD_TRACE(SD_LOG_IO_CORE, un,
13661 	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
13662 
13663 	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
13664 	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
13665 	ASSERT(uscmd != NULL);
13666 
13667 	/* Save the status and the residual into the uscsi_cmd struct */
13668 	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
13669 	uscmd->uscsi_resid  = bp->b_resid;
13670 
13671 	/*
13672 	 * If enabled, copy any saved sense data into the area specified
13673 	 * by the uscsi command.
13674 	 */
13675 	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
13676 	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
13677 		/*
13678 		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
13679 		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
13680 		 */
13681 		uscmd->uscsi_rqstatus = xp->xb_sense_status;
13682 		uscmd->uscsi_rqresid  = xp->xb_sense_resid;
13683 		bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH);
13684 	}
13685 
13686 	/* We are done with the scsi_pkt; free it now */
13687 	ASSERT(SD_GET_PKTP(bp) != NULL);
13688 	scsi_destroy_pkt(SD_GET_PKTP(bp));
13689 
13690 	SD_TRACE(SD_LOG_IO_CORE, un,
13691 	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
13692 }
13693 
13694 
13695 /*
13696  * Function: sd_bioclone_alloc
13697  *
13698  * Description: Allocate a buf(9S) and init it as per the given buf
13699  *		and the various arguments. The associated sd_xbuf
13700  *		struct is (nearly) duplicated. The struct buf *bp
13701  *		argument is saved in new_xp->xb_private.
13702  *
13703  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
13704  *		datalen - size of data area for the shadow bp
13705  *		blkno - starting LBA
13706  *		func - function pointer for b_iodone in the shadow buf. (May
13707  *		be NULL if none.)
13708  *
13709  * Return Code: Pointer to the allocated buf(9S) struct
13710  *
13711  * Context: Can sleep.
13712  */
13713 
13714 static struct buf *
13715 sd_bioclone_alloc(struct buf *bp, size_t datalen,
13716     daddr_t blkno, int (*func)(struct buf *))
13717 {
13718 	struct sd_lun	*un;
13719 	struct sd_xbuf	*xp;
13720 	struct sd_xbuf	*new_xp;
13721 	struct buf	*new_bp;
13722 
13723 	ASSERT(bp != NULL);
13724 	xp = SD_GET_XBUF(bp);
13725 	ASSERT(xp != NULL);
13726 	un = SD_GET_UN(bp);
13727 	ASSERT(un != NULL);
13728 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13729 
13730 	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
13731 	    NULL, KM_SLEEP);
13732 
13733 	new_bp->b_lblkno = blkno;
13734 
13735 	/*
13736 	 * Allocate an xbuf for the shadow bp and copy the contents of the
13737 	 * original xbuf into it.
13738 	 */
13739 	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
13740 	bcopy(xp, new_xp, sizeof (struct sd_xbuf));
13741 
13742 	/*
13743 	 * The given bp is automatically saved in the xb_private member
13744 	 * of the new xbuf. Callers are allowed to depend on this.
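	 *
	 * The resulting linkage (illustrative):
	 *
	 *	new_bp->b_private  --> new_xp (copy of the original xbuf)
	 *	new_xp->xb_private --> bp    (the original buf)
	 *
	 * which lets completion processing find its way back from the
	 * clone to the original request.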
13745 	 */
13746 	new_xp->xb_private = bp;
13747 
13748 	new_bp->b_private  = new_xp;
13749 
13750 	return (new_bp);
13751 }
13752 
13753 /*
13754  * Function: sd_shadow_buf_alloc
13755  *
13756  * Description: Allocate a buf(9S) and init it as per the given buf
13757  *		and the various arguments. The associated sd_xbuf
13758  *		struct is (nearly) duplicated. The struct buf *bp
13759  *		argument is saved in new_xp->xb_private.
13760  *
13761  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
13762  *		datalen - size of data area for the shadow bp
13763  *		bflags - B_READ or B_WRITE (pseudo flag)
13764  *		blkno - starting LBA
13765  *		func - function pointer for b_iodone in the shadow buf. (May
13766  *		be NULL if none.)
13767  *
13768  * Return Code: Pointer to the allocated buf(9S) struct
13769  *
13770  * Context: Can sleep.
13771  */
13772 
13773 static struct buf *
13774 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
13775     daddr_t blkno, int (*func)(struct buf *))
13776 {
13777 	struct sd_lun	*un;
13778 	struct sd_xbuf	*xp;
13779 	struct sd_xbuf	*new_xp;
13780 	struct buf	*new_bp;
13781 
13782 	ASSERT(bp != NULL);
13783 	xp = SD_GET_XBUF(bp);
13784 	ASSERT(xp != NULL);
13785 	un = SD_GET_UN(bp);
13786 	ASSERT(un != NULL);
13787 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13788 
13789 	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
13790 		bp_mapin(bp);
13791 	}
13792 
13793 	bflags &= (B_READ | B_WRITE);
13794 #if defined(__i386) || defined(__amd64)
13795 	new_bp = getrbuf(KM_SLEEP);
13796 	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
13797 	new_bp->b_bcount = datalen;
13798 	new_bp->b_flags = bp->b_flags | bflags;
13799 #else
13800 	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
13801 	    datalen, bflags, SLEEP_FUNC, NULL);
13802 #endif
13803 	new_bp->av_forw	= NULL;
13804 	new_bp->av_back	= NULL;
13805 	new_bp->b_dev	= bp->b_dev;
13806 	new_bp->b_blkno	= blkno;
13807 	new_bp->b_iodone = func;
13808 	new_bp->b_edev	= bp->b_edev;
13809 	new_bp->b_resid	= 0;
13810 
13811 	/* We need to preserve the B_FAILFAST flag */
13812 	if (bp->b_flags & B_FAILFAST) {
13813 		new_bp->b_flags |= B_FAILFAST;
13814 	}
13815 
13816 	/*
13817 	 * Allocate an xbuf for the shadow bp and copy the contents of the
13818 	 * original xbuf into it.
13819 	 */
13820 	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
13821 	bcopy(xp, new_xp, sizeof (struct sd_xbuf));
13822 
13823 	/* Need later to copy data between the shadow buf & original buf! */
13824 	new_xp->xb_pkt_flags |= PKT_CONSISTENT;
13825 
13826 	/*
13827 	 * The given bp is automatically saved in the xb_private member
13828 	 * of the new xbuf. Callers are allowed to depend on this.
13829 	 */
13830 	new_xp->xb_private = bp;
13831 
13832 	new_bp->b_private  = new_xp;
13833 
13834 	return (new_bp);
13835 }
13836 
13837 /*
13838  * Function: sd_bioclone_free
13839  *
13840  * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
13841  *		in the larger than partition operation.
13842  *
13843  * Context: May be called under interrupt context
13844  */
13845 
13846 static void
13847 sd_bioclone_free(struct buf *bp)
13848 {
13849 	struct sd_xbuf	*xp;
13850 
13851 	ASSERT(bp != NULL);
13852 	xp = SD_GET_XBUF(bp);
13853 	ASSERT(xp != NULL);
13854 
13855 	/*
13856 	 * Call bp_mapout() before freeing the buf, in case a lower
13857 	 * layer or HBA had done a bp_mapin(). We must do this here
13858 	 * as we are the "originator" of the shadow buf.
13859 	 */
13860 	bp_mapout(bp);
13861 
13862 	/*
13863 	 * Null out b_iodone before freeing the bp, to ensure that the driver
13864 	 * never gets confused by a stale value in this field. (Just a little
13865 	 * extra defensiveness here.)
13866 	 */
13867 	bp->b_iodone = NULL;
13868 
13869 	freerbuf(bp);
13870 
13871 	kmem_free(xp, sizeof (struct sd_xbuf));
13872 }
13873 
13874 /*
13875  * Function: sd_shadow_buf_free
13876  *
13877  * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
13878  *
13879  * Context: May be called under interrupt context
13880  */
13881 
13882 static void
13883 sd_shadow_buf_free(struct buf *bp)
13884 {
13885 	struct sd_xbuf	*xp;
13886 
13887 	ASSERT(bp != NULL);
13888 	xp = SD_GET_XBUF(bp);
13889 	ASSERT(xp != NULL);
13890 
13891 #if defined(__sparc)
13892 	/*
13893 	 * Call bp_mapout() before freeing the buf, in case a lower
13894 	 * layer or HBA had done a bp_mapin(). We must do this here
13895 	 * as we are the "originator" of the shadow buf.
13896 	 */
13897 	bp_mapout(bp);
13898 #endif
13899 
13900 	/*
13901 	 * Null out b_iodone before freeing the bp, to ensure that the driver
13902 	 * never gets confused by a stale value in this field. (Just a little
13903 	 * extra defensiveness here.)
13904 	 */
13905 	bp->b_iodone = NULL;
13906 
13907 #if defined(__i386) || defined(__amd64)
13908 	kmem_free(bp->b_un.b_addr, bp->b_bcount);
13909 	freerbuf(bp);
13910 #else
13911 	scsi_free_consistent_buf(bp);
13912 #endif
13913 
13914 	kmem_free(xp, sizeof (struct sd_xbuf));
13915 }
13916 
13917 
13918 /*
13919  * Function: sd_print_transport_rejected_message
13920  *
13921  * Description: This implements the ludicrously complex rules for printing
13922  *		a "transport rejected" message. This is to address the
13923  *		specific problem of having a flood of this error message
13924  *		produced when a failover occurs.
13925  *
13926  * Context: Any.
13927  */
13928 
13929 static void
13930 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
13931     int code)
13932 {
13933 	ASSERT(un != NULL);
13934 	ASSERT(mutex_owned(SD_MUTEX(un)));
13935 	ASSERT(xp != NULL);
13936 
13937 	/*
13938 	 * Print the "transport rejected" message under the following
13939 	 * conditions:
13940 	 *
13941 	 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
13942 	 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
13943 	 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
13944 	 *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
13945 	 *   scsi_transport(9F) (which indicates that the target might have
13946 	 *   gone off-line). This uses the un->un_tran_fatal_count
13947 	 *   count, which is incremented whenever a TRAN_FATAL_ERROR is
13948 	 *   received, and reset to zero whenever a TRAN_ACCEPT is returned
13949 	 *   from scsi_transport().
13950 	 *
13951 	 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
13952 	 * the preceding cases in order for the message to be printed.
13953 	 */
13954 	if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) {
13955 		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
13956 		    (code != TRAN_FATAL_ERROR) ||
13957 		    (un->un_tran_fatal_count == 1)) {
13958 			switch (code) {
13959 			case TRAN_BADPKT:
13960 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13961 				    "transport rejected bad packet\n");
13962 				break;
13963 			case TRAN_FATAL_ERROR:
13964 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13965 				    "transport rejected fatal error\n");
13966 				break;
13967 			default:
13968 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13969 				    "transport rejected (%d)\n", code);
13970 				break;
13971 			}
13972 		}
13973 	}
13974 }
13975 
13976 
13977 /*
13978  * Function: sd_add_buf_to_waitq
13979  *
13980  * Description: Add the given buf(9S) struct to the wait queue for the
13981  *		instance.
If sorting is enabled, then the buf is added 13982 * to the queue via an elevator sort algorithm (a la 13983 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 13984 * If sorting is not enabled, then the buf is just added 13985 * to the end of the wait queue. 13986 * 13987 * Return Code: void 13988 * 13989 * Context: Does not sleep/block, therefore technically can be called 13990 * from any context. However if sorting is enabled then the 13991 * execution time is indeterminate, and may take long if 13992 * the wait queue grows large. 13993 */ 13994 13995 static void 13996 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 13997 { 13998 struct buf *ap; 13999 14000 ASSERT(bp != NULL); 14001 ASSERT(un != NULL); 14002 ASSERT(mutex_owned(SD_MUTEX(un))); 14003 14004 /* If the queue is empty, add the buf as the only entry & return. */ 14005 if (un->un_waitq_headp == NULL) { 14006 ASSERT(un->un_waitq_tailp == NULL); 14007 un->un_waitq_headp = un->un_waitq_tailp = bp; 14008 bp->av_forw = NULL; 14009 return; 14010 } 14011 14012 ASSERT(un->un_waitq_tailp != NULL); 14013 14014 /* 14015 * If sorting is disabled, just add the buf to the tail end of 14016 * the wait queue and return. 14017 */ 14018 if (un->un_f_disksort_disabled) { 14019 un->un_waitq_tailp->av_forw = bp; 14020 un->un_waitq_tailp = bp; 14021 bp->av_forw = NULL; 14022 return; 14023 } 14024 14025 /* 14026 * Sort thru the list of requests currently on the wait queue 14027 * and add the new buf request at the appropriate position. 14028 * 14029 * The un->un_waitq_headp is an activity chain pointer on which 14030 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 14031 * first queue holds those requests which are positioned after 14032 * the current SD_GET_BLKNO() (in the first request); the second holds 14033 * requests which came in after their SD_GET_BLKNO() number was passed. 14034 * Thus we implement a one way scan, retracting after reaching 14035 * the end of the drive to the first request on the second 14036 * queue, at which time it becomes the first queue. 14037 * A one-way scan is natural because of the way UNIX read-ahead 14038 * blocks are allocated. 14039 * 14040 * If we lie after the first request, then we must locate the 14041 * second request list and add ourselves to it. 14042 */ 14043 ap = un->un_waitq_headp; 14044 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 14045 while (ap->av_forw != NULL) { 14046 /* 14047 * Look for an "inversion" in the (normally 14048 * ascending) block numbers. This indicates 14049 * the start of the second request list. 14050 */ 14051 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 14052 /* 14053 * Search the second request list for the 14054 * first request at a larger block number. 14055 * We go before that; however if there is 14056 * no such request, we go at the end. 14057 */ 14058 do { 14059 if (SD_GET_BLKNO(bp) < 14060 SD_GET_BLKNO(ap->av_forw)) { 14061 goto insert; 14062 } 14063 ap = ap->av_forw; 14064 } while (ap->av_forw != NULL); 14065 goto insert; /* after last */ 14066 } 14067 ap = ap->av_forw; 14068 } 14069 14070 /* 14071 * No inversions... we will go after the last, and 14072 * be the first request in the second request list. 14073 */ 14074 goto insert; 14075 } 14076 14077 /* 14078 * Request is at/after the current request... 14079 * sort in the first request list. 14080 */ 14081 while (ap->av_forw != NULL) { 14082 /* 14083 * We want to go after the current request (1) if 14084 * there is an inversion after it (i.e. 
it is the end
14085 		 * of the first request list), or (2) if the next
14086 		 * request is a larger block no. than our request.
14087 		 */
14088 		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
14089 		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
14090 			goto insert;
14091 		}
14092 		ap = ap->av_forw;
14093 	}
14094 
14095 	/*
14096 	 * Neither a second list nor a larger request, therefore
14097 	 * we go at the end of the first list (which is the same
14098 	 * as the end of the whole shebang).
14099 	 */
14100 insert:
14101 	bp->av_forw = ap->av_forw;
14102 	ap->av_forw = bp;
14103 
14104 	/*
14105 	 * If we inserted onto the tail end of the waitq, make sure the
14106 	 * tail pointer is updated.
14107 	 */
14108 	if (ap == un->un_waitq_tailp) {
14109 		un->un_waitq_tailp = bp;
14110 	}
14111 }
14112 
14113 
14114 /*
14115  * Function: sd_start_cmds
14116  *
14117  * Description: Remove and transport cmds from the driver queues.
14118  *
14119  * Arguments: un - pointer to the unit (soft state) struct for the target.
14120  *
14121  *		immed_bp - ptr to a buf to be transported immediately. Only
14122  *		the immed_bp is transported; bufs on the waitq are not
14123  *		processed and the un_retry_bp is not checked. If immed_bp is
14124  *		NULL, then normal queue processing is performed.
14125  *
14126  * Context: May be called from kernel thread context, interrupt context,
14127  *		or runout callback context. This function may not block or
14128  *		call routines that block.
14129  */
14130 
14131 static void
14132 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
14133 {
14134 	struct	sd_xbuf	*xp;
14135 	struct	buf	*bp;
14136 	void	(*statp)(kstat_io_t *);
14137 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
14138 	void	(*saved_statp)(kstat_io_t *);
14139 #endif
14140 	int	rval;
14141 
14142 	ASSERT(un != NULL);
14143 	ASSERT(mutex_owned(SD_MUTEX(un)));
14144 	ASSERT(un->un_ncmds_in_transport >= 0);
14145 	ASSERT(un->un_throttle >= 0);
14146 
14147 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
14148 
14149 	do {
14150 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
14151 		saved_statp = NULL;
14152 #endif
14153 
14154 		/*
14155 		 * If we are syncing or dumping, fail the command to
14156 		 * avoid recursively calling back into scsi_transport().
14157 		 * The dump I/O itself uses a separate code path so this
14158 		 * only prevents non-dump I/O from being sent while dumping.
14159 		 * File system sync takes place before dumping begins.
14160 		 * During panic, filesystem I/O is allowed provided
14161 		 * un_in_callback is <= 1. This is to prevent recursion
14162 		 * such as sd_start_cmds -> scsi_transport -> sdintr ->
14163 		 * sd_start_cmds and so on. See panic.c for more information
14164 		 * about the states the system can be in during panic.
14165 		 */
14166 		if ((un->un_state == SD_STATE_DUMPING) ||
14167 		    (ddi_in_panic() && (un->un_in_callback > 1))) {
14168 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14169 			    "sd_start_cmds: panicking\n");
14170 			goto exit;
14171 		}
14172 
14173 		if ((bp = immed_bp) != NULL) {
14174 			/*
14175 			 * We have a bp that must be transported immediately.
14176 			 * It's OK to transport the immed_bp here without doing
14177 			 * the throttle limit check because the immed_bp is
14178 			 * always used in a retry/recovery case. This means
14179 			 * that we know we are not at the throttle limit by
14180 			 * virtue of the fact that to get here we must have
14181 			 * already gotten a command back via sdintr().
This also 14182 * relies on (1) the command on un_retry_bp preventing 14183 * further commands from the waitq from being issued; 14184 * and (2) the code in sd_retry_command checking the 14185 * throttle limit before issuing a delayed or immediate 14186 * retry. This holds even if the throttle limit is 14187 * currently ratcheted down from its maximum value. 14188 */ 14189 statp = kstat_runq_enter; 14190 if (bp == un->un_retry_bp) { 14191 ASSERT((un->un_retry_statp == NULL) || 14192 (un->un_retry_statp == kstat_waitq_enter) || 14193 (un->un_retry_statp == 14194 kstat_runq_back_to_waitq)); 14195 /* 14196 * If the waitq kstat was incremented when 14197 * sd_set_retry_bp() queued this bp for a retry, 14198 * then we must set up statp so that the waitq 14199 * count will get decremented correctly below. 14200 * Also we must clear un->un_retry_statp to 14201 * ensure that we do not act on a stale value 14202 * in this field. 14203 */ 14204 if ((un->un_retry_statp == kstat_waitq_enter) || 14205 (un->un_retry_statp == 14206 kstat_runq_back_to_waitq)) { 14207 statp = kstat_waitq_to_runq; 14208 } 14209 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14210 saved_statp = un->un_retry_statp; 14211 #endif 14212 un->un_retry_statp = NULL; 14213 14214 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14215 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14216 "un_throttle:%d un_ncmds_in_transport:%d\n", 14217 un, un->un_retry_bp, un->un_throttle, 14218 un->un_ncmds_in_transport); 14219 } else { 14220 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14221 "processing priority bp:0x%p\n", bp); 14222 } 14223 14224 } else if ((bp = un->un_waitq_headp) != NULL) { 14225 /* 14226 * A command on the waitq is ready to go, but do not 14227 * send it if: 14228 * 14229 * (1) the throttle limit has been reached, or 14230 * (2) a retry is pending, or 14231 * (3) a START_STOP_UNIT callback pending, or 14232 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14233 * command is pending. 14234 * 14235 * For all of these conditions, IO processing will 14236 * restart after the condition is cleared. 14237 */ 14238 if (un->un_ncmds_in_transport >= un->un_throttle) { 14239 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14240 "sd_start_cmds: exiting, " 14241 "throttle limit reached!\n"); 14242 goto exit; 14243 } 14244 if (un->un_retry_bp != NULL) { 14245 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14246 "sd_start_cmds: exiting, retry pending!\n"); 14247 goto exit; 14248 } 14249 if (un->un_startstop_timeid != NULL) { 14250 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14251 "sd_start_cmds: exiting, " 14252 "START_STOP pending!\n"); 14253 goto exit; 14254 } 14255 if (un->un_direct_priority_timeid != NULL) { 14256 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14257 "sd_start_cmds: exiting, " 14258 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14259 goto exit; 14260 } 14261 14262 /* Dequeue the command */ 14263 un->un_waitq_headp = bp->av_forw; 14264 if (un->un_waitq_headp == NULL) { 14265 un->un_waitq_tailp = NULL; 14266 } 14267 bp->av_forw = NULL; 14268 statp = kstat_waitq_to_runq; 14269 SD_TRACE(SD_LOG_IO_CORE, un, 14270 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14271 14272 } else { 14273 /* No work to do so bail out now */ 14274 SD_TRACE(SD_LOG_IO_CORE, un, 14275 "sd_start_cmds: no more work, exiting!\n"); 14276 goto exit; 14277 } 14278 14279 /* 14280 * Reset the state to normal. This is the mechanism by which 14281 * the state transitions from either SD_STATE_RWAIT or 14282 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 
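		 * For example, a command that was left waiting because a
		 * scsi_pkt could not be allocated (SD_STATE_RWAIT) moves
		 * the state back to SD_STATE_NORMAL here once it is finally
		 * dequeued for transport.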
14283 		 * If state is SD_STATE_PM_CHANGING then this command is
14284 		 * part of the device power control and the state must
14285 		 * not be put back to normal. Doing so would allow new
14286 		 * commands to proceed when they shouldn't; the device
14287 		 * may be going off.
14288 		 */
14289 		if ((un->un_state != SD_STATE_SUSPENDED) &&
14290 		    (un->un_state != SD_STATE_PM_CHANGING)) {
14291 			New_state(un, SD_STATE_NORMAL);
14292 		}
14293 
14294 		xp = SD_GET_XBUF(bp);
14295 		ASSERT(xp != NULL);
14296 
14297 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
14298 		/*
14299 		 * Allocate the scsi_pkt if we need one, or attach DMA
14300 		 * resources if we have a scsi_pkt that needs them. The
14301 		 * latter should only occur for commands that are being
14302 		 * retried.
14303 		 */
14304 		if ((xp->xb_pktp == NULL) ||
14305 		    ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
14306 #else
14307 		if (xp->xb_pktp == NULL) {
14308 #endif
14309 			/*
14310 			 * There is no scsi_pkt allocated for this buf. Call
14311 			 * the initpkt function to allocate & init one.
14312 			 *
14313 			 * The scsi_init_pkt runout callback functionality is
14314 			 * implemented as follows:
14315 			 *
14316 			 * 1) The initpkt function always calls
14317 			 *    scsi_init_pkt(9F) with sdrunout specified as the
14318 			 *    callback routine.
14319 			 * 2) A successful packet allocation is initialized and
14320 			 *    the I/O is transported.
14321 			 * 3) The I/O associated with an allocation resource
14322 			 *    failure is left on its queue to be retried via
14323 			 *    runout or the next I/O.
14324 			 * 4) The I/O associated with a DMA error is removed
14325 			 *    from the queue and failed with EIO. Processing of
14326 			 *    the transport queues is also halted to be
14327 			 *    restarted via runout or the next I/O.
14328 			 * 5) The I/O associated with a CDB size or packet
14329 			 *    size error is removed from the queue and failed
14330 			 *    with EIO. Processing of the transport queues is
14331 			 *    continued.
14332 			 *
14333 			 * Note: there is no interface for canceling a runout
14334 			 * callback. To prevent the driver from detaching or
14335 			 * suspending while a runout is pending the driver
14336 			 * state is set to SD_STATE_RWAIT
14337 			 *
14338 			 * Note: using the scsi_init_pkt callback facility can
14339 			 * result in an I/O request persisting at the head of
14340 			 * the list which cannot be satisfied even after
14341 			 * multiple retries. In the future the driver may
14342 			 * implement some kind of maximum runout count before
14343 			 * failing an I/O.
14344 			 *
14345 			 * Note: the use of funcp below may seem superfluous,
14346 			 * but it helps warlock figure out the correct
14347 			 * initpkt function calls (see [s]sd.wlcmd).
14348 			 */
14349 			struct scsi_pkt	*pktp;
14350 			int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);
14351 
14352 			ASSERT(bp != un->un_rqs_bp);
14353 
14354 			funcp = sd_initpkt_map[xp->xb_chain_iostart];
14355 			switch ((*funcp)(bp, &pktp)) {
14356 			case SD_PKT_ALLOC_SUCCESS:
14357 				xp->xb_pktp = pktp;
14358 				SD_TRACE(SD_LOG_IO_CORE, un,
14359 				    "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
14360 				    pktp);
14361 				goto got_pkt;
14362 
14363 			case SD_PKT_ALLOC_FAILURE:
14364 				/*
14365 				 * Temporary (hopefully) resource depletion.
14366 				 * Since retries and RQS commands always have a
14367 				 * scsi_pkt allocated, these cases should never
14368 				 * get here. So the only cases this needs to
14369 				 * handle are a bp from the waitq (which we put
14370 				 * back onto the waitq for sdrunout), or a bp
14371 				 * sent as an immed_bp (which we just fail).
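				 * In short: a waitq bp is put back at the
				 * head of the waitq (see below) so that
				 * sdrunout() can restart it, while an
				 * immed_bp is failed with EIO (with an x86
				 * special case, below, for a DMA-freed
				 * un_retry_bp that gets rescheduled via
				 * timeout(9F) instead).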
14372 */ 14373 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14374 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14375 14376 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14377 14378 if (bp == immed_bp) { 14379 /* 14380 * If SD_XB_DMA_FREED is clear, then 14381 * this is a failure to allocate a 14382 * scsi_pkt, and we must fail the 14383 * command. 14384 */ 14385 if ((xp->xb_pkt_flags & 14386 SD_XB_DMA_FREED) == 0) { 14387 break; 14388 } 14389 14390 /* 14391 * If this immediate command is NOT our 14392 * un_retry_bp, then we must fail it. 14393 */ 14394 if (bp != un->un_retry_bp) { 14395 break; 14396 } 14397 14398 /* 14399 * We get here if this cmd is our 14400 * un_retry_bp that was DMAFREED, but 14401 * scsi_init_pkt() failed to reallocate 14402 * DMA resources when we attempted to 14403 * retry it. This can happen when an 14404 * mpxio failover is in progress, but 14405 * we don't want to just fail the 14406 * command in this case. 14407 * 14408 * Use timeout(9F) to restart it after 14409 * a 100ms delay. We don't want to 14410 * let sdrunout() restart it, because 14411 * sdrunout() is just supposed to start 14412 * commands that are sitting on the 14413 * wait queue. The un_retry_bp stays 14414 * set until the command completes, but 14415 * sdrunout can be called many times 14416 * before that happens. Since sdrunout 14417 * cannot tell if the un_retry_bp is 14418 * already in the transport, it could 14419 * end up calling scsi_transport() for 14420 * the un_retry_bp multiple times. 14421 * 14422 * Also: don't schedule the callback 14423 * if some other callback is already 14424 * pending. 14425 */ 14426 if (un->un_retry_statp == NULL) { 14427 /* 14428 * restore the kstat pointer to 14429 * keep kstat counts coherent 14430 * when we do retry the command. 14431 */ 14432 un->un_retry_statp = 14433 saved_statp; 14434 } 14435 14436 if ((un->un_startstop_timeid == NULL) && 14437 (un->un_retry_timeid == NULL) && 14438 (un->un_direct_priority_timeid == 14439 NULL)) { 14440 14441 un->un_retry_timeid = 14442 timeout( 14443 sd_start_retry_command, 14444 un, SD_RESTART_TIMEOUT); 14445 } 14446 goto exit; 14447 } 14448 14449 #else 14450 if (bp == immed_bp) { 14451 break; /* Just fail the command */ 14452 } 14453 #endif 14454 14455 /* Add the buf back to the head of the waitq */ 14456 bp->av_forw = un->un_waitq_headp; 14457 un->un_waitq_headp = bp; 14458 if (un->un_waitq_tailp == NULL) { 14459 un->un_waitq_tailp = bp; 14460 } 14461 goto exit; 14462 14463 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14464 /* 14465 * HBA DMA resource failure. Fail the command 14466 * and continue processing of the queues. 14467 */ 14468 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14469 "sd_start_cmds: " 14470 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14471 break; 14472 14473 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14474 /* 14475 * Note:x86: Partial DMA mapping not supported 14476 * for USCSI commands, and all the needed DMA 14477 * resources were not allocated. 14478 */ 14479 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14480 "sd_start_cmds: " 14481 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14482 break; 14483 14484 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14485 /* 14486 * Note:x86: Request cannot fit into CDB based 14487 * on lba and len. 14488 */ 14489 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14490 "sd_start_cmds: " 14491 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14492 break; 14493 14494 default: 14495 /* Should NEVER get here! 
*/
14496 panic("scsi_initpkt error");
14497 /*NOTREACHED*/
14498 }
14499
14500 /*
14501 * Fatal error in allocating a scsi_pkt for this buf.
14502 * Update kstats & return the buf with an error code.
14503 * We must use sd_return_failed_command_no_restart() to
14504 * avoid a recursive call back into sd_start_cmds().
14505 * However this also means that we must keep processing
14506 * the waitq here in order to avoid stalling.
14507 */
14508 if (statp == kstat_waitq_to_runq) {
14509 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
14510 }
14511 sd_return_failed_command_no_restart(un, bp, EIO);
14512 if (bp == immed_bp) {
14513 /* immed_bp is gone by now, so clear this */
14514 immed_bp = NULL;
14515 }
14516 continue;
14517 }
14518 got_pkt:
14519 if (bp == immed_bp) {
14520 /* goto the head of the class.... */
14521 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
14522 }
14523
14524 un->un_ncmds_in_transport++;
14525 SD_UPDATE_KSTATS(un, statp, bp);
14526
14527 /*
14528 * Call scsi_transport() to send the command to the target.
14529 * According to the SCSA architecture, we must drop the mutex here
14530 * before calling scsi_transport() in order to avoid deadlock.
14531 * Note that the scsi_pkt's completion routine can be executed
14532 * (from interrupt context) even before the call to
14533 * scsi_transport() returns.
14534 */
14535 SD_TRACE(SD_LOG_IO_CORE, un,
14536 "sd_start_cmds: calling scsi_transport()\n");
14537 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);
14538
14539 mutex_exit(SD_MUTEX(un));
14540 rval = scsi_transport(xp->xb_pktp);
14541 mutex_enter(SD_MUTEX(un));
14542
14543 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14544 "sd_start_cmds: scsi_transport() returned %d\n", rval);
14545
14546 switch (rval) {
14547 case TRAN_ACCEPT:
14548 /* Clear this with every pkt accepted by the HBA */
14549 un->un_tran_fatal_count = 0;
14550 break; /* Success; try the next cmd (if any) */
14551
14552 case TRAN_BUSY:
14553 un->un_ncmds_in_transport--;
14554 ASSERT(un->un_ncmds_in_transport >= 0);
14555
14556 /*
14557 * Don't retry request sense; the sense data
14558 * is lost when another request is sent.
14559 * Free up the rqs buf and retry
14560 * the original failed cmd. Update kstat.
14561 */
14562 if (bp == un->un_rqs_bp) {
14563 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
14564 bp = sd_mark_rqs_idle(un, xp);
14565 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
14566 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500,
14567 kstat_waitq_enter);
14568 goto exit;
14569 }
14570
14571 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14572 /*
14573 * Free the DMA resources for the scsi_pkt. This will
14574 * allow mpxio to select another path the next time
14575 * we call scsi_transport() with this scsi_pkt.
14576 * See sdintr() for the rationale behind this.
14577 */
14578 if ((un->un_f_is_fibre == TRUE) &&
14579 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
14580 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
14581 scsi_dmafree(xp->xb_pktp);
14582 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
14583 }
14584 #endif
14585
14586 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
14587 /*
14588 * Commands that are SD_PATH_DIRECT_PRIORITY
14589 * are for error recovery situations. These do
14590 * not use the normal command waitq, so if they
14591 * get a TRAN_BUSY we cannot put them back onto
14592 * the waitq for later retry.
One possible 14593 * problem is that there could already be some 14594 * other command on un_retry_bp that is waiting 14595 * for this one to complete, so we would be 14596 * deadlocked if we put this command back onto 14597 * the waitq for later retry (since un_retry_bp 14598 * must complete before the driver gets back to 14599 * commands on the waitq). 14600 * 14601 * To avoid deadlock we must schedule a callback 14602 * that will restart this command after a set 14603 * interval. This should keep retrying for as 14604 * long as the underlying transport keeps 14605 * returning TRAN_BUSY (just like for other 14606 * commands). Use the same timeout interval as 14607 * for the ordinary TRAN_BUSY retry. 14608 */ 14609 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14610 "sd_start_cmds: scsi_transport() returned " 14611 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14612 14613 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14614 un->un_direct_priority_timeid = 14615 timeout(sd_start_direct_priority_command, 14616 bp, SD_BSY_TIMEOUT / 500); 14617 14618 goto exit; 14619 } 14620 14621 /* 14622 * For TRAN_BUSY, we want to reduce the throttle value, 14623 * unless we are retrying a command. 14624 */ 14625 if (bp != un->un_retry_bp) { 14626 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14627 } 14628 14629 /* 14630 * Set up the bp to be tried again 10 ms later. 14631 * Note:x86: Is there a timeout value in the sd_lun 14632 * for this condition? 14633 */ 14634 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 14635 kstat_runq_back_to_waitq); 14636 goto exit; 14637 14638 case TRAN_FATAL_ERROR: 14639 un->un_tran_fatal_count++; 14640 /* FALLTHRU */ 14641 14642 case TRAN_BADPKT: 14643 default: 14644 un->un_ncmds_in_transport--; 14645 ASSERT(un->un_ncmds_in_transport >= 0); 14646 14647 /* 14648 * If this is our REQUEST SENSE command with a 14649 * transport error, we must get back the pointers 14650 * to the original buf, and mark the REQUEST 14651 * SENSE command as "available". 14652 */ 14653 if (bp == un->un_rqs_bp) { 14654 bp = sd_mark_rqs_idle(un, xp); 14655 xp = SD_GET_XBUF(bp); 14656 } else { 14657 /* 14658 * Legacy behavior: do not update transport 14659 * error count for request sense commands. 14660 */ 14661 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14662 } 14663 14664 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14665 sd_print_transport_rejected_message(un, xp, rval); 14666 14667 /* 14668 * We must use sd_return_failed_command_no_restart() to 14669 * avoid a recursive call back into sd_start_cmds(). 14670 * However this also means that we must keep processing 14671 * the waitq here in order to avoid stalling. 14672 */ 14673 sd_return_failed_command_no_restart(un, bp, EIO); 14674 14675 /* 14676 * Notify any threads waiting in sd_ddi_suspend() that 14677 * a command completion has occurred. 14678 */ 14679 if (un->un_state == SD_STATE_SUSPENDED) { 14680 cv_broadcast(&un->un_disk_busy_cv); 14681 } 14682 14683 if (bp == immed_bp) { 14684 /* immed_bp is gone by now, so clear this */ 14685 immed_bp = NULL; 14686 } 14687 break; 14688 } 14689 14690 } while (immed_bp == NULL); 14691 14692 exit: 14693 ASSERT(mutex_owned(SD_MUTEX(un))); 14694 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14695 } 14696 14697 14698 /* 14699 * Function: sd_return_command 14700 * 14701 * Description: Returns a command to its originator (with or without an 14702 * error). Also starts commands waiting to be transported 14703 * to the target. 
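 *
 * (Informal note: the restart is the sd_start_cmds(un, NULL)
 * call in the body below, which is skipped while the instance
 * is in SD_STATE_SUSPENDED.)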
14704 * 14705 * Context: May be called from interrupt, kernel, or timeout context 14706 */ 14707 14708 static void 14709 sd_return_command(struct sd_lun *un, struct buf *bp) 14710 { 14711 struct sd_xbuf *xp; 14712 #if defined(__i386) || defined(__amd64) 14713 struct scsi_pkt *pktp; 14714 #endif 14715 14716 ASSERT(bp != NULL); 14717 ASSERT(un != NULL); 14718 ASSERT(mutex_owned(SD_MUTEX(un))); 14719 ASSERT(bp != un->un_rqs_bp); 14720 xp = SD_GET_XBUF(bp); 14721 ASSERT(xp != NULL); 14722 14723 #if defined(__i386) || defined(__amd64) 14724 pktp = SD_GET_PKTP(bp); 14725 #endif 14726 14727 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14728 14729 #if defined(__i386) || defined(__amd64) 14730 /* 14731 * Note:x86: check for the "sdrestart failed" case. 14732 */ 14733 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 14734 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 14735 (xp->xb_pktp->pkt_resid == 0)) { 14736 14737 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 14738 /* 14739 * Successfully set up next portion of cmd 14740 * transfer, try sending it 14741 */ 14742 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14743 NULL, NULL, 0, (clock_t)0, NULL); 14744 sd_start_cmds(un, NULL); 14745 return; /* Note:x86: need a return here? */ 14746 } 14747 } 14748 #endif 14749 14750 /* 14751 * If this is the failfast bp, clear it from un_failfast_bp. This 14752 * can happen if upon being re-tried the failfast bp either 14753 * succeeded or encountered another error (possibly even a different 14754 * error than the one that precipitated the failfast state, but in 14755 * that case it would have had to exhaust retries as well). Regardless, 14756 * this should not occur whenever the instance is in the active 14757 * failfast state. 14758 */ 14759 if (bp == un->un_failfast_bp) { 14760 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14761 un->un_failfast_bp = NULL; 14762 } 14763 14764 /* 14765 * Clear the failfast state upon successful completion of ANY cmd. 14766 */ 14767 if (bp->b_error == 0) { 14768 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14769 } 14770 14771 /* 14772 * This is used if the command was retried one or more times. Show that 14773 * we are done with it, and allow processing of the waitq to resume. 14774 */ 14775 if (bp == un->un_retry_bp) { 14776 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14777 "sd_return_command: un:0x%p: " 14778 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14779 un->un_retry_bp = NULL; 14780 un->un_retry_statp = NULL; 14781 } 14782 14783 SD_UPDATE_RDWR_STATS(un, bp); 14784 SD_UPDATE_PARTITION_STATS(un, bp); 14785 14786 switch (un->un_state) { 14787 case SD_STATE_SUSPENDED: 14788 /* 14789 * Notify any threads waiting in sd_ddi_suspend() that 14790 * a command completion has occurred. 14791 */ 14792 cv_broadcast(&un->un_disk_busy_cv); 14793 break; 14794 default: 14795 sd_start_cmds(un, NULL); 14796 break; 14797 } 14798 14799 /* Return this command up the iodone chain to its originator. */ 14800 mutex_exit(SD_MUTEX(un)); 14801 14802 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14803 xp->xb_pktp = NULL; 14804 14805 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14806 14807 ASSERT(!mutex_owned(SD_MUTEX(un))); 14808 mutex_enter(SD_MUTEX(un)); 14809 14810 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 14811 } 14812 14813 14814 /* 14815 * Function: sd_return_failed_command 14816 * 14817 * Description: Command completion when an error occurred. 
14818 *
14819 * Context: May be called from interrupt context
14820 */
14821
14822 static void
14823 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
14824 {
14825 ASSERT(bp != NULL);
14826 ASSERT(un != NULL);
14827 ASSERT(mutex_owned(SD_MUTEX(un)));
14828
14829 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14830 "sd_return_failed_command: entry\n");
14831
14832 /*
14833 * b_resid could already be nonzero due to a partial data
14834 * transfer, so do not change it here.
14835 */
14836 SD_BIOERROR(bp, errcode);
14837
14838 sd_return_command(un, bp);
14839 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14840 "sd_return_failed_command: exit\n");
14841 }
14842
14843
14844 /*
14845 * Function: sd_return_failed_command_no_restart
14846 *
14847 * Description: Same as sd_return_failed_command, but ensures that no
14848 * call back into sd_start_cmds will be issued.
14849 *
14850 * Context: May be called from interrupt context
14851 */
14852
14853 static void
14854 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
14855 int errcode)
14856 {
14857 struct sd_xbuf *xp;
14858
14859 ASSERT(bp != NULL);
14860 ASSERT(un != NULL);
14861 ASSERT(mutex_owned(SD_MUTEX(un)));
14862 xp = SD_GET_XBUF(bp);
14863 ASSERT(xp != NULL);
14864 ASSERT(errcode != 0);
14865
14866 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14867 "sd_return_failed_command_no_restart: entry\n");
14868
14869 /*
14870 * b_resid could already be nonzero due to a partial data
14871 * transfer, so do not change it here.
14872 */
14873 SD_BIOERROR(bp, errcode);
14874
14875 /*
14876 * If this is the failfast bp, clear it. This can happen if the
14877 * failfast bp encountered a fatal error when we attempted to
14878 * re-try it (such as a scsi_transport(9F) failure). However
14879 * we should NOT be in an active failfast state if the failfast
14880 * bp is not NULL.
14881 */
14882 if (bp == un->un_failfast_bp) {
14883 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
14884 un->un_failfast_bp = NULL;
14885 }
14886
14887 if (bp == un->un_retry_bp) {
14888 /*
14889 * This command was retried one or more times. Show that we are
14890 * done with it, and allow processing of the waitq to resume.
14891 */
14892 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14893 "sd_return_failed_command_no_restart: "
14894 "un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
14895 un->un_retry_bp = NULL;
14896 un->un_retry_statp = NULL;
14897 }
14898
14899 SD_UPDATE_RDWR_STATS(un, bp);
14900 SD_UPDATE_PARTITION_STATS(un, bp);
14901
14902 mutex_exit(SD_MUTEX(un));
14903
14904 if (xp->xb_pktp != NULL) {
14905 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
14906 xp->xb_pktp = NULL;
14907 }
14908
14909 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
14910
14911 mutex_enter(SD_MUTEX(un));
14912
14913 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14914 "sd_return_failed_command_no_restart: exit\n");
14915 }
14916
14917
14918 /*
14919 * Function: sd_retry_command
14920 *
14921 * Description: Queue up a command for retry, or (optionally) fail it
14922 * if retry counts are exhausted.
14923 *
14924 * Arguments: un - Pointer to the sd_lun struct for the target.
14925 *
14926 * bp - Pointer to the buf for the command to be retried.
14927 *
14928 * retry_check_flag - Flag to see which (if any) of the retry
14929 * counts should be decremented/checked. If the indicated
14930 * retry count is exhausted, then the command will not be
14931 * retried; it will be failed instead.
This should use a
14932 * value equal to one of the following:
14933 *
14934 * SD_RETRIES_NOCHECK
14935 * SD_RETRIES_STANDARD
14936 * SD_RETRIES_VICTIM
14937 *
14938 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
14939 * if the check should be made to see if FLAG_ISOLATE is set
14940 * in the pkt. If FLAG_ISOLATE is set, then the command is
14941 * not retried, it is simply failed.
14942 *
14943 * user_funcp - Ptr to function to call before dispatching the
14944 * command. May be NULL if no action needs to be performed.
14945 * (Primarily intended for printing messages.)
14946 *
14947 * user_arg - Optional argument to be passed along to
14948 * the user_funcp call.
14949 *
14950 * failure_code - errno return code to set in the bp if the
14951 * command is going to be failed.
14952 *
14953 * retry_delay - Retry delay interval in (clock_t) units. May
14954 * be zero, which indicates that the command should be retried
14955 * immediately (i.e., without an intervening delay).
14956 *
14957 * statp - Ptr to kstat function to be updated if the command
14958 * is queued for a delayed retry. May be NULL if no kstat
14959 * update is desired.
14960 *
14961 * Context: May be called from interrupt context.
14962 */
14963
14964 static void
14965 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
14966 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
14967 code), void *user_arg, int failure_code, clock_t retry_delay,
14968 void (*statp)(kstat_io_t *))
14969 {
14970 struct sd_xbuf *xp;
14971 struct scsi_pkt *pktp;
14972
14973 ASSERT(un != NULL);
14974 ASSERT(mutex_owned(SD_MUTEX(un)));
14975 ASSERT(bp != NULL);
14976 xp = SD_GET_XBUF(bp);
14977 ASSERT(xp != NULL);
14978 pktp = SD_GET_PKTP(bp);
14979 ASSERT(pktp != NULL);
14980
14981 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14982 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
14983
14984 /*
14985 * If we are syncing or dumping, fail the command to avoid
14986 * recursively calling back into scsi_transport().
14987 */
14988 if (ddi_in_panic()) {
14989 goto fail_command_no_log;
14990 }
14991
14992 /*
14993 * We should never be retrying a command with FLAG_DIAGNOSE set, so
14994 * log an error and fail the command.
14995 */
14996 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
14997 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
14998 "ERROR, retrying FLAG_DIAGNOSE command.\n");
14999 sd_dump_memory(un, SD_LOG_IO, "CDB",
15000 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
15001 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
15002 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
15003 goto fail_command;
15004 }
15005
15006 /*
15007 * If we are suspended, then put the command onto the head of the
15008 * wait queue since we don't want to start more commands.
15009 */
15010 switch (un->un_state) {
15011 case SD_STATE_SUSPENDED:
15012 case SD_STATE_DUMPING:
15013 bp->av_forw = un->un_waitq_headp;
15014 un->un_waitq_headp = bp;
15015 if (un->un_waitq_tailp == NULL) {
15016 un->un_waitq_tailp = bp;
15017 }
15018 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
15019 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
15020 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
15021 return;
15022 default:
15023 break;
15024 }
15025
15026 /*
15027 * If the caller wants us to check FLAG_ISOLATE, then see if that
15028 * is set; if it is then we do not want to retry the command.
15029 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
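 *
 * Illustration only (not a compiled call site): a caller that wants
 * standard retry accounting plus the FLAG_ISOLATE check described
 * above would OR the flags together, e.g.
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE,
 *	    NULL, NULL, EIO, SD_BSY_TIMEOUT, kstat_waitq_enter);
 *
 * (The non-flag argument values here are hypothetical.)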
15030 */
15031 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15032 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15033 goto fail_command;
15034 }
15035 }
15036
15037
15038 /*
15039 * If SD_RETRIES_FAILFAST is set, it indicates that either a
15040 * command timeout or a selection timeout has occurred. This means
15041 * that we were unable to establish any kind of communication with
15042 * the target, and subsequent retries and/or commands are likely
15043 * to encounter similar results and take a long time to complete.
15044 *
15045 * If this is a failfast error condition, we need to update the
15046 * failfast state, even if this bp does not have B_FAILFAST set.
15047 */
15048 if (retry_check_flag & SD_RETRIES_FAILFAST) {
15049 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15050 ASSERT(un->un_failfast_bp == NULL);
15051 /*
15052 * If we are already in the active failfast state, and
15053 * another failfast error condition has been detected,
15054 * then fail this command if it has B_FAILFAST set.
15055 * If B_FAILFAST is clear, then maintain the legacy
15056 * behavior of retrying heroically, even though this will
15057 * take a lot more time to fail the command.
15058 */
15059 if (bp->b_flags & B_FAILFAST) {
15060 goto fail_command;
15061 }
15062 } else {
15063 /*
15064 * We're not in the active failfast state, but we
15065 * have a failfast error condition, so we must begin
15066 * transition to the next state. We do this regardless
15067 * of whether or not this bp has B_FAILFAST set.
15068 */
15069 if (un->un_failfast_bp == NULL) {
15070 /*
15071 * This is the first bp to meet a failfast
15072 * condition so save it on un_failfast_bp &
15073 * do normal retry processing. Do not enter
15074 * active failfast state yet. This marks
15075 * entry into the "failfast pending" state.
15076 */
15077 un->un_failfast_bp = bp;
15078
15079 } else if (un->un_failfast_bp == bp) {
15080 /*
15081 * This is the second time *this* bp has
15082 * encountered a failfast error condition,
15083 * so enter active failfast state & flush
15084 * queues as appropriate.
15085 */
15086 un->un_failfast_state = SD_FAILFAST_ACTIVE;
15087 un->un_failfast_bp = NULL;
15088 sd_failfast_flushq(un);
15089
15090 /*
15091 * Fail this bp now if B_FAILFAST set;
15092 * otherwise continue with retries. (It would
15093 * be pretty ironic if this bp succeeded on a
15094 * subsequent retry after we just flushed all
15095 * the queues).
15096 */
15097 if (bp->b_flags & B_FAILFAST) {
15098 goto fail_command;
15099 }
15100
15101 #if !defined(lint) && !defined(__lint)
15102 } else {
15103 /*
15104 * If neither of the preceding conditionals
15105 * was true, it means that there is some
15106 * *other* bp that has met an initial failfast
15107 * condition and is currently either being
15108 * retried or is waiting to be retried. In
15109 * that case we should perform normal retry
15110 * processing on *this* bp, since there is a
15111 * chance that the current failfast condition
15112 * is transient and recoverable. If that does
15113 * not turn out to be the case, then retries
15114 * will be cleared when the wait queue is
15115 * flushed anyway.
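 *
 * An informal sketch of the failfast state machine described
 * above (comments only, not code):
 *
 *	INACTIVE --(1st failfast error on a bp)--> "pending"
 *	    (un_failfast_bp == bp; normal retries continue)
 *	"pending" --(2nd failfast error on same bp)--> ACTIVE
 *	    (queues flushed via sd_failfast_flushq())
 *	any --(success, or error without SD_RETRIES_FAILFAST)--> INACTIVE
 *	    (the "pending" un_failfast_bp is not affected by this)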
15116 */
15117 #endif
15118 }
15119 }
15120 } else {
15121 /*
15122 * SD_RETRIES_FAILFAST is clear, which indicates that we
15123 * likely were able to at least establish some level of
15124 * communication with the target and subsequent commands
15125 * and/or retries are likely to get through to the target.
15126 * In this case we want to be aggressive about clearing
15127 * the failfast state. Note that this does not affect
15128 * the "failfast pending" condition.
15129 */
15130 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15131 }
15132
15133
15134 /*
15135 * Check the specified retry count to see if we can still do
15136 * any retries with this pkt before we should fail it.
15137 */
15138 switch (retry_check_flag & SD_RETRIES_MASK) {
15139 case SD_RETRIES_VICTIM:
15140 /*
15141 * Check the victim retry count. If exhausted, then fall
15142 * thru & check against the standard retry count.
15143 */
15144 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
15145 /* Increment count & proceed with the retry */
15146 xp->xb_victim_retry_count++;
15147 break;
15148 }
15149 /* Victim retries exhausted, fall back to std. retries... */
15150 /* FALLTHRU */
15151
15152 case SD_RETRIES_STANDARD:
15153 if (xp->xb_retry_count >= un->un_retry_count) {
15154 /* Retries exhausted, fail the command */
15155 SD_TRACE(SD_LOG_IO_CORE, un,
15156 "sd_retry_command: retries exhausted!\n");
15157 /*
15158 * Update b_resid for failed SCMD_READ & SCMD_WRITE
15159 * commands with nonzero pkt_resid.
15160 */
15161 if ((pktp->pkt_reason == CMD_CMPLT) &&
15162 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
15163 (pktp->pkt_resid != 0)) {
15164 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
15165 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
15166 SD_UPDATE_B_RESID(bp, pktp);
15167 }
15168 }
15169 goto fail_command;
15170 }
15171 xp->xb_retry_count++;
15172 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15173 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15174 break;
15175
15176 case SD_RETRIES_UA:
15177 if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
15178 /* Retries exhausted, fail the command */
15179 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15180 "Unit Attention retries exhausted. "
15181 "Check the target.\n");
15182 goto fail_command;
15183 }
15184 xp->xb_ua_retry_count++;
15185 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15186 "sd_retry_command: retry count:%d\n",
15187 xp->xb_ua_retry_count);
15188 break;
15189
15190 case SD_RETRIES_BUSY:
15191 if (xp->xb_retry_count >= un->un_busy_retry_count) {
15192 /* Retries exhausted, fail the command */
15193 SD_TRACE(SD_LOG_IO_CORE, un,
15194 "sd_retry_command: retries exhausted!\n");
15195 goto fail_command;
15196 }
15197 xp->xb_retry_count++;
15198 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15199 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15200 break;
15201
15202 case SD_RETRIES_NOCHECK:
15203 default:
15204 /* No retry count to check. Just proceed with the retry */
15205 break;
15206 }
15207
15208 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15209
15210 /*
15211 * If we were given a zero timeout, we must attempt to retry the
15212 * command immediately (i.e., without a delay).
15213 */
15214 if (retry_delay == 0) {
15215 /*
15216 * Check some limiting conditions to see if we can actually
15217 * do the immediate retry. If we cannot, then we must
15218 * fall back to queueing up a delayed retry.
15219 */ 15220 if (un->un_ncmds_in_transport >= un->un_throttle) { 15221 /* 15222 * We are at the throttle limit for the target, 15223 * fall back to delayed retry. 15224 */ 15225 retry_delay = SD_BSY_TIMEOUT; 15226 statp = kstat_waitq_enter; 15227 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15228 "sd_retry_command: immed. retry hit " 15229 "throttle!\n"); 15230 } else { 15231 /* 15232 * We're clear to proceed with the immediate retry. 15233 * First call the user-provided function (if any) 15234 */ 15235 if (user_funcp != NULL) { 15236 (*user_funcp)(un, bp, user_arg, 15237 SD_IMMEDIATE_RETRY_ISSUED); 15238 } 15239 15240 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15241 "sd_retry_command: issuing immediate retry\n"); 15242 15243 /* 15244 * Call sd_start_cmds() to transport the command to 15245 * the target. 15246 */ 15247 sd_start_cmds(un, bp); 15248 15249 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15250 "sd_retry_command exit\n"); 15251 return; 15252 } 15253 } 15254 15255 /* 15256 * Set up to retry the command after a delay. 15257 * First call the user-provided function (if any) 15258 */ 15259 if (user_funcp != NULL) { 15260 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15261 } 15262 15263 sd_set_retry_bp(un, bp, retry_delay, statp); 15264 15265 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15266 return; 15267 15268 fail_command: 15269 15270 if (user_funcp != NULL) { 15271 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15272 } 15273 15274 fail_command_no_log: 15275 15276 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15277 "sd_retry_command: returning failed command\n"); 15278 15279 sd_return_failed_command(un, bp, failure_code); 15280 15281 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15282 } 15283 15284 15285 /* 15286 * Function: sd_set_retry_bp 15287 * 15288 * Description: Set up the given bp for retry. 15289 * 15290 * Arguments: un - ptr to associated softstate 15291 * bp - ptr to buf(9S) for the command 15292 * retry_delay - time interval before issuing retry (may be 0) 15293 * statp - optional pointer to kstat function 15294 * 15295 * Context: May be called under interrupt context 15296 */ 15297 15298 static void 15299 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15300 void (*statp)(kstat_io_t *)) 15301 { 15302 ASSERT(un != NULL); 15303 ASSERT(mutex_owned(SD_MUTEX(un))); 15304 ASSERT(bp != NULL); 15305 15306 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15307 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15308 15309 /* 15310 * Indicate that the command is being retried. This will not allow any 15311 * other commands on the wait queue to be transported to the target 15312 * until this command has been completed (success or failure). The 15313 * "retry command" is not transported to the target until the given 15314 * time delay expires, unless the user specified a 0 retry_delay. 15315 * 15316 * Note: the timeout(9F) callback routine is what actually calls 15317 * sd_start_cmds() to transport the command, with the exception of a 15318 * zero retry_delay. The only current implementor of a zero retry delay 15319 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15320 */ 15321 if (un->un_retry_bp == NULL) { 15322 ASSERT(un->un_retry_statp == NULL); 15323 un->un_retry_bp = bp; 15324 15325 /* 15326 * If the user has not specified a delay the command should 15327 * be queued and no timeout should be scheduled. 
15328 */ 15329 if (retry_delay == 0) { 15330 /* 15331 * Save the kstat pointer that will be used in the 15332 * call to SD_UPDATE_KSTATS() below, so that 15333 * sd_start_cmds() can correctly decrement the waitq 15334 * count when it is time to transport this command. 15335 */ 15336 un->un_retry_statp = statp; 15337 goto done; 15338 } 15339 } 15340 15341 if (un->un_retry_bp == bp) { 15342 /* 15343 * Save the kstat pointer that will be used in the call to 15344 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15345 * correctly decrement the waitq count when it is time to 15346 * transport this command. 15347 */ 15348 un->un_retry_statp = statp; 15349 15350 /* 15351 * Schedule a timeout if: 15352 * 1) The user has specified a delay. 15353 * 2) There is not a START_STOP_UNIT callback pending. 15354 * 15355 * If no delay has been specified, then it is up to the caller 15356 * to ensure that IO processing continues without stalling. 15357 * Effectively, this means that the caller will issue the 15358 * required call to sd_start_cmds(). The START_STOP_UNIT 15359 * callback does this after the START STOP UNIT command has 15360 * completed. In either of these cases we should not schedule 15361 * a timeout callback here. Also don't schedule the timeout if 15362 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15363 */ 15364 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15365 (un->un_direct_priority_timeid == NULL)) { 15366 un->un_retry_timeid = 15367 timeout(sd_start_retry_command, un, retry_delay); 15368 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15369 "sd_set_retry_bp: setting timeout: un: 0x%p" 15370 " bp:0x%p un_retry_timeid:0x%p\n", 15371 un, bp, un->un_retry_timeid); 15372 } 15373 } else { 15374 /* 15375 * We only get in here if there is already another command 15376 * waiting to be retried. In this case, we just put the 15377 * given command onto the wait queue, so it can be transported 15378 * after the current retry command has completed. 15379 * 15380 * Also we have to make sure that if the command at the head 15381 * of the wait queue is the un_failfast_bp, that we do not 15382 * put ahead of it any other commands that are to be retried. 15383 */ 15384 if ((un->un_failfast_bp != NULL) && 15385 (un->un_failfast_bp == un->un_waitq_headp)) { 15386 /* 15387 * Enqueue this command AFTER the first command on 15388 * the wait queue (which is also un_failfast_bp). 15389 */ 15390 bp->av_forw = un->un_waitq_headp->av_forw; 15391 un->un_waitq_headp->av_forw = bp; 15392 if (un->un_waitq_headp == un->un_waitq_tailp) { 15393 un->un_waitq_tailp = bp; 15394 } 15395 } else { 15396 /* Enqueue this command at the head of the waitq. */ 15397 bp->av_forw = un->un_waitq_headp; 15398 un->un_waitq_headp = bp; 15399 if (un->un_waitq_tailp == NULL) { 15400 un->un_waitq_tailp = bp; 15401 } 15402 } 15403 15404 if (statp == NULL) { 15405 statp = kstat_waitq_enter; 15406 } 15407 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15408 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15409 } 15410 15411 done: 15412 if (statp != NULL) { 15413 SD_UPDATE_KSTATS(un, statp, bp); 15414 } 15415 15416 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15417 "sd_set_retry_bp: exit un:0x%p\n", un); 15418 } 15419 15420 15421 /* 15422 * Function: sd_start_retry_command 15423 * 15424 * Description: Start the command that has been waiting on the target's 15425 * retry queue. Called from timeout(9F) context after the 15426 * retry delay interval has expired. 
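 *
 * (Informal note: this is the timeout(9F) handler that
 * sd_set_retry_bp() schedules on un_retry_timeid.)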
15427 *
15428 * Arguments: arg - pointer to associated softstate for the device.
15429 *
15430 * Context: timeout(9F) thread context. May not sleep.
15431 */
15432
15433 static void
15434 sd_start_retry_command(void *arg)
15435 {
15436 struct sd_lun *un = arg;
15437
15438 ASSERT(un != NULL);
15439 ASSERT(!mutex_owned(SD_MUTEX(un)));
15440
15441 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15442 "sd_start_retry_command: entry\n");
15443
15444 mutex_enter(SD_MUTEX(un));
15445
15446 un->un_retry_timeid = NULL;
15447
15448 if (un->un_retry_bp != NULL) {
15449 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15450 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
15451 un, un->un_retry_bp);
15452 sd_start_cmds(un, un->un_retry_bp);
15453 }
15454
15455 mutex_exit(SD_MUTEX(un));
15456
15457 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15458 "sd_start_retry_command: exit\n");
15459 }
15460
15461
15462 /*
15463 * Function: sd_start_direct_priority_command
15464 *
15465 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
15466 * received TRAN_BUSY when we called scsi_transport() to send it
15467 * to the underlying HBA. This function is called from timeout(9F)
15468 * context after the delay interval has expired.
15469 *
15470 * Arguments: arg - pointer to associated buf(9S) to be restarted.
15471 *
15472 * Context: timeout(9F) thread context. May not sleep.
15473 */
15474
15475 static void
15476 sd_start_direct_priority_command(void *arg)
15477 {
15478 struct buf *priority_bp = arg;
15479 struct sd_lun *un;
15480
15481 ASSERT(priority_bp != NULL);
15482 un = SD_GET_UN(priority_bp);
15483 ASSERT(un != NULL);
15484 ASSERT(!mutex_owned(SD_MUTEX(un)));
15485
15486 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15487 "sd_start_direct_priority_command: entry\n");
15488
15489 mutex_enter(SD_MUTEX(un));
15490 un->un_direct_priority_timeid = NULL;
15491 sd_start_cmds(un, priority_bp);
15492 mutex_exit(SD_MUTEX(un));
15493
15494 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15495 "sd_start_direct_priority_command: exit\n");
15496 }
15497
15498
15499 /*
15500 * Function: sd_send_request_sense_command
15501 *
15502 * Description: Sends a REQUEST SENSE command to the target
15503 *
15504 * Context: May be called from interrupt context.
15505 */
15506
15507 static void
15508 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
15509 struct scsi_pkt *pktp)
15510 {
15511 ASSERT(bp != NULL);
15512 ASSERT(un != NULL);
15513 ASSERT(mutex_owned(SD_MUTEX(un)));
15514
15515 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
15516 "entry: buf:0x%p\n", bp);
15517
15518 /*
15519 * If we are syncing or dumping, then fail the command to avoid a
15520 * recursive callback into scsi_transport(). Also fail the command
15521 * if we are suspended (legacy behavior).
15522 */
15523 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
15524 (un->un_state == SD_STATE_DUMPING)) {
15525 sd_return_failed_command(un, bp, EIO);
15526 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15527 "sd_send_request_sense_command: syncing/dumping, exit\n");
15528 return;
15529 }
15530
15531 /*
15532 * Retry the failed command and don't issue the request sense if:
15533 * 1) the sense buf is busy
15534 * 2) we have 1 or more outstanding commands on the target
15535 * (the sense data will be cleared or invalidated anyway)
15536 *
15537 * Note: There could be an issue with not checking a retry limit here;
15538 * the problem is determining which retry limit to check.
15539 */
15540 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
15541 /* Don't retry if the command is flagged as non-retryable */
15542 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
15543 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
15544 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter);
15545 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15546 "sd_send_request_sense_command: "
15547 "at full throttle, retrying exit\n");
15548 } else {
15549 sd_return_failed_command(un, bp, EIO);
15550 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15551 "sd_send_request_sense_command: "
15552 "at full throttle, non-retryable exit\n");
15553 }
15554 return;
15555 }
15556
15557 sd_mark_rqs_busy(un, bp);
15558 sd_start_cmds(un, un->un_rqs_bp);
15559
15560 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15561 "sd_send_request_sense_command: exit\n");
15562 }
15563
15564
15565 /*
15566 * Function: sd_mark_rqs_busy
15567 *
15568 * Description: Indicate that the request sense bp for this instance is
15569 * in use.
15570 *
15571 * Context: May be called under interrupt context
15572 */
15573
15574 static void
15575 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
15576 {
15577 struct sd_xbuf *sense_xp;
15578
15579 ASSERT(un != NULL);
15580 ASSERT(bp != NULL);
15581 ASSERT(mutex_owned(SD_MUTEX(un)));
15582 ASSERT(un->un_sense_isbusy == 0);
15583
15584 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
15585 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
15586
15587 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
15588 ASSERT(sense_xp != NULL);
15589
15590 SD_INFO(SD_LOG_IO, un,
15591 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
15592
15593 ASSERT(sense_xp->xb_pktp != NULL);
15594 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
15595 == (FLAG_SENSING | FLAG_HEAD));
15596
15597 un->un_sense_isbusy = 1;
15598 un->un_rqs_bp->b_resid = 0;
15599 sense_xp->xb_pktp->pkt_resid = 0;
15600 sense_xp->xb_pktp->pkt_reason = 0;
15601
15602 /* So we can get back the bp at interrupt time! */
15603 sense_xp->xb_sense_bp = bp;
15604
15605 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
15606
15607 /*
15608 * Mark this buf as awaiting sense data. (This is already set in
15609 * the pkt_flags for the RQS packet.)
15610 */
15611 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
15612
15613 sense_xp->xb_retry_count = 0;
15614 sense_xp->xb_victim_retry_count = 0;
15615 sense_xp->xb_ua_retry_count = 0;
15616 sense_xp->xb_dma_resid = 0;
15617
15618 /* Clean up the fields for auto-request sense */
15619 sense_xp->xb_sense_status = 0;
15620 sense_xp->xb_sense_state = 0;
15621 sense_xp->xb_sense_resid = 0;
15622 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
15623
15624 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
15625 }
15626
15627
15628 /*
15629 * Function: sd_mark_rqs_idle
15630 *
15631 * Description: SD_MUTEX must be held continuously through this routine
15632 * to prevent reuse of the rqs struct before the caller can
15633 * complete its processing.
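 *
 * A usage sketch (mirroring the TRAN_BUSY path in sd_start_cmds()):
 * the caller recovers the original buf that was awaiting sense data,
 * e.g. bp = sd_mark_rqs_idle(un, xp); and then retries or fails that
 * original command.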
15634 *
15635 * Return Code: Pointer to the RQS buf
15636 *
15637 * Context: May be called under interrupt context
15638 */
15639
15640 static struct buf *
15641 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
15642 {
15643 struct buf *bp;
15644 ASSERT(un != NULL);
15645 ASSERT(sense_xp != NULL);
15646 ASSERT(mutex_owned(SD_MUTEX(un)));
15647 ASSERT(un->un_sense_isbusy != 0);
15648
15649 un->un_sense_isbusy = 0;
15650 bp = sense_xp->xb_sense_bp;
15651 sense_xp->xb_sense_bp = NULL;
15652
15653 /* This pkt is no longer interested in getting sense data */
15654 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
15655
15656 return (bp);
15657 }
15658
15659
15660
15661 /*
15662 * Function: sd_alloc_rqs
15663 *
15664 * Description: Set up the unit to receive auto request sense data
15665 *
15666 * Return Code: DDI_SUCCESS or DDI_FAILURE
15667 *
15668 * Context: Called under attach(9E) context
15669 */
15670
15671 static int
15672 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
15673 {
15674 struct sd_xbuf *xp;
15675
15676 ASSERT(un != NULL);
15677 ASSERT(!mutex_owned(SD_MUTEX(un)));
15678 ASSERT(un->un_rqs_bp == NULL);
15679 ASSERT(un->un_rqs_pktp == NULL);
15680
15681 /*
15682 * First allocate the required buf and scsi_pkt structs, then set up
15683 * the CDB in the scsi_pkt for a REQUEST SENSE command.
15684 */
15685 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
15686 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
15687 if (un->un_rqs_bp == NULL) {
15688 return (DDI_FAILURE);
15689 }
15690
15691 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
15692 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
15693
15694 if (un->un_rqs_pktp == NULL) {
15695 sd_free_rqs(un);
15696 return (DDI_FAILURE);
15697 }
15698
15699 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
15700 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
15701 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
15702
15703 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
15704
15705 /* Set up the other needed members in the ARQ scsi_pkt. */
15706 un->un_rqs_pktp->pkt_comp = sdintr;
15707 un->un_rqs_pktp->pkt_time = sd_io_time;
15708 un->un_rqs_pktp->pkt_flags |=
15709 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */
15710
15711 /*
15712 * Allocate & init the sd_xbuf struct for the RQS command. Do not
15713 * provide any initpkt, destroypkt routines as we take care of
15714 * scsi_pkt allocation/freeing here and in sd_free_rqs().
15715 */
15716 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
15717 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
15718 xp->xb_pktp = un->un_rqs_pktp;
15719 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15720 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
15721 un, xp, un->un_rqs_pktp, un->un_rqs_bp);
15722
15723 /*
15724 * Save the pointer to the request sense private bp so it can
15725 * be retrieved in sdintr.
15726 */
15727 un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
15728 ASSERT(un->un_rqs_bp->b_private == xp);
15729
15730 /*
15731 * See if the HBA supports auto-request sense for the specified
15732 * target/lun. If it does, then try to enable it (if not already
15733 * enabled).
15734 *
15735 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
15736 * failure, while for other HBAs (pln) scsi_ifsetcap will always
15737 * return success. However, in both of these cases ARQ is always
15738 * enabled and scsi_ifgetcap will always return true.
The best approach
15739 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
15740 *
15741 * The third case is an HBA (adp) that always returns enabled on
15742 * scsi_ifgetcap even when ARQ is not enabled; the best approach
15743 * there is to issue a scsi_ifsetcap followed by a scsi_ifgetcap.
15744 * Note: this case is to circumvent the Adaptec bug. (x86 only)
15745 */
15746
15747 if (un->un_f_is_fibre == TRUE) {
15748 un->un_f_arq_enabled = TRUE;
15749 } else {
15750 #if defined(__i386) || defined(__amd64)
15751 /*
15752 * Circumvent the Adaptec bug, remove this code when
15753 * the bug is fixed
15754 */
15755 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
15756 #endif
15757 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
15758 case 0:
15759 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15760 "sd_alloc_rqs: HBA supports ARQ\n");
15761 /*
15762 * ARQ is supported by this HBA but currently is not
15763 * enabled. Attempt to enable it and if successful then
15764 * mark this instance as ARQ enabled.
15765 */
15766 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
15767 == 1) {
15768 /* Successfully enabled ARQ in the HBA */
15769 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15770 "sd_alloc_rqs: ARQ enabled\n");
15771 un->un_f_arq_enabled = TRUE;
15772 } else {
15773 /* Could not enable ARQ in the HBA */
15774 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15775 "sd_alloc_rqs: failed ARQ enable\n");
15776 un->un_f_arq_enabled = FALSE;
15777 }
15778 break;
15779 case 1:
15780 /*
15781 * ARQ is supported by this HBA and is already enabled.
15782 * Just mark ARQ as enabled for this instance.
15783 */
15784 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15785 "sd_alloc_rqs: ARQ already enabled\n");
15786 un->un_f_arq_enabled = TRUE;
15787 break;
15788 default:
15789 /*
15790 * ARQ is not supported by this HBA; disable it for this
15791 * instance.
15792 */
15793 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15794 "sd_alloc_rqs: HBA does not support ARQ\n");
15795 un->un_f_arq_enabled = FALSE;
15796 break;
15797 }
15798 }
15799
15800 return (DDI_SUCCESS);
15801 }
15802
15803
15804 /*
15805 * Function: sd_free_rqs
15806 *
15807 * Description: Cleanup for the per-instance RQS command.
15808 *
15809 * Context: Kernel thread context
15810 */
15811
15812 static void
15813 sd_free_rqs(struct sd_lun *un)
15814 {
15815 ASSERT(un != NULL);
15816
15817 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
15818
15819 /*
15820 * If consistent memory is bound to a scsi_pkt, the pkt
15821 * has to be destroyed *before* freeing the consistent memory.
15822 * Don't change the sequence of these operations.
15823 * scsi_destroy_pkt() might access memory, which isn't allowed,
15824 * after it was freed in scsi_free_consistent_buf().
15825 */
15826 if (un->un_rqs_pktp != NULL) {
15827 scsi_destroy_pkt(un->un_rqs_pktp);
15828 un->un_rqs_pktp = NULL;
15829 }
15830
15831 if (un->un_rqs_bp != NULL) {
15832 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf));
15833 scsi_free_consistent_buf(un->un_rqs_bp);
15834 un->un_rqs_bp = NULL;
15835 }
15836 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
15837 }
15838
15839
15840
15841 /*
15842 * Function: sd_reduce_throttle
15843 *
15844 * Description: Reduces the maximum # of outstanding commands on a
15845 * target to the current number of outstanding commands.
15846 * Queues a timeout(9F) callback to restore the limit
15847 * after a specified interval has elapsed.
15848 * Typically used when we get a TRAN_BUSY return code
15849 * back from scsi_transport().
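 *
 * A worked example with hypothetical values: if un_throttle is
 * 256 and un_ncmds_in_transport is 3 when a TRAN_BUSY arrives,
 * un_throttle drops to 3; the timeout(9F) callback
 * (sd_restore_throttle) later ratchets it back toward the saved
 * value.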
15850 *
15851 * Arguments: un - ptr to the sd_lun softstate struct
15852 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
15853 *
15854 * Context: May be called from interrupt context
15855 */
15856
15857 static void
15858 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
15859 {
15860 ASSERT(un != NULL);
15861 ASSERT(mutex_owned(SD_MUTEX(un)));
15862 ASSERT(un->un_ncmds_in_transport >= 0);
15863
15864 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
15865 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
15866 un, un->un_throttle, un->un_ncmds_in_transport);
15867
15868 if (un->un_throttle > 1) {
15869 if (un->un_f_use_adaptive_throttle == TRUE) {
15870 switch (throttle_type) {
15871 case SD_THROTTLE_TRAN_BUSY:
15872 if (un->un_busy_throttle == 0) {
15873 un->un_busy_throttle = un->un_throttle;
15874 }
15875 break;
15876 case SD_THROTTLE_QFULL:
15877 un->un_busy_throttle = 0;
15878 break;
15879 default:
15880 ASSERT(FALSE);
15881 }
15882
15883 if (un->un_ncmds_in_transport > 0) {
15884 un->un_throttle = un->un_ncmds_in_transport;
15885 }
15886
15887 } else {
15888 if (un->un_ncmds_in_transport == 0) {
15889 un->un_throttle = 1;
15890 } else {
15891 un->un_throttle = un->un_ncmds_in_transport;
15892 }
15893 }
15894 }
15895
15896 /* Reschedule the timeout if none is currently active */
15897 if (un->un_reset_throttle_timeid == NULL) {
15898 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
15899 un, SD_THROTTLE_RESET_INTERVAL);
15900 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15901 "sd_reduce_throttle: timeout scheduled!\n");
15902 }
15903
15904 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
15905 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
15906 }
15907
15908
15909
15910 /*
15911 * Function: sd_restore_throttle
15912 *
15913 * Description: Callback function for timeout(9F). Resets the current
15914 * value of un->un_throttle to its default.
15915 *
15916 * Arguments: arg - pointer to associated softstate for the device.
15917 *
15918 * Context: May be called from interrupt context
15919 */
15920
15921 static void
15922 sd_restore_throttle(void *arg)
15923 {
15924 struct sd_lun *un = arg;
15925
15926 ASSERT(un != NULL);
15927 ASSERT(!mutex_owned(SD_MUTEX(un)));
15928
15929 mutex_enter(SD_MUTEX(un));
15930
15931 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
15932 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
15933
15934 un->un_reset_throttle_timeid = NULL;
15935
15936 if (un->un_f_use_adaptive_throttle == TRUE) {
15937 /*
15938 * If un_busy_throttle is nonzero, then it contains the
15939 * value that un_throttle was when we got a TRAN_BUSY back
15940 * from scsi_transport(). We want to revert to this
15941 * value.
15942 *
15943 * In the QFULL case, the throttle limit will incrementally
15944 * increase until it reaches max throttle.
15945 */
15946 if (un->un_busy_throttle > 0) {
15947 un->un_throttle = un->un_busy_throttle;
15948 un->un_busy_throttle = 0;
15949 } else {
15950 /*
15951 * Increase the throttle by 10% to open the gate
15952 * slowly; schedule another restore if the saved
15953 * throttle has not yet been reached.
15954 */
15955 short throttle;
15956 if (sd_qfull_throttle_enable) {
15957 throttle = un->un_throttle +
15958 max((un->un_throttle / 10), 1);
15959 un->un_throttle =
15960 (throttle < un->un_saved_throttle) ?
15961 throttle : un->un_saved_throttle;
15962 if (un->un_throttle < un->un_saved_throttle) {
15963 un->un_reset_throttle_timeid =
15964 timeout(sd_restore_throttle,
15965 un, SD_QFULL_THROTTLE_RESET_INTERVAL);
15966 }
15967 }
15968 }
15969
15970 /*
15971 * If un_throttle has fallen below the low-water mark, we
15972 * restore the maximum value here (and allow it to ratchet
15973 * down again if necessary).
15974 */
15975 if (un->un_throttle < un->un_min_throttle) {
15976 un->un_throttle = un->un_saved_throttle;
15977 }
15978 } else {
15979 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
15980 "restoring limit from 0x%x to 0x%x\n",
15981 un->un_throttle, un->un_saved_throttle);
15982 un->un_throttle = un->un_saved_throttle;
15983 }
15984
15985 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15986 "sd_restore_throttle: calling sd_start_cmds!\n");
15987
15988 sd_start_cmds(un, NULL);
15989
15990 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15991 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
15992 un, un->un_throttle);
15993
15994 mutex_exit(SD_MUTEX(un));
15995
15996 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
15997 }
15998
15999 /*
16000 * Function: sdrunout
16001 *
16002 * Description: Callback routine for scsi_init_pkt when a resource allocation
16003 * fails.
16004 *
16005 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
16006 * soft state instance.
16007 *
16008 * Return Code: The scsi_init_pkt routine allows for the callback function to
16009 * return a 0 indicating the callback should be rescheduled or a 1
16010 * indicating not to reschedule. This routine always returns 1
16011 * because the driver always provides a callback function to
16012 * scsi_init_pkt. This results in a callback always being scheduled
16013 * (via the scsi_init_pkt callback implementation) if a resource
16014 * failure occurs.
16015 *
16016 * Context: This callback function may not block or call routines that block
16017 *
16018 * Note: Using the scsi_init_pkt callback facility can result in an I/O
16019 * request persisting at the head of the list which cannot be
16020 * satisfied even after multiple retries. In the future the driver
16021 * may implement some kind of maximum runout count before failing
16022 * an I/O.
16023 */
16024
16025 static int
16026 sdrunout(caddr_t arg)
16027 {
16028 struct sd_lun *un = (struct sd_lun *)arg;
16029
16030 ASSERT(un != NULL);
16031 ASSERT(!mutex_owned(SD_MUTEX(un)));
16032
16033 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
16034
16035 mutex_enter(SD_MUTEX(un));
16036 sd_start_cmds(un, NULL);
16037 mutex_exit(SD_MUTEX(un));
16038 /*
16039 * This callback routine always returns 1 (i.e. do not reschedule)
16040 * because we always specify sdrunout as the callback handler for
16041 * scsi_init_pkt inside the call to sd_start_cmds.
16042 */
16043 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
16044 return (1);
16045 }
16046
16047
16048 /*
16049 * Function: sdintr
16050 *
16051 * Description: Completion callback routine for scsi_pkt(9S) structs
16052 * sent to the HBA driver via scsi_transport(9F).
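 *
 * (Informal outline: decrement un_ncmds_in_transport, update
 * kstats, then dispatch on the pkt: auto request sense data,
 * the driver's own REQUEST SENSE pkt, successful completion,
 * or the error/retry path.)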
16053 * 16054 * Context: Interrupt context 16055 */ 16056 16057 static void 16058 sdintr(struct scsi_pkt *pktp) 16059 { 16060 struct buf *bp; 16061 struct sd_xbuf *xp; 16062 struct sd_lun *un; 16063 16064 ASSERT(pktp != NULL); 16065 bp = (struct buf *)pktp->pkt_private; 16066 ASSERT(bp != NULL); 16067 xp = SD_GET_XBUF(bp); 16068 ASSERT(xp != NULL); 16069 ASSERT(xp->xb_pktp != NULL); 16070 un = SD_GET_UN(bp); 16071 ASSERT(un != NULL); 16072 ASSERT(!mutex_owned(SD_MUTEX(un))); 16073 16074 #ifdef SD_FAULT_INJECTION 16075 16076 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16077 /* SD FaultInjection */ 16078 sd_faultinjection(pktp); 16079 16080 #endif /* SD_FAULT_INJECTION */ 16081 16082 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16083 " xp:0x%p, un:0x%p\n", bp, xp, un); 16084 16085 mutex_enter(SD_MUTEX(un)); 16086 16087 /* Reduce the count of the #commands currently in transport */ 16088 un->un_ncmds_in_transport--; 16089 ASSERT(un->un_ncmds_in_transport >= 0); 16090 16091 /* Increment counter to indicate that the callback routine is active */ 16092 un->un_in_callback++; 16093 16094 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16095 16096 #ifdef SDDEBUG 16097 if (bp == un->un_retry_bp) { 16098 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16099 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16100 un, un->un_retry_bp, un->un_ncmds_in_transport); 16101 } 16102 #endif 16103 16104 /* 16105 * If pkt_reason is CMD_DEV_GONE, just fail the command 16106 */ 16107 if (pktp->pkt_reason == CMD_DEV_GONE) { 16108 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16109 "Device is gone\n"); 16110 sd_return_failed_command(un, bp, EIO); 16111 goto exit; 16112 } 16113 16114 /* 16115 * First see if the pkt has auto-request sense data with it.... 16116 * Look at the packet state first so we don't take a performance 16117 * hit looking at the arq enabled flag unless absolutely necessary. 16118 */ 16119 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16120 (un->un_f_arq_enabled == TRUE)) { 16121 /* 16122 * The HBA did an auto request sense for this command so check 16123 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16124 * driver command that should not be retried. 16125 */ 16126 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16127 /* 16128 * Save the relevant sense info into the xp for the 16129 * original cmd. 16130 */ 16131 struct scsi_arq_status *asp; 16132 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16133 xp->xb_sense_status = 16134 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16135 xp->xb_sense_state = asp->sts_rqpkt_state; 16136 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16137 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16138 min(sizeof (struct scsi_extended_sense), 16139 SENSE_LENGTH)); 16140 16141 /* fail the command */ 16142 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16143 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16144 sd_return_failed_command(un, bp, EIO); 16145 goto exit; 16146 } 16147 16148 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16149 /* 16150 * We want to either retry or fail this command, so free 16151 * the DMA resources here. If we retry the command then 16152 * the DMA resources will be reallocated in sd_start_cmds(). 16153 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16154 * causes the *entire* transfer to start over again from the 16155 * beginning of the request, even for PARTIAL chunks that 16156 * have already transferred successfully. 
16157 */ 16158 if ((un->un_f_is_fibre == TRUE) && 16159 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16160 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16161 scsi_dmafree(pktp); 16162 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16163 } 16164 #endif 16165 16166 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16167 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16168 16169 sd_handle_auto_request_sense(un, bp, xp, pktp); 16170 goto exit; 16171 } 16172 16173 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16174 if (pktp->pkt_flags & FLAG_SENSING) { 16175 /* This pktp is from the unit's REQUEST_SENSE command */ 16176 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16177 "sdintr: sd_handle_request_sense\n"); 16178 sd_handle_request_sense(un, bp, xp, pktp); 16179 goto exit; 16180 } 16181 16182 /* 16183 * Check to see if the command successfully completed as requested; 16184 * this is the most common case (and also the hot performance path). 16185 * 16186 * Requirements for successful completion are: 16187 * pkt_reason is CMD_CMPLT and packet status is status good. 16188 * In addition: 16189 * - A residual of zero indicates successful completion no matter what 16190 * the command is. 16191 * - If the residual is not zero and the command is not a read or 16192 * write, then it's still defined as successful completion. In other 16193 * words, if the command is a read or write the residual must be 16194 * zero for successful completion. 16195 * - If the residual is not zero and the command is a read or 16196 * write, and it's a USCSICMD, then it's still defined as 16197 * successful completion. 16198 */ 16199 if ((pktp->pkt_reason == CMD_CMPLT) && 16200 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16201 16202 /* 16203 * Since this command is returned with a good status, we 16204 * can reset the count for Sonoma failover. 16205 */ 16206 un->un_sonoma_failure_count = 0; 16207 16208 /* 16209 * Return all USCSI commands on good status 16210 */ 16211 if (pktp->pkt_resid == 0) { 16212 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16213 "sdintr: returning command for resid == 0\n"); 16214 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16215 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16216 SD_UPDATE_B_RESID(bp, pktp); 16217 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16218 "sdintr: returning command for resid != 0\n"); 16219 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16220 SD_UPDATE_B_RESID(bp, pktp); 16221 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16222 "sdintr: returning uscsi command\n"); 16223 } else { 16224 goto not_successful; 16225 } 16226 sd_return_command(un, bp); 16227 16228 /* 16229 * Decrement counter to indicate that the callback routine 16230 * is done. 16231 */ 16232 un->un_in_callback--; 16233 ASSERT(un->un_in_callback >= 0); 16234 mutex_exit(SD_MUTEX(un)); 16235 16236 return; 16237 } 16238 16239 not_successful: 16240 16241 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16242 /* 16243 * The following is based upon knowledge of the underlying transport 16244 * and its use of DMA resources. This code should be removed when 16245 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16246 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16247 * and sd_start_cmds(). 16248 * 16249 * Free any DMA resources associated with this command if there 16250 * is a chance it could be retried or enqueued for later retry. 
16251 * If we keep the DMA binding then mpxio cannot reissue the 16252 * command on another path whenever a path failure occurs. 16253 * 16254 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16255 * causes the *entire* transfer to start over again from the 16256 * beginning of the request, even for PARTIAL chunks that 16257 * have already transferred successfully. 16258 * 16259 * This is only done for non-uscsi commands (and also skipped for the 16260 * driver's internal RQS command). Also just do this for Fibre Channel 16261 * devices as these are the only ones that support mpxio. 16262 */ 16263 if ((un->un_f_is_fibre == TRUE) && 16264 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16265 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16266 scsi_dmafree(pktp); 16267 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16268 } 16269 #endif 16270 16271 /* 16272 * The command did not successfully complete as requested so check 16273 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16274 * driver command that should not be retried so just return. If 16275 * FLAG_DIAGNOSE is not set the error will be processed below. 16276 */ 16277 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16278 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16279 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16280 /* 16281 * Issue a request sense if a check condition caused the error 16282 * (we handle the auto request sense case above), otherwise 16283 * just fail the command. 16284 */ 16285 if ((pktp->pkt_reason == CMD_CMPLT) && 16286 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16287 sd_send_request_sense_command(un, bp, pktp); 16288 } else { 16289 sd_return_failed_command(un, bp, EIO); 16290 } 16291 goto exit; 16292 } 16293 16294 /* 16295 * The command did not successfully complete as requested so process 16296 * the error, retry, and/or attempt recovery. 
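 * Each pkt_reason value is dispatched to a dedicated handler below; for CMD_CMPLT the SCSI status byte is examined further to select the appropriate status handler.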
16297 */ 16298 switch (pktp->pkt_reason) { 16299 case CMD_CMPLT: 16300 switch (SD_GET_PKT_STATUS(pktp)) { 16301 case STATUS_GOOD: 16302 /* 16303 * The command completed successfully with a non-zero 16304 * residual 16305 */ 16306 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16307 "sdintr: STATUS_GOOD \n"); 16308 sd_pkt_status_good(un, bp, xp, pktp); 16309 break; 16310 16311 case STATUS_CHECK: 16312 case STATUS_TERMINATED: 16313 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16314 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16315 sd_pkt_status_check_condition(un, bp, xp, pktp); 16316 break; 16317 16318 case STATUS_BUSY: 16319 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16320 "sdintr: STATUS_BUSY\n"); 16321 sd_pkt_status_busy(un, bp, xp, pktp); 16322 break; 16323 16324 case STATUS_RESERVATION_CONFLICT: 16325 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16326 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16327 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16328 break; 16329 16330 case STATUS_QFULL: 16331 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16332 "sdintr: STATUS_QFULL\n"); 16333 sd_pkt_status_qfull(un, bp, xp, pktp); 16334 break; 16335 16336 case STATUS_MET: 16337 case STATUS_INTERMEDIATE: 16338 case STATUS_SCSI2: 16339 case STATUS_INTERMEDIATE_MET: 16340 case STATUS_ACA_ACTIVE: 16341 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16342 "Unexpected SCSI status received: 0x%x\n", 16343 SD_GET_PKT_STATUS(pktp)); 16344 sd_return_failed_command(un, bp, EIO); 16345 break; 16346 16347 default: 16348 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16349 "Invalid SCSI status received: 0x%x\n", 16350 SD_GET_PKT_STATUS(pktp)); 16351 sd_return_failed_command(un, bp, EIO); 16352 break; 16353 16354 } 16355 break; 16356 16357 case CMD_INCOMPLETE: 16358 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16359 "sdintr: CMD_INCOMPLETE\n"); 16360 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16361 break; 16362 case CMD_TRAN_ERR: 16363 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16364 "sdintr: CMD_TRAN_ERR\n"); 16365 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16366 break; 16367 case CMD_RESET: 16368 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16369 "sdintr: CMD_RESET \n"); 16370 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16371 break; 16372 case CMD_ABORTED: 16373 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16374 "sdintr: CMD_ABORTED \n"); 16375 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16376 break; 16377 case CMD_TIMEOUT: 16378 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16379 "sdintr: CMD_TIMEOUT\n"); 16380 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16381 break; 16382 case CMD_UNX_BUS_FREE: 16383 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16384 "sdintr: CMD_UNX_BUS_FREE \n"); 16385 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16386 break; 16387 case CMD_TAG_REJECT: 16388 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16389 "sdintr: CMD_TAG_REJECT\n"); 16390 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16391 break; 16392 default: 16393 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16394 "sdintr: default\n"); 16395 sd_pkt_reason_default(un, bp, xp, pktp); 16396 break; 16397 } 16398 16399 exit: 16400 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16401 16402 /* Decrement counter to indicate that the callback routine is done. */ 16403 un->un_in_callback--; 16404 ASSERT(un->un_in_callback >= 0); 16405 16406 /* 16407 * At this point, the pkt has been dispatched, ie, it is either 16408 * being re-tried or has been returned to its caller and should 16409 * not be referenced. 
16410 */ 16411 16412 mutex_exit(SD_MUTEX(un)); 16413 } 16414 16415 16416 /* 16417 * Function: sd_print_incomplete_msg 16418 * 16419 * Description: Prints the error message for a CMD_INCOMPLETE error. 16420 * 16421 * Arguments: un - ptr to associated softstate for the device. 16422 * bp - ptr to the buf(9S) for the command. 16423 * arg - message string ptr 16424 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16425 * or SD_NO_RETRY_ISSUED. 16426 * 16427 * Context: May be called under interrupt context 16428 */ 16429 16430 static void 16431 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16432 { 16433 struct scsi_pkt *pktp; 16434 char *msgp; 16435 char *cmdp = arg; 16436 16437 ASSERT(un != NULL); 16438 ASSERT(mutex_owned(SD_MUTEX(un))); 16439 ASSERT(bp != NULL); 16440 ASSERT(arg != NULL); 16441 pktp = SD_GET_PKTP(bp); 16442 ASSERT(pktp != NULL); 16443 16444 switch (code) { 16445 case SD_DELAYED_RETRY_ISSUED: 16446 case SD_IMMEDIATE_RETRY_ISSUED: 16447 msgp = "retrying"; 16448 break; 16449 case SD_NO_RETRY_ISSUED: 16450 default: 16451 msgp = "giving up"; 16452 break; 16453 } 16454 16455 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16456 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16457 "incomplete %s- %s\n", cmdp, msgp); 16458 } 16459 } 16460 16461 16462 16463 /* 16464 * Function: sd_pkt_status_good 16465 * 16466 * Description: Processing for a STATUS_GOOD code in pkt_status. 16467 * 16468 * Context: May be called under interrupt context 16469 */ 16470 16471 static void 16472 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 16473 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16474 { 16475 char *cmdp; 16476 16477 ASSERT(un != NULL); 16478 ASSERT(mutex_owned(SD_MUTEX(un))); 16479 ASSERT(bp != NULL); 16480 ASSERT(xp != NULL); 16481 ASSERT(pktp != NULL); 16482 ASSERT(pktp->pkt_reason == CMD_CMPLT); 16483 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 16484 ASSERT(pktp->pkt_resid != 0); 16485 16486 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 16487 16488 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16489 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 16490 case SCMD_READ: 16491 cmdp = "read"; 16492 break; 16493 case SCMD_WRITE: 16494 cmdp = "write"; 16495 break; 16496 default: 16497 SD_UPDATE_B_RESID(bp, pktp); 16498 sd_return_command(un, bp); 16499 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16500 return; 16501 } 16502 16503 /* 16504 * See if we can retry the read/write, preferably immediately. 16505 * If retries are exhausted, then sd_retry_command() will update 16506 * the b_resid count. 16507 */ 16508 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 16509 cmdp, EIO, (clock_t)0, NULL); 16510 16511 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16512 } 16513 16514 16515 16516 16517 16518 /* 16519 * Function: sd_handle_request_sense 16520 * 16521 * Description: Processing for non-auto Request Sense command.
16522 * 16523 * Arguments: un - ptr to associated softstate 16524 * sense_bp - ptr to buf(9S) for the RQS command 16525 * sense_xp - ptr to the sd_xbuf for the RQS command 16526 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 16527 * 16528 * Context: May be called under interrupt context 16529 */ 16530 16531 static void 16532 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 16533 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 16534 { 16535 struct buf *cmd_bp; /* buf for the original command */ 16536 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 16537 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 16538 16539 ASSERT(un != NULL); 16540 ASSERT(mutex_owned(SD_MUTEX(un))); 16541 ASSERT(sense_bp != NULL); 16542 ASSERT(sense_xp != NULL); 16543 ASSERT(sense_pktp != NULL); 16544 16545 /* 16546 * Note the sense_bp, sense_xp, and sense_pktp here are for the 16547 * RQS command and not the original command. 16548 */ 16549 ASSERT(sense_pktp == un->un_rqs_pktp); 16550 ASSERT(sense_bp == un->un_rqs_bp); 16551 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16552 (FLAG_SENSING | FLAG_HEAD)); 16553 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16554 FLAG_SENSING) == FLAG_SENSING); 16555 16556 /* These are the bp, xp, and pktp for the original command */ 16557 cmd_bp = sense_xp->xb_sense_bp; 16558 cmd_xp = SD_GET_XBUF(cmd_bp); 16559 cmd_pktp = SD_GET_PKTP(cmd_bp); 16560 16561 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16562 /* 16563 * The REQUEST SENSE command failed. Release the REQUEST 16564 * SENSE command for re-use, get back the bp for the original 16565 * command, and attempt to re-try the original command if 16566 * FLAG_DIAGNOSE is not set in the original packet. 16567 */ 16568 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16569 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16570 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16571 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16572 NULL, NULL, EIO, (clock_t)0, NULL); 16573 return; 16574 } 16575 } 16576 16577 /* 16578 * Save the relevant sense info into the xp for the original cmd. 16579 * 16580 * Note: if the request sense failed the state info will be zero 16581 * as set in sd_mark_rqs_busy() 16582 */ 16583 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16584 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16585 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16586 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 16587 16588 /* 16589 * Free up the RQS command.... 16590 * NOTE: 16591 * Must do this BEFORE calling sd_validate_sense_data! 16592 * sd_validate_sense_data may return the original command in 16593 * which case the pkt will be freed and the flags can no 16594 * longer be touched. 16595 * SD_MUTEX is held through this process until the command 16596 * is dispatched based upon the sense data, so there are 16597 * no race conditions. 16598 */ 16599 (void) sd_mark_rqs_idle(un, sense_xp); 16600 16601 /* 16602 * For a retryable command see if we have valid sense data, if so then 16603 * turn it over to sd_decode_sense() to figure out the right course of 16604 * action. Just fail a non-retryable command. 
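 * (FLAG_DIAGNOSE marks a uscsi or internal driver command that must not be retried, so such commands are failed straight back to the issuer.)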
16605 */ 16606 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16607 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 16608 SD_SENSE_DATA_IS_VALID) { 16609 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16610 } 16611 } else { 16612 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16613 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16614 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16615 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16616 sd_return_failed_command(un, cmd_bp, EIO); 16617 } 16618 } 16619 16620 16621 16622 16623 /* 16624 * Function: sd_handle_auto_request_sense 16625 * 16626 * Description: Processing for auto-request sense information. 16627 * 16628 * Arguments: un - ptr to associated softstate 16629 * bp - ptr to buf(9S) for the command 16630 * xp - ptr to the sd_xbuf for the command 16631 * pktp - ptr to the scsi_pkt(9S) for the command 16632 * 16633 * Context: May be called under interrupt context 16634 */ 16635 16636 static void 16637 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16638 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16639 { 16640 struct scsi_arq_status *asp; 16641 16642 ASSERT(un != NULL); 16643 ASSERT(mutex_owned(SD_MUTEX(un))); 16644 ASSERT(bp != NULL); 16645 ASSERT(xp != NULL); 16646 ASSERT(pktp != NULL); 16647 ASSERT(pktp != un->un_rqs_pktp); 16648 ASSERT(bp != un->un_rqs_bp); 16649 16650 /* 16651 * For auto-request sense, we get a scsi_arq_status back from 16652 * the HBA, with the sense data in the sts_sensedata member. 16653 * The pkt_scbp of the packet points to this scsi_arq_status. 16654 */ 16655 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16656 16657 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16658 /* 16659 * The auto REQUEST SENSE failed; see if we can re-try 16660 * the original command. 16661 */ 16662 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16663 "auto request sense failed (reason=%s)\n", 16664 scsi_rname(asp->sts_rqpkt_reason)); 16665 16666 sd_reset_target(un, pktp); 16667 16668 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16669 NULL, NULL, EIO, (clock_t)0, NULL); 16670 return; 16671 } 16672 16673 /* Save the relevant sense info into the xp for the original cmd. */ 16674 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16675 xp->xb_sense_state = asp->sts_rqpkt_state; 16676 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16677 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16678 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 16679 16680 /* 16681 * See if we have valid sense data, if so then turn it over to 16682 * sd_decode_sense() to figure out the right course of action. 16683 */ 16684 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 16685 sd_decode_sense(un, bp, xp, pktp); 16686 } 16687 } 16688 16689 16690 /* 16691 * Function: sd_print_sense_failed_msg 16692 * 16693 * Description: Print log message when RQS has failed. 
16694 * 16695 * Arguments: un - ptr to associated softstate 16696 * bp - ptr to buf(9S) for the command 16697 * arg - generic message string ptr 16698 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16699 * or SD_NO_RETRY_ISSUED 16700 * 16701 * Context: May be called from interrupt context 16702 */ 16703 16704 static void 16705 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16706 int code) 16707 { 16708 char *msgp = arg; 16709 16710 ASSERT(un != NULL); 16711 ASSERT(mutex_owned(SD_MUTEX(un))); 16712 ASSERT(bp != NULL); 16713 16714 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16715 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16716 } 16717 } 16718 16719 16720 /* 16721 * Function: sd_validate_sense_data 16722 * 16723 * Description: Check the given sense data for validity. 16724 * If the sense data is not valid, the command will 16725 * be either failed or retried! 16726 * 16727 * Return Code: SD_SENSE_DATA_IS_INVALID 16728 * SD_SENSE_DATA_IS_VALID 16729 * 16730 * Context: May be called from interrupt context 16731 */ 16732 16733 static int 16734 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 16735 { 16736 struct scsi_extended_sense *esp; 16737 struct scsi_pkt *pktp; 16738 size_t actual_len; 16739 char *msgp = NULL; 16740 16741 ASSERT(un != NULL); 16742 ASSERT(mutex_owned(SD_MUTEX(un))); 16743 ASSERT(bp != NULL); 16744 ASSERT(bp != un->un_rqs_bp); 16745 ASSERT(xp != NULL); 16746 16747 pktp = SD_GET_PKTP(bp); 16748 ASSERT(pktp != NULL); 16749 16750 /* 16751 * Check the status of the RQS command (auto or manual). 16752 */ 16753 switch (xp->xb_sense_status & STATUS_MASK) { 16754 case STATUS_GOOD: 16755 break; 16756 16757 case STATUS_RESERVATION_CONFLICT: 16758 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16759 return (SD_SENSE_DATA_IS_INVALID); 16760 16761 case STATUS_BUSY: 16762 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16763 "Busy Status on REQUEST SENSE\n"); 16764 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16765 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16766 return (SD_SENSE_DATA_IS_INVALID); 16767 16768 case STATUS_QFULL: 16769 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16770 "QFULL Status on REQUEST SENSE\n"); 16771 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16772 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16773 return (SD_SENSE_DATA_IS_INVALID); 16774 16775 case STATUS_CHECK: 16776 case STATUS_TERMINATED: 16777 msgp = "Check Condition on REQUEST SENSE\n"; 16778 goto sense_failed; 16779 16780 default: 16781 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16782 goto sense_failed; 16783 } 16784 16785 /* 16786 * See if we got the minimum required amount of sense data. 16787 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16788 * or less. 
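 * xb_sense_resid holds the count of sense bytes that were not transferred, so SENSE_LENGTH minus the residual yields the number of bytes actually received.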
16789 */ 16790 actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid); 16791 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 16792 (actual_len == 0)) { 16793 msgp = "Request Sense couldn't get sense data\n"; 16794 goto sense_failed; 16795 } 16796 16797 if (actual_len < SUN_MIN_SENSE_LENGTH) { 16798 msgp = "Not enough sense information\n"; 16799 goto sense_failed; 16800 } 16801 16802 /* 16803 * We require the extended sense data. 16804 */ 16805 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16806 if (esp->es_class != CLASS_EXTENDED_SENSE) { 16807 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16808 static char tmp[8]; 16809 static char buf[148]; 16810 char *p = (char *)(xp->xb_sense_data); 16811 int i; 16812 16813 mutex_enter(&sd_sense_mutex); 16814 (void) strcpy(buf, "undecodable sense information:"); 16815 for (i = 0; i < actual_len; i++) { 16816 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 16817 (void) strcpy(&buf[strlen(buf)], tmp); 16818 } 16819 i = strlen(buf); 16820 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 16821 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 16822 mutex_exit(&sd_sense_mutex); 16823 } 16824 /* Note: Legacy behavior, fail the command with no retry */ 16825 sd_return_failed_command(un, bp, EIO); 16826 return (SD_SENSE_DATA_IS_INVALID); 16827 } 16828 16829 /* 16830 * Check that es_code is valid (es_class concatenated with es_code 16831 * make up the "response code" field). es_class will always be 7, so 16832 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 16833 * format. 16834 */ 16835 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 16836 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 16837 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 16838 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 16839 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 16840 goto sense_failed; 16841 } 16842 16843 return (SD_SENSE_DATA_IS_VALID); 16844 16845 sense_failed: 16846 /* 16847 * If the request sense failed (for whatever reason), attempt 16848 * to retry the original command. 16849 */ 16850 #if defined(__i386) || defined(__amd64) 16851 /* 16852 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 16853 * sddef.h for the Sparc platform, while x86 uses one binary 16854 * for both SCSI and FC. 16855 * The SD_RETRY_DELAY value needs to be adjusted here 16856 * whenever SD_RETRY_DELAY changes in sddef.h. 16857 */ 16858 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16859 sd_print_sense_failed_msg, msgp, EIO, 16860 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL); 16861 #else 16862 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16863 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 16864 #endif 16865 16866 return (SD_SENSE_DATA_IS_INVALID); 16867 } 16868 16869 16870 16871 /* 16872 * Function: sd_decode_sense 16873 * 16874 * Description: Take recovery action(s) when SCSI Sense Data is received. 16875 * 16876 * Context: Interrupt context.
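 * The sense key, ASC, and ASCQ are located at different offsets in fixed format versus descriptor format sense data, so es_code is consulted first to select the correct extraction.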
16877 */ 16878 16879 static void 16880 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16881 struct scsi_pkt *pktp) 16882 { 16883 struct scsi_extended_sense *esp; 16884 struct scsi_descr_sense_hdr *sdsp; 16885 uint8_t asc, ascq, sense_key; 16886 16887 ASSERT(un != NULL); 16888 ASSERT(mutex_owned(SD_MUTEX(un))); 16889 ASSERT(bp != NULL); 16890 ASSERT(bp != un->un_rqs_bp); 16891 ASSERT(xp != NULL); 16892 ASSERT(pktp != NULL); 16893 16894 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16895 16896 switch (esp->es_code) { 16897 case CODE_FMT_DESCR_CURRENT: 16898 case CODE_FMT_DESCR_DEFERRED: 16899 sdsp = (struct scsi_descr_sense_hdr *)xp->xb_sense_data; 16900 sense_key = sdsp->ds_key; 16901 asc = sdsp->ds_add_code; 16902 ascq = sdsp->ds_qual_code; 16903 break; 16904 case CODE_FMT_VENDOR_SPECIFIC: 16905 case CODE_FMT_FIXED_CURRENT: 16906 case CODE_FMT_FIXED_DEFERRED: 16907 default: 16908 sense_key = esp->es_key; 16909 asc = esp->es_add_code; 16910 ascq = esp->es_qual_code; 16911 break; 16912 } 16913 16914 switch (sense_key) { 16915 case KEY_NO_SENSE: 16916 sd_sense_key_no_sense(un, bp, xp, pktp); 16917 break; 16918 case KEY_RECOVERABLE_ERROR: 16919 sd_sense_key_recoverable_error(un, asc, bp, xp, pktp); 16920 break; 16921 case KEY_NOT_READY: 16922 sd_sense_key_not_ready(un, asc, ascq, bp, xp, pktp); 16923 break; 16924 case KEY_MEDIUM_ERROR: 16925 case KEY_HARDWARE_ERROR: 16926 sd_sense_key_medium_or_hardware_error(un, 16927 sense_key, asc, bp, xp, pktp); 16928 break; 16929 case KEY_ILLEGAL_REQUEST: 16930 sd_sense_key_illegal_request(un, bp, xp, pktp); 16931 break; 16932 case KEY_UNIT_ATTENTION: 16933 sd_sense_key_unit_attention(un, asc, bp, xp, pktp); 16934 break; 16935 case KEY_WRITE_PROTECT: 16936 case KEY_VOLUME_OVERFLOW: 16937 case KEY_MISCOMPARE: 16938 sd_sense_key_fail_command(un, bp, xp, pktp); 16939 break; 16940 case KEY_BLANK_CHECK: 16941 sd_sense_key_blank_check(un, bp, xp, pktp); 16942 break; 16943 case KEY_ABORTED_COMMAND: 16944 sd_sense_key_aborted_command(un, bp, xp, pktp); 16945 break; 16946 case KEY_VENDOR_UNIQUE: 16947 case KEY_COPY_ABORTED: 16948 case KEY_EQUAL: 16949 case KEY_RESERVED: 16950 default: 16951 sd_sense_key_default(un, sense_key, bp, xp, pktp); 16952 break; 16953 } 16954 } 16955 16956 16957 /* 16958 * Function: sd_dump_memory 16959 * 16960 * Description: Debug logging routine to print the contents of a user-provided 16961 * buffer. The output of the buffer is broken up into 256 byte 16962 * segments due to a size constraint of the scsi_log() 16963 * implementation.
16964 * 16965 * Arguments: un - ptr to softstate 16966 * comp - component mask 16967 * title - "title" string to precede data when printed 16968 * data - ptr to data block to be printed 16969 * len - size of data block to be printed 16970 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 16971 * 16972 * Context: May be called from interrupt context 16973 */ 16974 16975 #define SD_DUMP_MEMORY_BUF_SIZE 256 16976 16977 static char *sd_dump_format_string[] = { 16978 " 0x%02x", 16979 " %c" 16980 }; 16981 16982 static void 16983 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 16984 int len, int fmt) 16985 { 16986 int i, j; 16987 int avail_count; 16988 int start_offset; 16989 int end_offset; 16990 size_t entry_len; 16991 char *bufp; 16992 char *local_buf; 16993 char *format_string; 16994 16995 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 16996 16997 /* 16998 * In the debug version of the driver, this function is called from a 16999 * number of places which are NOPs in the release driver. 17000 * The debug driver therefore has additional methods of filtering 17001 * debug output. 17002 */ 17003 #ifdef SDDEBUG 17004 /* 17005 * In the debug version of the driver we can reduce the amount of debug 17006 * messages by setting sd_error_level to something other than 17007 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17008 * sd_component_mask. 17009 */ 17010 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 17011 (sd_error_level != SCSI_ERR_ALL)) { 17012 return; 17013 } 17014 if (((sd_component_mask & comp) == 0) || 17015 (sd_error_level != SCSI_ERR_ALL)) { 17016 return; 17017 } 17018 #else 17019 if (sd_error_level != SCSI_ERR_ALL) { 17020 return; 17021 } 17022 #endif 17023 17024 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 17025 bufp = local_buf; 17026 /* 17027 * Available length is the length of local_buf[], minus the 17028 * length of the title string, minus one for the ":", minus 17029 * one for the newline, minus one for the NULL terminator. 17030 * This gives the #bytes available for holding the printed 17031 * values from the given data buffer. 17032 */ 17033 if (fmt == SD_LOG_HEX) { 17034 format_string = sd_dump_format_string[0]; 17035 } else /* SD_LOG_CHAR */ { 17036 format_string = sd_dump_format_string[1]; 17037 } 17038 /* 17039 * Available count is the number of elements from the given 17040 * data buffer that we can fit into the available length. 17041 * This is based upon the size of the format string used. 17042 * Make one entry and find its size. 17043 */ 17044 (void) sprintf(bufp, format_string, data[0]); 17045 entry_len = strlen(bufp); 17046 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 17047 17048 j = 0; 17049 while (j < len) { 17050 bufp = local_buf; 17051 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 17052 start_offset = j; 17053 17054 end_offset = start_offset + avail_count; 17055 17056 (void) sprintf(bufp, "%s:", title); 17057 bufp += strlen(bufp); 17058 for (i = start_offset; ((i < end_offset) && (j < len)); 17059 i++, j++) { 17060 (void) sprintf(bufp, format_string, data[i]); 17061 bufp += entry_len; 17062 } 17063 (void) sprintf(bufp, "\n"); 17064 17065 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 17066 } 17067 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 17068 } 17069 17070 /* 17071 * Function: sd_print_sense_msg 17072 * 17073 * Description: Log a message based upon the given sense data.
17074 * 17075 * Arguments: un - ptr to associated softstate 17076 * bp - ptr to buf(9S) for the command 17077 * arg - ptr to associate sd_sense_info struct 17078 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17079 * or SD_NO_RETRY_ISSUED 17080 * 17081 * Context: May be called from interrupt context 17082 */ 17083 17084 static void 17085 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17086 { 17087 struct sd_xbuf *xp; 17088 struct scsi_pkt *pktp; 17089 struct scsi_extended_sense *sensep; 17090 daddr_t request_blkno; 17091 diskaddr_t err_blkno; 17092 int severity; 17093 int pfa_flag; 17094 int fixed_format = TRUE; 17095 extern struct scsi_key_strings scsi_cmds[]; 17096 17097 ASSERT(un != NULL); 17098 ASSERT(mutex_owned(SD_MUTEX(un))); 17099 ASSERT(bp != NULL); 17100 xp = SD_GET_XBUF(bp); 17101 ASSERT(xp != NULL); 17102 pktp = SD_GET_PKTP(bp); 17103 ASSERT(pktp != NULL); 17104 ASSERT(arg != NULL); 17105 17106 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 17107 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 17108 17109 if ((code == SD_DELAYED_RETRY_ISSUED) || 17110 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 17111 severity = SCSI_ERR_RETRYABLE; 17112 } 17113 17114 /* Use absolute block number for the request block number */ 17115 request_blkno = xp->xb_blkno; 17116 17117 /* 17118 * Now try to get the error block number from the sense data 17119 */ 17120 sensep = (struct scsi_extended_sense *)xp->xb_sense_data; 17121 switch (sensep->es_code) { 17122 case CODE_FMT_DESCR_CURRENT: 17123 case CODE_FMT_DESCR_DEFERRED: 17124 err_blkno = 17125 sd_extract_sense_info_descr( 17126 (struct scsi_descr_sense_hdr *)sensep); 17127 fixed_format = FALSE; 17128 break; 17129 case CODE_FMT_FIXED_CURRENT: 17130 case CODE_FMT_FIXED_DEFERRED: 17131 case CODE_FMT_VENDOR_SPECIFIC: 17132 default: 17133 /* 17134 * With the es_valid bit set, we assume that the error 17135 * blkno is in the sense data. Also, if xp->xb_blkno is 17136 * greater than 0xffffffff then the target *should* have used 17137 * a descriptor sense format (or it shouldn't have set 17138 * the es_valid bit), and we may as well ignore the 17139 * 32-bit value. 17140 */ 17141 if ((sensep->es_valid != 0) && (xp->xb_blkno <= 0xffffffff)) { 17142 err_blkno = (diskaddr_t) 17143 ((sensep->es_info_1 << 24) | 17144 (sensep->es_info_2 << 16) | 17145 (sensep->es_info_3 << 8) | 17146 (sensep->es_info_4)); 17147 } else { 17148 err_blkno = (diskaddr_t)-1; 17149 } 17150 break; 17151 } 17152 17153 if (err_blkno == (diskaddr_t)-1) { 17154 /* 17155 * Without the es_valid bit set (for fixed format) or an 17156 * information descriptor (for descriptor format) we cannot 17157 * be certain of the error blkno, so just use the 17158 * request_blkno. 17159 */ 17160 err_blkno = (diskaddr_t)request_blkno; 17161 } else { 17162 /* 17163 * We retrieved the error block number from the information 17164 * portion of the sense data. 17165 * 17166 * For USCSI commands we are better off using the error 17167 * block no. as the requested block no. (This is the best 17168 * we can estimate.) 17169 */ 17170 if ((SD_IS_BUFIO(xp) == FALSE) && 17171 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 17172 request_blkno = err_blkno; 17173 } 17174 } 17175 17176 /* 17177 * The following will log the buffer contents for the release driver 17178 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 17179 * level is set to verbose. 
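 * Both the failed CDB and the returned sense bytes are dumped in hex so that a failed command can be matched with the sense data it produced.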
17180 */ 17181 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17182 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17183 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17184 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17185 17186 if (pfa_flag == FALSE) { 17187 /* This is normally only set for USCSI */ 17188 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17189 return; 17190 } 17191 17192 if ((SD_IS_BUFIO(xp) == TRUE) && 17193 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17194 (severity < sd_error_level))) { 17195 return; 17196 } 17197 } 17198 17199 /* 17200 * If the data is fixed format then check for Sonoma Failover, 17201 * and keep a count of how many failed I/O's. We should not have 17202 * to worry about Sonoma returning descriptor format sense data, 17203 * and asc/ascq are in a different location in descriptor format. 17204 */ 17205 if (fixed_format && 17206 (SD_IS_LSI(un)) && (sensep->es_key == KEY_ILLEGAL_REQUEST) && 17207 (sensep->es_add_code == 0x94) && (sensep->es_qual_code == 0x01)) { 17208 un->un_sonoma_failure_count++; 17209 if (un->un_sonoma_failure_count > 1) { 17210 return; 17211 } 17212 } 17213 17214 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17215 request_blkno, err_blkno, scsi_cmds, sensep, 17216 un->un_additional_codes, NULL); 17217 } 17218 17219 /* 17220 * Function: sd_extract_sense_info_descr 17221 * 17222 * Description: Retrieve "information" field from descriptor format 17223 * sense data. Iterates through each sense descriptor 17224 * looking for the information descriptor and returns 17225 * the information field from that descriptor. 17226 * 17227 * Context: May be called from interrupt context 17228 */ 17229 17230 static diskaddr_t 17231 sd_extract_sense_info_descr(struct scsi_descr_sense_hdr *sdsp) 17232 { 17233 diskaddr_t result; 17234 uint8_t *descr_offset; 17235 int valid_sense_length; 17236 struct scsi_information_sense_descr *isd; 17237 17238 /* 17239 * Initialize result to -1 indicating there is no information 17240 * descriptor 17241 */ 17242 result = (diskaddr_t)-1; 17243 17244 /* 17245 * The first descriptor will immediately follow the header 17246 */ 17247 descr_offset = (uint8_t *)(sdsp+1); /* Pointer arithmetic */ 17248 17249 /* 17250 * Calculate the amount of valid sense data 17251 */ 17252 valid_sense_length = 17253 min((sizeof (struct scsi_descr_sense_hdr) + 17254 sdsp->ds_addl_sense_length), 17255 SENSE_LENGTH); 17256 17257 /* 17258 * Iterate through the list of descriptors, stopping when we 17259 * run out of sense data 17260 */ 17261 while ((descr_offset + sizeof (struct scsi_information_sense_descr)) <= 17262 (uint8_t *)sdsp + valid_sense_length) { 17263 /* 17264 * Check if this is an information descriptor. We can 17265 * use the scsi_information_sense_descr structure as a 17266 * template since the first two fields are always the 17267 * same. 17268 */ 17269 isd = (struct scsi_information_sense_descr *)descr_offset; 17270 if (isd->isd_descr_type == DESCR_INFORMATION) { 17271 /* 17272 * Found an information descriptor. Copy the 17273 * information field. There will only be one 17274 * information descriptor so we can stop looking.
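 * The INFORMATION field is an eight-byte big-endian quantity; it is assembled below one byte at a time into a diskaddr_t.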
17275 */ 17276 result = 17277 (((diskaddr_t)isd->isd_information[0] << 56) | 17278 ((diskaddr_t)isd->isd_information[1] << 48) | 17279 ((diskaddr_t)isd->isd_information[2] << 40) | 17280 ((diskaddr_t)isd->isd_information[3] << 32) | 17281 ((diskaddr_t)isd->isd_information[4] << 24) | 17282 ((diskaddr_t)isd->isd_information[5] << 16) | 17283 ((diskaddr_t)isd->isd_information[6] << 8) | 17284 ((diskaddr_t)isd->isd_information[7])); 17285 break; 17286 } 17287 17288 /* 17289 * Get pointer to the next descriptor. The "additional 17290 * length" field holds the length of the descriptor except 17291 * for the "type" and "additional length" fields, so 17292 * we need to add 2 to get the total length. 17293 */ 17294 descr_offset += (isd->isd_addl_length + 2); 17295 } 17296 17297 return (result); 17298 } 17299 17300 /* 17301 * Function: sd_sense_key_no_sense 17302 * 17303 * Description: Recovery action when sense data was not received. 17304 * 17305 * Context: May be called from interrupt context 17306 */ 17307 17308 static void 17309 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17310 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17311 { 17312 struct sd_sense_info si; 17313 17314 ASSERT(un != NULL); 17315 ASSERT(mutex_owned(SD_MUTEX(un))); 17316 ASSERT(bp != NULL); 17317 ASSERT(xp != NULL); 17318 ASSERT(pktp != NULL); 17319 17320 si.ssi_severity = SCSI_ERR_FATAL; 17321 si.ssi_pfa_flag = FALSE; 17322 17323 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17324 17325 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17326 &si, EIO, (clock_t)0, NULL); 17327 } 17328 17329 17330 /* 17331 * Function: sd_sense_key_recoverable_error 17332 * 17333 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17334 * 17335 * Context: May be called from interrupt context 17336 */ 17337 17338 static void 17339 sd_sense_key_recoverable_error(struct sd_lun *un, 17340 uint8_t asc, 17341 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17342 { 17343 struct sd_sense_info si; 17344 17345 ASSERT(un != NULL); 17346 ASSERT(mutex_owned(SD_MUTEX(un))); 17347 ASSERT(bp != NULL); 17348 ASSERT(xp != NULL); 17349 ASSERT(pktp != NULL); 17350 17351 /* 17352 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17353 */ 17354 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17355 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17356 si.ssi_severity = SCSI_ERR_INFO; 17357 si.ssi_pfa_flag = TRUE; 17358 } else { 17359 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17360 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17361 si.ssi_severity = SCSI_ERR_RECOVERED; 17362 si.ssi_pfa_flag = FALSE; 17363 } 17364 17365 if (pktp->pkt_resid == 0) { 17366 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17367 sd_return_command(un, bp); 17368 return; 17369 } 17370 17371 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17372 &si, EIO, (clock_t)0, NULL); 17373 } 17374 17375 17376 17377 17378 /* 17379 * Function: sd_sense_key_not_ready 17380 * 17381 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
17382 * 17383 * Context: May be called from interrupt context 17384 */ 17385 17386 static void 17387 sd_sense_key_not_ready(struct sd_lun *un, 17388 uint8_t asc, uint8_t ascq, 17389 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17390 { 17391 struct sd_sense_info si; 17392 17393 ASSERT(un != NULL); 17394 ASSERT(mutex_owned(SD_MUTEX(un))); 17395 ASSERT(bp != NULL); 17396 ASSERT(xp != NULL); 17397 ASSERT(pktp != NULL); 17398 17399 si.ssi_severity = SCSI_ERR_FATAL; 17400 si.ssi_pfa_flag = FALSE; 17401 17402 /* 17403 * Update error stats after first NOT READY error. Disks may have 17404 * been powered down and may need to be restarted. For CDROMs, 17405 * report NOT READY errors only if media is present. 17406 */ 17407 if ((ISCD(un) && (un->un_f_geometry_is_valid == TRUE)) || 17408 (xp->xb_retry_count > 0)) { 17409 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17410 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 17411 } 17412 17413 /* 17414 * Just fail if the "not ready" retry limit has been reached. 17415 */ 17416 if (xp->xb_retry_count >= un->un_notready_retry_count) { 17417 /* Special check for error message printing for removables. */ 17418 if ((ISREMOVABLE(un)) && (asc == 0x04) && 17419 (ascq >= 0x04)) { 17420 si.ssi_severity = SCSI_ERR_ALL; 17421 } 17422 goto fail_command; 17423 } 17424 17425 /* 17426 * Check the ASC and ASCQ in the sense data as needed, to determine 17427 * what to do. 17428 */ 17429 switch (asc) { 17430 case 0x04: /* LOGICAL UNIT NOT READY */ 17431 /* 17432 * Disk drives that don't spin up result in a very long delay 17433 * in format without warning messages. We will log a message 17434 * if the error level is set to verbose. 17435 */ 17436 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17437 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17438 "logical unit not ready, resetting disk\n"); 17439 } 17440 17441 /* 17442 * There are different requirements for CDROMs and disks for 17443 * the number of retries. If a CD-ROM is reporting this, it is 17444 * probably reading the TOC and is in the process of getting 17445 * ready, so we should keep on trying for a long time to make 17446 * sure that all types of media are taken into account (for 17447 * some media the drive takes a long time to read the TOC). For 17448 * disks we do not want to retry this too many times as this 17449 * can cause a long hang in format when the drive refuses to 17450 * spin up (a very common failure). 17451 */ 17452 switch (ascq) { 17453 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 17454 /* 17455 * Disk drives frequently refuse to spin up which 17456 * results in a very long hang in format without 17457 * warning messages. 17458 * 17459 * Note: This code preserves the legacy behavior of 17460 * comparing xb_retry_count against zero for fibre 17461 * channel targets instead of comparing against the 17462 * un_reset_retry_count value. The reason for this 17463 * discrepancy has been so utterly lost beneath the 17464 * Sands of Time that even Indiana Jones could not 17465 * find it.
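 * Both branches below log and reset the target when diagnostic logging is enabled or the retry count has passed its threshold (zero for fibre channel, un_reset_retry_count otherwise), provided no START STOP UNIT is already pending.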
17466 */ 17467 if (un->un_f_is_fibre == TRUE) { 17468 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17469 (xp->xb_retry_count > 0)) && 17470 (un->un_startstop_timeid == NULL)) { 17471 scsi_log(SD_DEVINFO(un), sd_label, 17472 CE_WARN, "logical unit not ready, " 17473 "resetting disk\n"); 17474 sd_reset_target(un, pktp); 17475 } 17476 } else { 17477 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17478 (xp->xb_retry_count > 17479 un->un_reset_retry_count)) && 17480 (un->un_startstop_timeid == NULL)) { 17481 scsi_log(SD_DEVINFO(un), sd_label, 17482 CE_WARN, "logical unit not ready, " 17483 "resetting disk\n"); 17484 sd_reset_target(un, pktp); 17485 } 17486 } 17487 break; 17488 17489 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 17490 /* 17491 * If the target is in the process of becoming 17492 * ready, just proceed with the retry. This can 17493 * happen with CD-ROMs that take a long time to 17494 * read TOC after a power cycle or reset. 17495 */ 17496 goto do_retry; 17497 17498 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 17499 break; 17500 17501 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 17502 /* 17503 * Retries cannot help here so just fail right away. 17504 */ 17505 goto fail_command; 17506 17507 case 0x88: 17508 /* 17509 * Vendor-unique code for T3/T4: it indicates a 17510 * path problem in a multipathed config, but as far as 17511 * the target driver is concerned it equates to a fatal 17512 * error, so we should just fail the command right away 17513 * (without printing anything to the console). If this 17514 * is not a T3/T4, fall thru to the default recovery 17515 * action. 17516 * T3/T4 is FC only, don't need to check is_fibre 17517 */ 17518 if (SD_IS_T3(un) || SD_IS_T4(un)) { 17519 sd_return_failed_command(un, bp, EIO); 17520 return; 17521 } 17522 /* FALLTHRU */ 17523 17524 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 17525 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 17526 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 17527 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 17528 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 17529 default: /* Possible future codes in SCSI spec? */ 17530 /* 17531 * For removable-media devices, do not retry if 17532 * ASCQ > 2 as these result mostly from USCSI commands 17533 * on MMC devices issued to check status of an 17534 * operation initiated in immediate mode. Also for 17535 * ASCQ >= 4 do not print console messages as these 17536 * mainly represent a user-initiated operation 17537 * instead of a system failure. 17538 */ 17539 if (ISREMOVABLE(un)) { 17540 si.ssi_severity = SCSI_ERR_ALL; 17541 goto fail_command; 17542 } 17543 break; 17544 } 17545 17546 /* 17547 * As part of our recovery attempt for the NOT READY 17548 * condition, we issue a START STOP UNIT command. However, 17549 * we want to wait for a short delay before attempting this 17550 * as there may still be more commands coming back from the 17551 * target with the check condition. To do this we use 17552 * timeout(9F) to call sd_start_stop_unit_callback() after 17553 * the delay interval expires. (sd_start_stop_unit_callback() 17554 * dispatches sd_start_stop_unit_task(), which will issue 17555 * the actual START STOP UNIT command.) The delay interval 17556 * is one-half of the delay that we will use to retry the 17557 * command that generated the NOT READY condition.
17558 * 17559 * Note that we could just dispatch sd_start_stop_unit_task() 17560 * from here and allow it to sleep for the delay interval, 17561 * but then we would be tying up the taskq thread 17562 * unnecessarily for the duration of the delay. 17563 * 17564 * Do not issue the START STOP UNIT if the current command 17565 * is already a START STOP UNIT. 17566 */ 17567 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 17568 break; 17569 } 17570 17571 /* 17572 * Do not schedule the timeout if one is already pending. 17573 */ 17574 if (un->un_startstop_timeid != NULL) { 17575 SD_INFO(SD_LOG_ERROR, un, 17576 "sd_sense_key_not_ready: restart already issued to" 17577 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 17578 ddi_get_instance(SD_DEVINFO(un))); 17579 break; 17580 } 17581 17582 /* 17583 * Schedule the START STOP UNIT command, then queue the command 17584 * for a retry. 17585 * 17586 * Note: A timeout is not scheduled for this retry because we 17587 * want the retry to be serial with the START_STOP_UNIT. The 17588 * retry will be started when the START_STOP_UNIT is completed 17589 * in sd_start_stop_unit_task. 17590 */ 17591 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 17592 un, SD_BSY_TIMEOUT / 2); 17593 xp->xb_retry_count++; 17594 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 17595 return; 17596 17597 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 17598 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17599 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17600 "unit does not respond to selection\n"); 17601 } 17602 break; 17603 17604 case 0x3A: /* MEDIUM NOT PRESENT */ 17605 if (sd_error_level >= SCSI_ERR_FATAL) { 17606 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17607 "Caddy not inserted in drive\n"); 17608 } 17609 17610 sr_ejected(un); 17611 un->un_mediastate = DKIO_EJECTED; 17612 /* The state has changed, inform the media watch routines */ 17613 cv_broadcast(&un->un_state_cv); 17614 /* Just fail if no media is present in the drive. */ 17615 goto fail_command; 17616 17617 default: 17618 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17619 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 17620 "Unit not Ready. Additional sense code 0x%x\n", 17621 asc); 17622 } 17623 break; 17624 } 17625 17626 do_retry: 17627 17628 /* 17629 * Retry the command, as some targets may report NOT READY for 17630 * several seconds after being reset. 17631 */ 17632 xp->xb_retry_count++; 17633 si.ssi_severity = SCSI_ERR_RETRYABLE; 17634 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17635 &si, EIO, SD_BSY_TIMEOUT, NULL); 17636 17637 return; 17638 17639 fail_command: 17640 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17641 sd_return_failed_command(un, bp, EIO); 17642 } 17643 17644 17645 17646 /* 17647 * Function: sd_sense_key_medium_or_hardware_error 17648 * 17649 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 17650 * sense key.
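 * When the retry count reaches un_reset_retry_count and bus device resets are allowed, a LUN reset is attempted, falling back to a target reset if that fails.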
17651 * 17652 * Context: May be called from interrupt context 17653 */ 17654 17655 static void 17656 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 17657 int sense_key, uint8_t asc, 17658 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17659 { 17660 struct sd_sense_info si; 17661 17662 ASSERT(un != NULL); 17663 ASSERT(mutex_owned(SD_MUTEX(un))); 17664 ASSERT(bp != NULL); 17665 ASSERT(xp != NULL); 17666 ASSERT(pktp != NULL); 17667 17668 si.ssi_severity = SCSI_ERR_FATAL; 17669 si.ssi_pfa_flag = FALSE; 17670 17671 if (sense_key == KEY_MEDIUM_ERROR) { 17672 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 17673 } 17674 17675 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17676 17677 if ((un->un_reset_retry_count != 0) && 17678 (xp->xb_retry_count == un->un_reset_retry_count)) { 17679 mutex_exit(SD_MUTEX(un)); 17680 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 17681 if (un->un_f_allow_bus_device_reset == TRUE) { 17682 17683 boolean_t try_resetting_target = B_TRUE; 17684 17685 /* 17686 * We need to be able to handle specific ASC values when 17687 * we are handling a KEY_HARDWARE_ERROR. In particular, 17688 * taking the default action of resetting the target may 17689 * not be the appropriate way to attempt recovery. 17690 * Resetting a target because of a single LUN failure 17691 * victimizes all LUNs on that target. 17692 * 17693 * This is true for the LSI arrays: if an LSI 17694 * array controller returns an ASC of 0x84 (LUN Dead), 17695 * we should trust it. 17696 */ 17697 17698 if (sense_key == KEY_HARDWARE_ERROR) { 17699 switch (asc) { 17700 case 0x84: 17701 if (SD_IS_LSI(un)) { 17702 try_resetting_target = B_FALSE; 17703 } 17704 break; 17705 default: 17706 break; 17707 } 17708 } 17709 17710 if (try_resetting_target == B_TRUE) { 17711 int reset_retval = 0; 17712 if (un->un_f_lun_reset_enabled == TRUE) { 17713 SD_TRACE(SD_LOG_IO_CORE, un, 17714 "sd_sense_key_medium_or_hardware_" 17715 "error: issuing RESET_LUN\n"); 17716 reset_retval = 17717 scsi_reset(SD_ADDRESS(un), 17718 RESET_LUN); 17719 } 17720 if (reset_retval == 0) { 17721 SD_TRACE(SD_LOG_IO_CORE, un, 17722 "sd_sense_key_medium_or_hardware_" 17723 "error: issuing RESET_TARGET\n"); 17724 (void) scsi_reset(SD_ADDRESS(un), 17725 RESET_TARGET); 17726 } 17727 } 17728 } 17729 mutex_enter(SD_MUTEX(un)); 17730 } 17731 17732 /* 17733 * This really ought to be a fatal error, but we will retry anyway 17734 * as some drives report this as a spurious error. 17735 */ 17736 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17737 &si, EIO, (clock_t)0, NULL); 17738 } 17739 17740 17741 17742 /* 17743 * Function: sd_sense_key_illegal_request 17744 * 17745 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
17746 * 17747 * Context: May be called from interrupt context 17748 */ 17749 17750 static void 17751 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17752 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17753 { 17754 struct sd_sense_info si; 17755 17756 ASSERT(un != NULL); 17757 ASSERT(mutex_owned(SD_MUTEX(un))); 17758 ASSERT(bp != NULL); 17759 ASSERT(xp != NULL); 17760 ASSERT(pktp != NULL); 17761 17762 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17763 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17764 17765 si.ssi_severity = SCSI_ERR_INFO; 17766 si.ssi_pfa_flag = FALSE; 17767 17768 /* Pointless to retry if the target thinks it's an illegal request */ 17769 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17770 sd_return_failed_command(un, bp, EIO); 17771 } 17772 17773 17774 17775 17776 /* 17777 * Function: sd_sense_key_unit_attention 17778 * 17779 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17780 * 17781 * Context: May be called from interrupt context 17782 */ 17783 17784 static void 17785 sd_sense_key_unit_attention(struct sd_lun *un, 17786 uint8_t asc, 17787 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17788 { 17789 /* 17790 * For UNIT ATTENTION we allow retries for one minute. Devices 17791 * like Sonoma can return UNIT ATTENTION close to a minute 17792 * under certain conditions. 17793 */ 17794 int retry_check_flag = SD_RETRIES_UA; 17795 struct sd_sense_info si; 17796 17797 ASSERT(un != NULL); 17798 ASSERT(mutex_owned(SD_MUTEX(un))); 17799 ASSERT(bp != NULL); 17800 ASSERT(xp != NULL); 17801 ASSERT(pktp != NULL); 17802 17803 si.ssi_severity = SCSI_ERR_INFO; 17804 si.ssi_pfa_flag = FALSE; 17805 17806 17807 switch (asc) { 17808 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17809 if (sd_report_pfa != 0) { 17810 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17811 si.ssi_pfa_flag = TRUE; 17812 retry_check_flag = SD_RETRIES_STANDARD; 17813 goto do_retry; 17814 } 17815 break; 17816 17817 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17818 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17819 un->un_resvd_status |= 17820 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17821 } 17822 /* FALLTHRU */ 17823 17824 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17825 if (!ISREMOVABLE(un)) { 17826 break; 17827 } 17828 17829 /* 17830 * When we get a unit attention from a removable-media device, 17831 * it may be in a state that will take a long time to recover 17832 * (e.g., from a reset). Since we are executing in interrupt 17833 * context here, we cannot wait around for the device to come 17834 * back. So hand this command off to sd_media_change_task() 17835 * for deferred processing under taskq thread context. (Note 17836 * that the command still may be failed if a problem is 17837 * encountered at a later time.) 17838 */ 17839 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 17840 KM_NOSLEEP) == 0) { 17841 /* 17842 * Cannot dispatch the request so fail the command. 17843 */ 17844 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17845 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17846 si.ssi_severity = SCSI_ERR_FATAL; 17847 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17848 sd_return_failed_command(un, bp, EIO); 17849 } 17850 /* 17851 * Either the command has been successfully dispatched to a 17852 * task Q for retrying, or the dispatch failed. In either case 17853 * do NOT retry again by calling sd_retry_command. 
This would set up 17854 * two retries of the same command; when one completes and 17855 * frees the resources, the other will access freed memory, 17856 * a bad thing. 17857 */ 17858 return; 17859 17860 default: 17861 break; 17862 } 17863 17864 if (!ISREMOVABLE(un)) { 17865 /* 17866 * Do not update these here for removables. For removables 17867 * these stats are updated (1) above if we failed to dispatch 17868 * sd_media_change_task(), or (2) sd_media_change_task() may 17869 * update these later if it encounters an error. 17870 */ 17871 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17872 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17873 } 17874 17875 do_retry: 17876 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 17877 EIO, SD_UA_RETRY_DELAY, NULL); 17878 } 17879 17880 17881 17882 /* 17883 * Function: sd_sense_key_fail_command 17884 * 17885 * Description: Used to fail a command when we don't like the sense key that 17886 * was returned. 17887 * 17888 * Context: May be called from interrupt context 17889 */ 17890 17891 static void 17892 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 17893 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17894 { 17895 struct sd_sense_info si; 17896 17897 ASSERT(un != NULL); 17898 ASSERT(mutex_owned(SD_MUTEX(un))); 17899 ASSERT(bp != NULL); 17900 ASSERT(xp != NULL); 17901 ASSERT(pktp != NULL); 17902 17903 si.ssi_severity = SCSI_ERR_FATAL; 17904 si.ssi_pfa_flag = FALSE; 17905 17906 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17907 sd_return_failed_command(un, bp, EIO); 17908 } 17909 17910 17911 17912 /* 17913 * Function: sd_sense_key_blank_check 17914 * 17915 * Description: Recovery actions for a SCSI "Blank Check" sense key. 17916 * Has no monetary connotation. 17917 * 17918 * Context: May be called from interrupt context 17919 */ 17920 17921 static void 17922 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 17923 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17924 { 17925 struct sd_sense_info si; 17926 17927 ASSERT(un != NULL); 17928 ASSERT(mutex_owned(SD_MUTEX(un))); 17929 ASSERT(bp != NULL); 17930 ASSERT(xp != NULL); 17931 ASSERT(pktp != NULL); 17932 17933 /* 17934 * Blank check is not fatal for removable devices, therefore 17935 * it does not require a console message. 17936 */ 17937 si.ssi_severity = (ISREMOVABLE(un)) ? SCSI_ERR_ALL : SCSI_ERR_FATAL; 17938 si.ssi_pfa_flag = FALSE; 17939 17940 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17941 sd_return_failed_command(un, bp, EIO); 17942 } 17943 17944 17945 17946 17947 /* 17948 * Function: sd_sense_key_aborted_command 17949 * 17950 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 17951 * 17952 * Context: May be called from interrupt context 17953 */ 17954 17955 static void 17956 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 17957 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17958 { 17959 struct sd_sense_info si; 17960 17961 ASSERT(un != NULL); 17962 ASSERT(mutex_owned(SD_MUTEX(un))); 17963 ASSERT(bp != NULL); 17964 ASSERT(xp != NULL); 17965 ASSERT(pktp != NULL); 17966 17967 si.ssi_severity = SCSI_ERR_FATAL; 17968 si.ssi_pfa_flag = FALSE; 17969 17970 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17971 17972 /* 17973 * This really ought to be a fatal error, but we will retry anyway 17974 * as some drives report this as a spurious error.
17975 */ 17976 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17977 &si, EIO, (clock_t)0, NULL); 17978 } 17979 17980 17981 17982 /* 17983 * Function: sd_sense_key_default 17984 * 17985 * Description: Default recovery action for several SCSI sense keys (basically 17986 * attempts a retry). 17987 * 17988 * Context: May be called from interrupt context 17989 */ 17990 17991 static void 17992 sd_sense_key_default(struct sd_lun *un, 17993 int sense_key, 17994 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17995 { 17996 struct sd_sense_info si; 17997 17998 ASSERT(un != NULL); 17999 ASSERT(mutex_owned(SD_MUTEX(un))); 18000 ASSERT(bp != NULL); 18001 ASSERT(xp != NULL); 18002 ASSERT(pktp != NULL); 18003 18004 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18005 18006 /* 18007 * Undecoded sense key. Attempt retries and hope that will fix 18008 * the problem. Otherwise, we're dead. 18009 */ 18010 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18011 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18012 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18013 } 18014 18015 si.ssi_severity = SCSI_ERR_FATAL; 18016 si.ssi_pfa_flag = FALSE; 18017 18018 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18019 &si, EIO, (clock_t)0, NULL); 18020 } 18021 18022 18023 18024 /* 18025 * Function: sd_print_retry_msg 18026 * 18027 * Description: Print a message indicating the retry action being taken. 18028 * 18029 * Arguments: un - ptr to associated softstate 18030 * bp - ptr to buf(9S) for the command 18031 * arg - not used. 18032 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18033 * or SD_NO_RETRY_ISSUED 18034 * 18035 * Context: May be called from interrupt context 18036 */ 18037 /* ARGSUSED */ 18038 static void 18039 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18040 { 18041 struct sd_xbuf *xp; 18042 struct scsi_pkt *pktp; 18043 char *reasonp; 18044 char *msgp; 18045 18046 ASSERT(un != NULL); 18047 ASSERT(mutex_owned(SD_MUTEX(un))); 18048 ASSERT(bp != NULL); 18049 pktp = SD_GET_PKTP(bp); 18050 ASSERT(pktp != NULL); 18051 xp = SD_GET_XBUF(bp); 18052 ASSERT(xp != NULL); 18053 18054 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18055 mutex_enter(&un->un_pm_mutex); 18056 if ((un->un_state == SD_STATE_SUSPENDED) || 18057 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18058 (pktp->pkt_flags & FLAG_SILENT)) { 18059 mutex_exit(&un->un_pm_mutex); 18060 goto update_pkt_reason; 18061 } 18062 mutex_exit(&un->un_pm_mutex); 18063 18064 /* 18065 * Suppress messages if they are all the same pkt_reason; with 18066 * TQ, many (up to 256) are returned with the same pkt_reason. 18067 * If we are in panic, then suppress the retry messages. 18068 */ 18069 switch (flag) { 18070 case SD_NO_RETRY_ISSUED: 18071 msgp = "giving up"; 18072 break; 18073 case SD_IMMEDIATE_RETRY_ISSUED: 18074 case SD_DELAYED_RETRY_ISSUED: 18075 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18076 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18077 (sd_error_level != SCSI_ERR_ALL))) { 18078 return; 18079 } 18080 msgp = "retrying command"; 18081 break; 18082 default: 18083 goto update_pkt_reason; 18084 } 18085 18086 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18087 scsi_rname(pktp->pkt_reason)); 18088 18089 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18090 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18091 18092 update_pkt_reason: 18093 /* 18094 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 
18095 * This is to prevent multiple console messages for the same failure 18096 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18097 * when the command is retried successfully because there still may be 18098 * more commands coming back with the same value of pktp->pkt_reason. 18099 */ 18100 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18101 un->un_last_pkt_reason = pktp->pkt_reason; 18102 } 18103 } 18104 18105 18106 /* 18107 * Function: sd_print_cmd_incomplete_msg 18108 * 18109 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18110 * 18111 * Arguments: un - ptr to associated softstate 18112 * bp - ptr to buf(9S) for the command 18113 * arg - passed to sd_print_retry_msg() 18114 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18115 * or SD_NO_RETRY_ISSUED 18116 * 18117 * Context: May be called from interrupt context 18118 */ 18119 18120 static void 18121 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18122 int code) 18123 { 18124 dev_info_t *dip; 18125 18126 ASSERT(un != NULL); 18127 ASSERT(mutex_owned(SD_MUTEX(un))); 18128 ASSERT(bp != NULL); 18129 18130 switch (code) { 18131 case SD_NO_RETRY_ISSUED: 18132 /* Command was failed. Someone turned off this target? */ 18133 if (un->un_state != SD_STATE_OFFLINE) { 18134 /* 18135 * Suppress message if we are detaching and 18136 * device has been disconnected 18137 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18138 * private interface and not part of the DDI 18139 */ 18140 dip = un->un_sd->sd_dev; 18141 if (!(DEVI_IS_DETACHING(dip) && 18142 DEVI_IS_DEVICE_REMOVED(dip))) { 18143 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18144 "disk not responding to selection\n"); 18145 } 18146 New_state(un, SD_STATE_OFFLINE); 18147 } 18148 break; 18149 18150 case SD_DELAYED_RETRY_ISSUED: 18151 case SD_IMMEDIATE_RETRY_ISSUED: 18152 default: 18153 /* Command was successfully queued for retry */ 18154 sd_print_retry_msg(un, bp, arg, code); 18155 break; 18156 } 18157 } 18158 18159 18160 /* 18161 * Function: sd_pkt_reason_cmd_incomplete 18162 * 18163 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18164 * 18165 * Context: May be called from interrupt context 18166 */ 18167 18168 static void 18169 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18170 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18171 { 18172 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18173 18174 ASSERT(un != NULL); 18175 ASSERT(mutex_owned(SD_MUTEX(un))); 18176 ASSERT(bp != NULL); 18177 ASSERT(xp != NULL); 18178 ASSERT(pktp != NULL); 18179 18180 /* Do not do a reset if selection did not complete */ 18181 /* Note: Should this not just check the bit? */ 18182 if (pktp->pkt_state != STATE_GOT_BUS) { 18183 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18184 sd_reset_target(un, pktp); 18185 } 18186 18187 /* 18188 * If the target was not successfully selected, then set 18189 * SD_RETRIES_FAILFAST to indicate that we lost communication 18190 * with the target, and further retries and/or commands are 18191 * likely to take a long time. 
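 *
 * (Editorial sketch, not original driver logic: pkt_state is a bit
 * mask that accumulates as the transport makes progress, so the
 * "selection never completed" case is detected by testing a single
 * bit, exactly as the code below does:
 *
 *	if ((pktp->pkt_state & STATE_GOT_TARGET) == 0)
 *		flag |= SD_RETRIES_FAILFAST;
 *
 * Contrast this with the equality test against STATE_GOT_BUS above,
 * which the "Should this not just check the bit?" note questions.)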
18192 */ 18193 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18194 flag |= SD_RETRIES_FAILFAST; 18195 } 18196 18197 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18198 18199 sd_retry_command(un, bp, flag, 18200 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18201 } 18202 18203 18204 18205 /* 18206 * Function: sd_pkt_reason_cmd_tran_err 18207 * 18208 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18209 * 18210 * Context: May be called from interrupt context 18211 */ 18212 18213 static void 18214 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18215 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18216 { 18217 ASSERT(un != NULL); 18218 ASSERT(mutex_owned(SD_MUTEX(un))); 18219 ASSERT(bp != NULL); 18220 ASSERT(xp != NULL); 18221 ASSERT(pktp != NULL); 18222 18223 /* 18224 * Do not reset if we got a parity error, or if 18225 * selection did not complete. 18226 */ 18227 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18228 /* Note: Should this not just check the bit for pkt_state? */ 18229 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18230 (pktp->pkt_state != STATE_GOT_BUS)) { 18231 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18232 sd_reset_target(un, pktp); 18233 } 18234 18235 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18236 18237 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18238 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18239 } 18240 18241 18242 18243 /* 18244 * Function: sd_pkt_reason_cmd_reset 18245 * 18246 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 18247 * 18248 * Context: May be called from interrupt context 18249 */ 18250 18251 static void 18252 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18253 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18254 { 18255 ASSERT(un != NULL); 18256 ASSERT(mutex_owned(SD_MUTEX(un))); 18257 ASSERT(bp != NULL); 18258 ASSERT(xp != NULL); 18259 ASSERT(pktp != NULL); 18260 18261 /* The target may still be running the command, so try to reset. */ 18262 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18263 sd_reset_target(un, pktp); 18264 18265 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18266 18267 /* 18268 * If pkt_reason is CMD_RESET chances are that this pkt got 18269 * reset because another target on this bus caused it. The target 18270 * that caused it should get CMD_TIMEOUT with pkt_statistics 18271 * of STAT_TIMEOUT/STAT_DEV_RESET. 18272 */ 18273 18274 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18275 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18276 } 18277 18278 18279 18280 18281 /* 18282 * Function: sd_pkt_reason_cmd_aborted 18283 * 18284 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18285 * 18286 * Context: May be called from interrupt context 18287 */ 18288 18289 static void 18290 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18291 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18292 { 18293 ASSERT(un != NULL); 18294 ASSERT(mutex_owned(SD_MUTEX(un))); 18295 ASSERT(bp != NULL); 18296 ASSERT(xp != NULL); 18297 ASSERT(pktp != NULL); 18298 18299 /* The target may still be running the command, so try to reset. */ 18300 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18301 sd_reset_target(un, pktp); 18302 18303 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18304 18305 /* 18306 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18307 * aborted because another target on this bus caused it. The target 18308 * that caused it should get CMD_TIMEOUT with pkt_statistics 18309 * of STAT_TIMEOUT/STAT_DEV_RESET. 
18310 */ 18311 18312 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18313 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18314 } 18315 18316 18317 18318 /* 18319 * Function: sd_pkt_reason_cmd_timeout 18320 * 18321 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18322 * 18323 * Context: May be called from interrupt context 18324 */ 18325 18326 static void 18327 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18328 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18329 { 18330 ASSERT(un != NULL); 18331 ASSERT(mutex_owned(SD_MUTEX(un))); 18332 ASSERT(bp != NULL); 18333 ASSERT(xp != NULL); 18334 ASSERT(pktp != NULL); 18335 18336 18337 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18338 sd_reset_target(un, pktp); 18339 18340 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18341 18342 /* 18343 * A command timeout indicates that we could not establish 18344 * communication with the target, so set SD_RETRIES_FAILFAST 18345 * as further retries/commands are likely to take a long time. 18346 */ 18347 sd_retry_command(un, bp, 18348 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18349 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18350 } 18351 18352 18353 18354 /* 18355 * Function: sd_pkt_reason_cmd_unx_bus_free 18356 * 18357 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 18358 * 18359 * Context: May be called from interrupt context 18360 */ 18361 18362 static void 18363 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18364 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18365 { 18366 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18367 18368 ASSERT(un != NULL); 18369 ASSERT(mutex_owned(SD_MUTEX(un))); 18370 ASSERT(bp != NULL); 18371 ASSERT(xp != NULL); 18372 ASSERT(pktp != NULL); 18373 18374 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18375 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18376 18377 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18378 sd_print_retry_msg : NULL; 18379 18380 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18381 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18382 } 18383 18384 18385 /* 18386 * Function: sd_pkt_reason_cmd_tag_reject 18387 * 18388 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18389 * 18390 * Context: May be called from interrupt context 18391 */ 18392 18393 static void 18394 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18395 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18396 { 18397 ASSERT(un != NULL); 18398 ASSERT(mutex_owned(SD_MUTEX(un))); 18399 ASSERT(bp != NULL); 18400 ASSERT(xp != NULL); 18401 ASSERT(pktp != NULL); 18402 18403 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18404 pktp->pkt_flags = 0; 18405 un->un_tagflags = 0; 18406 if (un->un_f_opt_queueing == TRUE) { 18407 un->un_throttle = min(un->un_throttle, 3); 18408 } else { 18409 un->un_throttle = 1; 18410 } 18411 mutex_exit(SD_MUTEX(un)); 18412 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18413 mutex_enter(SD_MUTEX(un)); 18414 18415 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18416 18417 /* Legacy behavior not to check retry counts here. */ 18418 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18419 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18420 } 18421 18422 18423 /* 18424 * Function: sd_pkt_reason_default 18425 * 18426 * Description: Default recovery actions for SCSA pkt_reason values that 18427 * do not have more explicit recovery actions. 
18428 * 18429 * Context: May be called from interrupt context 18430 */ 18431 18432 static void 18433 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18434 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18435 { 18436 ASSERT(un != NULL); 18437 ASSERT(mutex_owned(SD_MUTEX(un))); 18438 ASSERT(bp != NULL); 18439 ASSERT(xp != NULL); 18440 ASSERT(pktp != NULL); 18441 18442 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18443 sd_reset_target(un, pktp); 18444 18445 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18446 18447 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18448 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18449 } 18450 18451 18452 18453 /* 18454 * Function: sd_pkt_status_check_condition 18455 * 18456 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 18457 * 18458 * Context: May be called from interrupt context 18459 */ 18460 18461 static void 18462 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 18463 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18464 { 18465 ASSERT(un != NULL); 18466 ASSERT(mutex_owned(SD_MUTEX(un))); 18467 ASSERT(bp != NULL); 18468 ASSERT(xp != NULL); 18469 ASSERT(pktp != NULL); 18470 18471 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 18472 "entry: buf:0x%p xp:0x%p\n", bp, xp); 18473 18474 /* 18475 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 18476 * command will be retried after the request sense). Otherwise, retry 18477 * the command. Note: we are issuing the request sense even though the 18478 * retry limit may have been reached for the failed command. 18479 */ 18480 if (un->un_f_arq_enabled == FALSE) { 18481 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18482 "no ARQ, sending request sense command\n"); 18483 sd_send_request_sense_command(un, bp, pktp); 18484 } else { 18485 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18486 "ARQ, retrying request sense command\n"); 18487 #if defined(__i386) || defined(__amd64) 18488 /* 18489 * The SD_RETRY_DELAY value needs to be adjusted here 18490 * if SD_RETRY_DELAY changes in sddef.h 18491 */ 18492 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 0, 18493 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 18494 NULL); 18495 #else 18496 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 18497 0, SD_RETRY_DELAY, NULL); 18498 #endif 18499 } 18500 18501 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 18502 } 18503 18504 18505 /* 18506 * Function: sd_pkt_status_busy 18507 * 18508 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 18509 * 18510 * Context: May be called from interrupt context 18511 */ 18512 18513 static void 18514 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18515 struct scsi_pkt *pktp) 18516 { 18517 ASSERT(un != NULL); 18518 ASSERT(mutex_owned(SD_MUTEX(un))); 18519 ASSERT(bp != NULL); 18520 ASSERT(xp != NULL); 18521 ASSERT(pktp != NULL); 18522 18523 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18524 "sd_pkt_status_busy: entry\n"); 18525 18526 /* If retries are exhausted, just fail the command. */ 18527 if (xp->xb_retry_count >= un->un_busy_retry_count) { 18528 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18529 "device busy too long\n"); 18530 sd_return_failed_command(un, bp, EIO); 18531 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18532 "sd_pkt_status_busy: exit\n"); 18533 return; 18534 } 18535 xp->xb_retry_count++; 18536 18537 /* 18538 * Try to reset the target.
However, we do not want to perform 18539 * more than one reset if the device continues to fail. The reset 18540 * will be performed when the retry count reaches the reset 18541 * threshold. This threshold should be set such that at least 18542 * one retry is issued before the reset is performed. 18543 */ 18544 if (xp->xb_retry_count == 18545 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 18546 int rval = 0; 18547 mutex_exit(SD_MUTEX(un)); 18548 if (un->un_f_allow_bus_device_reset == TRUE) { 18549 /* 18550 * First try to reset the LUN; if we cannot then 18551 * try to reset the target. 18552 */ 18553 if (un->un_f_lun_reset_enabled == TRUE) { 18554 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18555 "sd_pkt_status_busy: RESET_LUN\n"); 18556 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18557 } 18558 if (rval == 0) { 18559 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18560 "sd_pkt_status_busy: RESET_TARGET\n"); 18561 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18562 } 18563 } 18564 if (rval == 0) { 18565 /* 18566 * If the RESET_LUN and/or RESET_TARGET failed, 18567 * try RESET_ALL 18568 */ 18569 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18570 "sd_pkt_status_busy: RESET_ALL\n"); 18571 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18572 } 18573 mutex_enter(SD_MUTEX(un)); 18574 if (rval == 0) { 18575 /* 18576 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18577 * At this point we give up & fail the command. 18578 */ 18579 sd_return_failed_command(un, bp, EIO); 18580 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18581 "sd_pkt_status_busy: exit (failed cmd)\n"); 18582 return; 18583 } 18584 } 18585 18586 /* 18587 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18588 * we have already checked the retry counts above. 18589 */ 18590 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18591 EIO, SD_BSY_TIMEOUT, NULL); 18592 18593 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18594 "sd_pkt_status_busy: exit\n"); 18595 } 18596 18597 18598 /* 18599 * Function: sd_pkt_status_reservation_conflict 18600 * 18601 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18602 * command status. 18603 * 18604 * Context: May be called from interrupt context 18605 */ 18606 18607 static void 18608 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18609 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18610 { 18611 ASSERT(un != NULL); 18612 ASSERT(mutex_owned(SD_MUTEX(un))); 18613 ASSERT(bp != NULL); 18614 ASSERT(xp != NULL); 18615 ASSERT(pktp != NULL); 18616 18617 /* 18618 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18619 * conflict could be due to various reasons like incorrect keys, not 18620 * registered or not reserved etc. So, we return EACCES to the caller. 18621 */ 18622 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18623 int cmd = SD_GET_PKT_OPCODE(pktp); 18624 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18625 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18626 sd_return_failed_command(un, bp, EACCES); 18627 return; 18628 } 18629 } 18630 18631 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18632 18633 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18634 if (sd_failfast_enable != 0) { 18635 /* By definition, we must panic here.... 
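 * (Editorial note: sd_failfast_enable is the tunable consulted here.
 * In a failfast/HA configuration, losing the reservation implies that
 * another node has taken ownership of the disk; panicking immediately
 * is the surest way to guarantee this node issues no further writes
 * to shared state.)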
*/ 18636 panic("Reservation Conflict"); 18637 /*NOTREACHED*/ 18638 } 18639 SD_ERROR(SD_LOG_IO, un, 18640 "sd_handle_resv_conflict: Disk Reserved\n"); 18641 sd_return_failed_command(un, bp, EACCES); 18642 return; 18643 } 18644 18645 /* 18646 * 1147670: retry only if sd_retry_on_reservation_conflict 18647 * property is set (default is 1). Retries will not succeed 18648 * on a disk reserved by another initiator. HA systems 18649 * may reset this via sd.conf to avoid these retries. 18650 * 18651 * Note: The legacy return code for this failure is EIO, however EACCES 18652 * seems more appropriate for a reservation conflict. 18653 */ 18654 if (sd_retry_on_reservation_conflict == 0) { 18655 SD_ERROR(SD_LOG_IO, un, 18656 "sd_handle_resv_conflict: Device Reserved\n"); 18657 sd_return_failed_command(un, bp, EIO); 18658 return; 18659 } 18660 18661 /* 18662 * Retry the command if we can. 18663 * 18664 * Note: The legacy return code for this failure is EIO, however EACCES 18665 * seems more appropriate for a reservation conflict. 18666 */ 18667 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18668 (clock_t)2, NULL); 18669 } 18670 18671 18672 18673 /* 18674 * Function: sd_pkt_status_qfull 18675 * 18676 * Description: Handle a QUEUE FULL condition from the target. This can 18677 * occur if the HBA does not handle the queue full condition. 18678 * (Basically this means third-party HBAs as Sun HBAs will 18679 * handle the queue full condition.) Note that if there are 18680 * some commands already in the transport, then the queue full 18681 * has occurred because the queue for this nexus is actually 18682 * full. If there are no commands in the transport, then the 18683 * queue full is resulting from some other initiator or lun 18684 * consuming all the resources at the target. 18685 * 18686 * Context: May be called from interrupt context 18687 */ 18688 18689 static void 18690 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18691 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18692 { 18693 ASSERT(un != NULL); 18694 ASSERT(mutex_owned(SD_MUTEX(un))); 18695 ASSERT(bp != NULL); 18696 ASSERT(xp != NULL); 18697 ASSERT(pktp != NULL); 18698 18699 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18700 "sd_pkt_status_qfull: entry\n"); 18701 18702 /* 18703 * Just lower the QFULL throttle and retry the command. Note that 18704 * we do not limit the number of retries here. 18705 */ 18706 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18707 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18708 SD_RESTART_TIMEOUT, NULL); 18709 18710 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18711 "sd_pkt_status_qfull: exit\n"); 18712 } 18713 18714 18715 /* 18716 * Function: sd_reset_target 18717 * 18718 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18719 * RESET_TARGET, or RESET_ALL. 18720 * 18721 * Context: May be called under interrupt context. 18722 */ 18723 18724 static void 18725 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18726 { 18727 int rval = 0; 18728 18729 ASSERT(un != NULL); 18730 ASSERT(mutex_owned(SD_MUTEX(un))); 18731 ASSERT(pktp != NULL); 18732 18733 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18734 18735 /* 18736 * No need to reset if the transport layer has already done so. 
18737 */ 18738 if ((pktp->pkt_statistics & 18739 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 18740 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18741 "sd_reset_target: no reset\n"); 18742 return; 18743 } 18744 18745 mutex_exit(SD_MUTEX(un)); 18746 18747 if (un->un_f_allow_bus_device_reset == TRUE) { 18748 if (un->un_f_lun_reset_enabled == TRUE) { 18749 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18750 "sd_reset_target: RESET_LUN\n"); 18751 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18752 } 18753 if (rval == 0) { 18754 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18755 "sd_reset_target: RESET_TARGET\n"); 18756 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18757 } 18758 } 18759 18760 if (rval == 0) { 18761 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18762 "sd_reset_target: RESET_ALL\n"); 18763 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 18764 } 18765 18766 mutex_enter(SD_MUTEX(un)); 18767 18768 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 18769 } 18770 18771 18772 /* 18773 * Function: sd_media_change_task 18774 * 18775 * Description: Recovery action for CDROM to become available. 18776 * 18777 * Context: Executes in a taskq() thread context 18778 */ 18779 18780 static void 18781 sd_media_change_task(void *arg) 18782 { 18783 struct scsi_pkt *pktp = arg; 18784 struct sd_lun *un; 18785 struct buf *bp; 18786 struct sd_xbuf *xp; 18787 int err = 0; 18788 int retry_count = 0; 18789 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 18790 struct sd_sense_info si; 18791 18792 ASSERT(pktp != NULL); 18793 bp = (struct buf *)pktp->pkt_private; 18794 ASSERT(bp != NULL); 18795 xp = SD_GET_XBUF(bp); 18796 ASSERT(xp != NULL); 18797 un = SD_GET_UN(bp); 18798 ASSERT(un != NULL); 18799 ASSERT(!mutex_owned(SD_MUTEX(un))); 18800 ASSERT(ISREMOVABLE(un)); 18801 18802 si.ssi_severity = SCSI_ERR_INFO; 18803 si.ssi_pfa_flag = FALSE; 18804 18805 /* 18806 * When a reset is issued on a CDROM, it takes a long time to 18807 * recover. The first few attempts to read capacity and other things 18808 * related to handling unit attention fail (with an ASC of 0x4 and 18809 * an ASCQ of 0x1). In that case we want to do enough retries, but we want 18810 * to limit the retries in other cases of genuine failures like 18811 * no media in the drive. 18812 */ 18813 while (retry_count++ < retry_limit) { 18814 if ((err = sd_handle_mchange(un)) == 0) { 18815 break; 18816 } 18817 if (err == EAGAIN) { 18818 retry_limit = SD_UNIT_ATTENTION_RETRY; 18819 } 18820 /* Sleep for 0.5 sec. & try again */ 18821 delay(drv_usectohz(500000)); 18822 } 18823 18824 /* 18825 * Dispatch (retry or fail) the original command here, 18826 * along with appropriate console messages.... 18827 * 18828 * Must grab the mutex before calling sd_retry_command, 18829 * sd_print_sense_msg and sd_return_failed_command. 18830 */ 18831 mutex_enter(SD_MUTEX(un)); 18832 if (err != SD_CMD_SUCCESS) { 18833 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18834 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18835 si.ssi_severity = SCSI_ERR_FATAL; 18836 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18837 sd_return_failed_command(un, bp, EIO); 18838 } else { 18839 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18840 &si, EIO, (clock_t)0, NULL); 18841 } 18842 mutex_exit(SD_MUTEX(un)); 18843 } 18844 18845 18846 18847 /* 18848 * Function: sd_handle_mchange 18849 * 18850 * Description: Perform geometry validation & other recovery when a CDROM 18851 * has been removed from the drive.
18852 * 18853 * Return Code: 0 for success 18854 * errno-type return code of either sd_send_scsi_DOORLOCK() or 18855 * sd_send_scsi_READ_CAPACITY() 18856 * 18857 * Context: Executes in a taskq() thread context 18858 */ 18859 18860 static int 18861 sd_handle_mchange(struct sd_lun *un) 18862 { 18863 uint64_t capacity; 18864 uint32_t lbasize; 18865 int rval; 18866 18867 ASSERT(!mutex_owned(SD_MUTEX(un))); 18868 ASSERT(ISREMOVABLE(un)); 18869 18870 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 18871 SD_PATH_DIRECT_PRIORITY)) != 0) { 18872 return (rval); 18873 } 18874 18875 mutex_enter(SD_MUTEX(un)); 18876 sd_update_block_info(un, lbasize, capacity); 18877 18878 if (un->un_errstats != NULL) { 18879 struct sd_errstats *stp = 18880 (struct sd_errstats *)un->un_errstats->ks_data; 18881 stp->sd_capacity.value.ui64 = (uint64_t) 18882 ((uint64_t)un->un_blockcount * 18883 (uint64_t)un->un_tgt_blocksize); 18884 } 18885 18886 /* 18887 * Note: Maybe let the strategy/partitioning chain worry about getting 18888 * valid geometry. 18889 */ 18890 un->un_f_geometry_is_valid = FALSE; 18891 (void) sd_validate_geometry(un, SD_PATH_DIRECT_PRIORITY); 18892 if (un->un_f_geometry_is_valid == FALSE) { 18893 mutex_exit(SD_MUTEX(un)); 18894 return (EIO); 18895 } 18896 18897 mutex_exit(SD_MUTEX(un)); 18898 18899 /* 18900 * Try to lock the door 18901 */ 18902 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 18903 SD_PATH_DIRECT_PRIORITY)); 18904 } 18905 18906 18907 /* 18908 * Function: sd_send_scsi_DOORLOCK 18909 * 18910 * Description: Issue the scsi DOOR LOCK command 18911 * 18912 * Arguments: un - pointer to driver soft state (unit) structure for 18913 * this target. 18914 * flag - SD_REMOVAL_ALLOW 18915 * SD_REMOVAL_PREVENT 18916 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18917 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18918 * to use the USCSI "direct" chain and bypass the normal 18919 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18920 * command is issued as part of an error recovery action. 18921 * 18922 * Return Code: 0 - Success 18923 * errno return code from sd_send_scsi_cmd() 18924 * 18925 * Context: Can sleep. 
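 *
 * Usage sketch (hypothetical caller; see sd_handle_mchange() for a
 * real one):
 *
 *	if (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
 *	    SD_PATH_DIRECT_PRIORITY) != 0) {
 *		(lock refused; the user can still eject the media)
 *	}
 *
 * A CHECK CONDITION with ILLEGAL REQUEST sense is latched below by
 * clearing un_f_doorlock_supported, so later calls return success
 * without reissuing the command.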
18926 */ 18927 18928 static int 18929 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 18930 { 18931 union scsi_cdb cdb; 18932 struct uscsi_cmd ucmd_buf; 18933 struct scsi_extended_sense sense_buf; 18934 int status; 18935 18936 ASSERT(un != NULL); 18937 ASSERT(!mutex_owned(SD_MUTEX(un))); 18938 18939 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 18940 18941 /* already determined doorlock is not supported, fake success */ 18942 if (un->un_f_doorlock_supported == FALSE) { 18943 return (0); 18944 } 18945 18946 bzero(&cdb, sizeof (cdb)); 18947 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18948 18949 cdb.scc_cmd = SCMD_DOORLOCK; 18950 cdb.cdb_opaque[4] = (uchar_t)flag; 18951 18952 ucmd_buf.uscsi_cdb = (char *)&cdb; 18953 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18954 ucmd_buf.uscsi_bufaddr = NULL; 18955 ucmd_buf.uscsi_buflen = 0; 18956 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18957 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18958 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18959 ucmd_buf.uscsi_timeout = 15; 18960 18961 SD_TRACE(SD_LOG_IO, un, 18962 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 18963 18964 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 18965 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 18966 18967 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 18968 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18969 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 18970 /* fake success and skip subsequent doorlock commands */ 18971 un->un_f_doorlock_supported = FALSE; 18972 return (0); 18973 } 18974 18975 return (status); 18976 } 18977 18978 18979 /* 18980 * Function: sd_send_scsi_READ_CAPACITY 18981 * 18982 * Description: This routine uses the scsi READ CAPACITY command to determine 18983 * the device capacity in number of blocks and the device native 18984 * block size. If this function returns a failure, then the 18985 * values in *capp and *lbap are undefined. If the capacity 18986 * returned is 0xffffffff then the lun is too large for a 18987 * normal READ CAPACITY command and the results of a 18988 * READ CAPACITY 16 will be used instead. 18989 * 18990 * Arguments: un - ptr to soft state struct for the target 18991 * capp - ptr to unsigned 64-bit variable to receive the 18992 * capacity value from the command. 18993 * lbap - ptr to unsigned 32-bit variable to receive the 18994 * block size value from the command 18995 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18996 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18997 * to use the USCSI "direct" chain and bypass the normal 18998 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18999 * command is issued as part of an error recovery action. 19000 * 19001 * Return Code: 0 - Success 19002 * EIO - IO error 19003 * EACCES - Reservation conflict detected 19004 * EAGAIN - Device is becoming ready 19005 * errno return code from sd_send_scsi_cmd() 19006 * 19007 * Context: Can sleep. Blocks until command completes.
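 *
 * Usage sketch, mirroring the call made from sd_handle_mchange():
 *
 *	uint64_t	capacity;
 *	uint32_t	lbasize;
 *
 *	if (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize,
 *	    SD_PATH_DIRECT) == 0) {
 *		(capacity is returned in un_sys_blocksize units,
 *		adjusted as described above)
 *	}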
19008 */ 19009 19010 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 19011 19012 static int 19013 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 19014 int path_flag) 19015 { 19016 struct scsi_extended_sense sense_buf; 19017 struct uscsi_cmd ucmd_buf; 19018 union scsi_cdb cdb; 19019 uint32_t *capacity_buf; 19020 uint64_t capacity; 19021 uint32_t lbasize; 19022 int status; 19023 19024 ASSERT(un != NULL); 19025 ASSERT(!mutex_owned(SD_MUTEX(un))); 19026 ASSERT(capp != NULL); 19027 ASSERT(lbap != NULL); 19028 19029 SD_TRACE(SD_LOG_IO, un, 19030 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 19031 19032 /* 19033 * First send a READ_CAPACITY command to the target. 19034 * (This command is mandatory under SCSI-2.) 19035 * 19036 * Set up the CDB for the READ_CAPACITY command. The Partial 19037 * Medium Indicator bit is cleared. The address field must be 19038 * zero if the PMI bit is zero. 19039 */ 19040 bzero(&cdb, sizeof (cdb)); 19041 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19042 19043 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 19044 19045 cdb.scc_cmd = SCMD_READ_CAPACITY; 19046 19047 ucmd_buf.uscsi_cdb = (char *)&cdb; 19048 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19049 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 19050 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 19051 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19052 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19053 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19054 ucmd_buf.uscsi_timeout = 60; 19055 19056 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19057 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 19058 19059 switch (status) { 19060 case 0: 19061 /* Return failure if we did not get valid capacity data. */ 19062 if (ucmd_buf.uscsi_resid != 0) { 19063 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19064 return (EIO); 19065 } 19066 19067 /* 19068 * Read capacity and block size from the READ CAPACITY 10 data. 19069 * This data may be adjusted later due to device specific 19070 * issues. 19071 * 19072 * According to the SCSI spec, the READ CAPACITY 10 19073 * command returns the following: 19074 * 19075 * bytes 0-3: Maximum logical block address available. 19076 * (MSB in byte:0 & LSB in byte:3) 19077 * 19078 * bytes 4-7: Block length in bytes 19079 * (MSB in byte:4 & LSB in byte:7) 19080 * 19081 */ 19082 capacity = BE_32(capacity_buf[0]); 19083 lbasize = BE_32(capacity_buf[1]); 19084 19085 /* 19086 * Done with capacity_buf 19087 */ 19088 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19089 19090 /* 19091 * if the reported capacity is set to all 0xf's, then 19092 * this disk is too large and requires SBC-2 commands. 19093 * Reissue the request using READ CAPACITY 16. 19094 */ 19095 if (capacity == 0xffffffff) { 19096 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 19097 &lbasize, path_flag); 19098 if (status != 0) { 19099 return (status); 19100 } 19101 } 19102 break; /* Success! 
*/ 19103 case EIO: 19104 switch (ucmd_buf.uscsi_status) { 19105 case STATUS_RESERVATION_CONFLICT: 19106 status = EACCES; 19107 break; 19108 case STATUS_CHECK: 19109 /* 19110 * Check condition; look for ASC/ASCQ of 0x04/0x01 19111 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19112 */ 19113 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19114 (sense_buf.es_add_code == 0x04) && 19115 (sense_buf.es_qual_code == 0x01)) { 19116 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19117 return (EAGAIN); 19118 } 19119 break; 19120 default: 19121 break; 19122 } 19123 /* FALLTHRU */ 19124 default: 19125 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19126 return (status); 19127 } 19128 19129 /* 19130 * Some ATAPI CD-ROM drives report inaccurate LBA size values 19131 * (2352 and 0 are common) so for these devices always force the value 19132 * to 2048 as required by the ATAPI specs. 19133 */ 19134 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 19135 lbasize = 2048; 19136 } 19137 19138 /* 19139 * Get the maximum LBA value from the READ CAPACITY data. 19140 * Here we assume that the Partial Medium Indicator (PMI) bit 19141 * was cleared when issuing the command. This means that the LBA 19142 * returned from the device is the LBA of the last logical block 19143 * on the logical unit. The actual logical block count will be 19144 * this value plus one. 19145 * 19146 * Currently the capacity is saved in terms of un->un_sys_blocksize, 19147 * so scale the capacity value to reflect this. 19148 */ 19149 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 19150 19151 #if defined(__i386) || defined(__amd64) 19152 /* 19153 * On x86, compensate for off-by-1 error (number of sectors on 19154 * media) (1175930) 19155 */ 19156 if (!ISREMOVABLE(un) && (lbasize == un->un_sys_blocksize)) { 19157 capacity -= 1; 19158 } 19159 #endif 19160 19161 /* 19162 * Copy the values from the READ CAPACITY command into the space 19163 * provided by the caller. 19164 */ 19165 *capp = capacity; 19166 *lbap = lbasize; 19167 19168 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 19169 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19170 19171 /* 19172 * Both the lbasize and capacity from the device must be nonzero, 19173 * otherwise we assume that the values are not valid and return 19174 * failure to the caller. (4203735) 19175 */ 19176 if ((capacity == 0) || (lbasize == 0)) { 19177 return (EIO); 19178 } 19179 19180 return (0); 19181 } 19182 19183 /* 19184 * Function: sd_send_scsi_READ_CAPACITY_16 19185 * 19186 * Description: This routine uses the scsi READ CAPACITY 16 command to 19187 * determine the device capacity in number of blocks and the 19188 * device native block size. If this function returns a failure, 19189 * then the values in *capp and *lbap are undefined. 19190 * This routine should always be called by 19191 * sd_send_scsi_READ_CAPACITY, which will apply any device- 19192 * specific adjustments to capacity and lbasize. 19193 * 19194 * Arguments: un - ptr to soft state struct for the target 19195 * capp - ptr to unsigned 64-bit variable to receive the 19196 * capacity value from the command. 19197 * lbap - ptr to unsigned 32-bit variable to receive the 19198 * block size value from the command 19199 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19200 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19201 * to use the USCSI "direct" chain and bypass the normal 19202 * command waitq.
SD_PATH_DIRECT_PRIORITY is used when 19203 * this command is issued as part of an error recovery 19204 * action. 19205 * 19206 * Return Code: 0 - Success 19207 * EIO - IO error 19208 * EACCES - Reservation conflict detected 19209 * EAGAIN - Device is becoming ready 19210 * errno return code from sd_send_scsi_cmd() 19211 * 19212 * Context: Can sleep. Blocks until command completes. 19213 */ 19214 19215 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 19216 19217 static int 19218 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 19219 uint32_t *lbap, int path_flag) 19220 { 19221 struct scsi_extended_sense sense_buf; 19222 struct uscsi_cmd ucmd_buf; 19223 union scsi_cdb cdb; 19224 uint64_t *capacity16_buf; 19225 uint64_t capacity; 19226 uint32_t lbasize; 19227 int status; 19228 19229 ASSERT(un != NULL); 19230 ASSERT(!mutex_owned(SD_MUTEX(un))); 19231 ASSERT(capp != NULL); 19232 ASSERT(lbap != NULL); 19233 19234 SD_TRACE(SD_LOG_IO, un, 19235 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 19236 19237 /* 19238 * First send a READ_CAPACITY_16 command to the target. 19239 * 19240 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 19241 * Medium Indicator bit is cleared. The address field must be 19242 * zero if the PMI bit is zero. 19243 */ 19244 bzero(&cdb, sizeof (cdb)); 19245 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19246 19247 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 19248 19249 ucmd_buf.uscsi_cdb = (char *)&cdb; 19250 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 19251 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 19252 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 19253 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19254 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19255 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19256 ucmd_buf.uscsi_timeout = 60; 19257 19258 /* 19259 * Read Capacity (16) is a Service Action In command. One 19260 * command byte (0x9E) is overloaded for multiple operations, 19261 * with the second CDB byte specifying the desired operation. 19262 */ 19263 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 19264 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 19265 19266 /* 19267 * Fill in allocation length field 19268 */ 19269 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 19270 19271 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19272 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 19273 19274 switch (status) { 19275 case 0: 19276 /* Return failure if we did not get valid capacity data. */ 19277 if (ucmd_buf.uscsi_resid > 20) { 19278 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19279 return (EIO); 19280 } 19281 19282 /* 19283 * Read capacity and block size from the READ CAPACITY 16 data. 19284 * This data may be adjusted later due to device specific 19285 * issues. 19286 * 19287 * According to the SCSI spec, the READ CAPACITY 16 19288 * command returns the following: 19289 * 19290 * bytes 0-7: Maximum logical block address available. 19291 * (MSB in byte:0 & LSB in byte:7) 19292 * 19293 * bytes 8-11: Block length in bytes 19294 * (MSB in byte:8 & LSB in byte:11) 19295 * 19296 */ 19297 capacity = BE_64(capacity16_buf[0]); 19298 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 19299 19300 /* 19301 * Done with capacity16_buf 19302 */ 19303 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19304 19305 /* 19306 * If the reported capacity is set to all 0xf's, then 19307 * this disk is too large.
This could only happen with 19308 * a device that supports LBAs larger than 64 bits which 19309 * are not defined by any current T10 standards. 19310 */ 19311 if (capacity == 0xffffffffffffffff) { 19312 return (EIO); 19313 } 19314 break; /* Success! */ 19315 case EIO: 19316 switch (ucmd_buf.uscsi_status) { 19317 case STATUS_RESERVATION_CONFLICT: 19318 status = EACCES; 19319 break; 19320 case STATUS_CHECK: 19321 /* 19322 * Check condition; look for ASC/ASCQ of 0x04/0x01 19323 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19324 */ 19325 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19326 (sense_buf.es_add_code == 0x04) && 19327 (sense_buf.es_qual_code == 0x01)) { 19328 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19329 return (EAGAIN); 19330 } 19331 break; 19332 default: 19333 break; 19334 } 19335 /* FALLTHRU */ 19336 default: 19337 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19338 return (status); 19339 } 19340 19341 *capp = capacity; 19342 *lbap = lbasize; 19343 19344 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 19345 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19346 19347 return (0); 19348 } 19349 19350 19351 /* 19352 * Function: sd_send_scsi_START_STOP_UNIT 19353 * 19354 * Description: Issue a scsi START STOP UNIT command to the target. 19355 * 19356 * Arguments: un - pointer to driver soft state (unit) structure for 19357 * this target. 19358 * flag - SD_TARGET_START 19359 * SD_TARGET_STOP 19360 * SD_TARGET_EJECT 19361 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19362 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19363 * to use the USCSI "direct" chain and bypass the normal 19364 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19365 * command is issued as part of an error recovery action. 19366 * 19367 * Return Code: 0 - Success 19368 * EIO - IO error 19369 * EACCES - Reservation conflict detected 19370 * ENXIO - Not Ready, medium not present 19371 * errno return code from sd_send_scsi_cmd() 19372 * 19373 * Context: Can sleep. 19374 */ 19375 19376 static int 19377 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 19378 { 19379 struct scsi_extended_sense sense_buf; 19380 union scsi_cdb cdb; 19381 struct uscsi_cmd ucmd_buf; 19382 int status; 19383 19384 ASSERT(un != NULL); 19385 ASSERT(!mutex_owned(SD_MUTEX(un))); 19386 19387 SD_TRACE(SD_LOG_IO, un, 19388 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 19389 19390 if (ISREMOVABLE(un) && 19391 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 19392 (un->un_f_start_stop_supported != TRUE)) { 19393 return (0); 19394 } 19395 19396 bzero(&cdb, sizeof (cdb)); 19397 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19398 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19399 19400 cdb.scc_cmd = SCMD_START_STOP; 19401 cdb.cdb_opaque[4] = (uchar_t)flag; 19402 19403 ucmd_buf.uscsi_cdb = (char *)&cdb; 19404 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19405 ucmd_buf.uscsi_bufaddr = NULL; 19406 ucmd_buf.uscsi_buflen = 0; 19407 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19408 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19409 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19410 ucmd_buf.uscsi_timeout = 200; 19411 19412 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19413 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 19414 19415 switch (status) { 19416 case 0: 19417 break; /* Success! 
*/ 19418 case EIO: 19419 switch (ucmd_buf.uscsi_status) { 19420 case STATUS_RESERVATION_CONFLICT: 19421 status = EACCES; 19422 break; 19423 case STATUS_CHECK: 19424 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19425 switch (sense_buf.es_key) { 19426 case KEY_ILLEGAL_REQUEST: 19427 status = ENOTSUP; 19428 break; 19429 case KEY_NOT_READY: 19430 if (sense_buf.es_add_code == 0x3A) { 19431 status = ENXIO; 19432 } 19433 break; 19434 default: 19435 break; 19436 } 19437 } 19438 break; 19439 default: 19440 break; 19441 } 19442 break; 19443 default: 19444 break; 19445 } 19446 19447 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19448 19449 return (status); 19450 } 19451 19452 19453 /* 19454 * Function: sd_start_stop_unit_callback 19455 * 19456 * Description: timeout(9F) callback to begin recovery process for a 19457 * device that has spun down. 19458 * 19459 * Arguments: arg - pointer to associated softstate struct. 19460 * 19461 * Context: Executes in a timeout(9F) thread context 19462 */ 19463 19464 static void 19465 sd_start_stop_unit_callback(void *arg) 19466 { 19467 struct sd_lun *un = arg; 19468 ASSERT(un != NULL); 19469 ASSERT(!mutex_owned(SD_MUTEX(un))); 19470 19471 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19472 19473 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19474 } 19475 19476 19477 /* 19478 * Function: sd_start_stop_unit_task 19479 * 19480 * Description: Recovery procedure when a drive is spun down. 19481 * 19482 * Arguments: arg - pointer to associated softstate struct. 19483 * 19484 * Context: Executes in a taskq() thread context 19485 */ 19486 19487 static void 19488 sd_start_stop_unit_task(void *arg) 19489 { 19490 struct sd_lun *un = arg; 19491 19492 ASSERT(un != NULL); 19493 ASSERT(!mutex_owned(SD_MUTEX(un))); 19494 19495 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19496 19497 /* 19498 * Some unformatted drives report not ready error, no need to 19499 * restart if format has been initiated. 19500 */ 19501 mutex_enter(SD_MUTEX(un)); 19502 if (un->un_f_format_in_progress == TRUE) { 19503 mutex_exit(SD_MUTEX(un)); 19504 return; 19505 } 19506 mutex_exit(SD_MUTEX(un)); 19507 19508 /* 19509 * When a START STOP command is issued from here, it is part of a 19510 * failure recovery operation and must be issued before any other 19511 * commands, including any pending retries. Thus it must be sent 19512 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 19513 * succeeds or not, we will start I/O after the attempt. 19514 */ 19515 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 19516 SD_PATH_DIRECT_PRIORITY); 19517 19518 /* 19519 * The above call blocks until the START_STOP_UNIT command completes. 19520 * Now that it has completed, we must re-try the original IO that 19521 * received the NOT READY condition in the first place. There are 19522 * three possible conditions here: 19523 * 19524 * (1) The original IO is on un_retry_bp. 19525 * (2) The original IO is on the regular wait queue, and un_retry_bp 19526 * is NULL. 19527 * (3) The original IO is on the regular wait queue, and un_retry_bp 19528 * points to some other, unrelated bp. 19529 * 19530 * For each case, we must call sd_start_cmds() with un_retry_bp 19531 * as the argument. If un_retry_bp is NULL, this will initiate 19532 * processing of the regular wait queue. If un_retry_bp is not NULL, 19533 * then this will process the bp on un_retry_bp. 
That may or may not 19534 * be the original IO, but that does not matter: the important thing 19535 * is to keep the IO processing going at this point. 19536 * 19537 * Note: This is a very specific error recovery sequence associated 19538 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 19539 * serialize the I/O with completion of the spin-up. 19540 */ 19541 mutex_enter(SD_MUTEX(un)); 19542 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19543 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 19544 un, un->un_retry_bp); 19545 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 19546 sd_start_cmds(un, un->un_retry_bp); 19547 mutex_exit(SD_MUTEX(un)); 19548 19549 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 19550 } 19551 19552 19553 /* 19554 * Function: sd_send_scsi_INQUIRY 19555 * 19556 * Description: Issue the scsi INQUIRY command. 19557 * 19558 * Arguments: un - ptr to soft state struct for the target 19559 * bufaddr - buffer to receive the inquiry data 19560 * buflen - size of bufaddr in bytes 19561 * evpd - EVPD bit for byte 1 of the CDB 19562 * page_code - VPD page code for byte 2 of the CDB 19563 * residp - ptr to receive the residual count; may be NULL 19564 * 19565 * Return Code: 0 - Success 19566 * errno return code from sd_send_scsi_cmd() 19567 * 19568 * Context: Can sleep. Does not return until command is completed. 19569 */ 19570 19571 static int 19572 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 19573 uchar_t evpd, uchar_t page_code, size_t *residp) 19574 { 19575 union scsi_cdb cdb; 19576 struct uscsi_cmd ucmd_buf; 19577 int status; 19578 19579 ASSERT(un != NULL); 19580 ASSERT(!mutex_owned(SD_MUTEX(un))); 19581 ASSERT(bufaddr != NULL); 19582 19583 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 19584 19585 bzero(&cdb, sizeof (cdb)); 19586 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19587 bzero(bufaddr, buflen); 19588 19589 cdb.scc_cmd = SCMD_INQUIRY; 19590 cdb.cdb_opaque[1] = evpd; 19591 cdb.cdb_opaque[2] = page_code; 19592 FORMG0COUNT(&cdb, buflen); 19593 19594 ucmd_buf.uscsi_cdb = (char *)&cdb; 19595 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19596 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19597 ucmd_buf.uscsi_buflen = buflen; 19598 ucmd_buf.uscsi_rqbuf = NULL; 19599 ucmd_buf.uscsi_rqlen = 0; 19600 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 19601 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 19602 19603 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19604 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT); 19605 19606 if ((status == 0) && (residp != NULL)) { 19607 *residp = ucmd_buf.uscsi_resid; 19608 } 19609 19610 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 19611 19612 return (status); 19613 } 19614 19615 19616 /* 19617 * Function: sd_send_scsi_TEST_UNIT_READY 19618 * 19619 * Description: Issue the scsi TEST UNIT READY command. 19620 * This routine can be told to set the flag USCSI_DIAGNOSE to 19621 * prevent retrying failed commands. Use this when the intent 19622 * is either to check for device readiness, to clear a Unit 19623 * Attention, or to clear any outstanding sense data. 19624 * However under specific conditions the expected behavior 19625 * is for retries to bring a device ready, so use the flag 19626 * with caution. 19627 * 19628 * Arguments: un 19629 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 19630 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 19631 * 0: don't check for media present, do retries on cmd.
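 *
 * Usage sketch (hypothetical caller; the flag values are independent
 * bits and may be combined):
 *
 *	status = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA);
 *	if (status == ENXIO) {
 *		(no media in the drive)
 *	}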
19632 * 19633 * Return Code: 0 - Success 19634 * EIO - IO error 19635 * EACCES - Reservation conflict detected 19636 * ENXIO - Not Ready, medium not present 19637 * errno return code from sd_send_scsi_cmd() 19638 * 19639 * Context: Can sleep. Does not return until command is completed. 19640 */ 19641 19642 static int 19643 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 19644 { 19645 struct scsi_extended_sense sense_buf; 19646 union scsi_cdb cdb; 19647 struct uscsi_cmd ucmd_buf; 19648 int status; 19649 19650 ASSERT(un != NULL); 19651 ASSERT(!mutex_owned(SD_MUTEX(un))); 19652 19653 SD_TRACE(SD_LOG_IO, un, 19654 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 19655 19656 /* 19657 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 19658 * timeouts when they receive a TUR and the queue is not empty. Check 19659 * the configuration flag set during attach (indicating the drive has 19660 * this firmware bug) and un_ncmds_in_transport before issuing the 19661 * TUR. If there are 19662 * pending commands, return success; this is a bit arbitrary but is OK 19663 * for non-removables (i.e. the elite1 disks) and non-clustering 19664 * configurations. 19665 */ 19666 if (un->un_f_cfg_tur_check == TRUE) { 19667 mutex_enter(SD_MUTEX(un)); 19668 if (un->un_ncmds_in_transport != 0) { 19669 mutex_exit(SD_MUTEX(un)); 19670 return (0); 19671 } 19672 mutex_exit(SD_MUTEX(un)); 19673 } 19674 19675 bzero(&cdb, sizeof (cdb)); 19676 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19677 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19678 19679 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 19680 19681 ucmd_buf.uscsi_cdb = (char *)&cdb; 19682 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19683 ucmd_buf.uscsi_bufaddr = NULL; 19684 ucmd_buf.uscsi_buflen = 0; 19685 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19686 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19687 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19688 19689 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 19690 if ((flag & SD_DONT_RETRY_TUR) != 0) { 19691 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 19692 } 19693 ucmd_buf.uscsi_timeout = 60; 19694 19695 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19696 UIO_SYSSPACE, UIO_SYSSPACE, 19697 ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : SD_PATH_STANDARD)); 19698 19699 switch (status) { 19700 case 0: 19701 break; /* Success! */ 19702 case EIO: 19703 switch (ucmd_buf.uscsi_status) { 19704 case STATUS_RESERVATION_CONFLICT: 19705 status = EACCES; 19706 break; 19707 case STATUS_CHECK: 19708 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 19709 break; 19710 } 19711 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19712 (sense_buf.es_key == KEY_NOT_READY) && 19713 (sense_buf.es_add_code == 0x3A)) { 19714 status = ENXIO; 19715 } 19716 break; 19717 default: 19718 break; 19719 } 19720 break; 19721 default: 19722 break; 19723 } 19724 19725 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 19726 19727 return (status); 19728 } 19729 19730 19731 /* 19732 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 19733 * 19734 * Description: Issue the scsi PERSISTENT RESERVE IN command. 19735 * 19736 * Arguments: un 19737 * 19738 * Return Code: 0 - Success 19739 * EACCES 19740 * ENOTSUP 19741 * errno return code from sd_send_scsi_cmd() 19742 * 19743 * Context: Can sleep. Does not return until command is completed.
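 *
 * Usage sketch (hypothetical; buffer sizing is the caller's concern):
 *
 *	uchar_t	keybuf[MHIOC_RESV_KEY_SIZE];
 *
 *	status = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS,
 *	    sizeof (keybuf), keybuf);
 *
 * Passing data_bufp == NULL (with data_len == 0) is also accepted; a
 * default buffer is then allocated and freed internally, which is
 * useful when only the command status matters.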
19744 */ 19745 19746 static int 19747 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 19748 uint16_t data_len, uchar_t *data_bufp) 19749 { 19750 struct scsi_extended_sense sense_buf; 19751 union scsi_cdb cdb; 19752 struct uscsi_cmd ucmd_buf; 19753 int status; 19754 int no_caller_buf = FALSE; 19755 19756 ASSERT(un != NULL); 19757 ASSERT(!mutex_owned(SD_MUTEX(un))); 19758 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 19759 19760 SD_TRACE(SD_LOG_IO, un, 19761 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 19762 19763 bzero(&cdb, sizeof (cdb)); 19764 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19765 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19766 if (data_bufp == NULL) { 19767 /* Allocate a default buf if the caller did not give one */ 19768 ASSERT(data_len == 0); 19769 data_len = MHIOC_RESV_KEY_SIZE; 19770 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 19771 no_caller_buf = TRUE; 19772 } 19773 19774 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 19775 cdb.cdb_opaque[1] = usr_cmd; 19776 FORMG1COUNT(&cdb, data_len); 19777 19778 ucmd_buf.uscsi_cdb = (char *)&cdb; 19779 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19780 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 19781 ucmd_buf.uscsi_buflen = data_len; 19782 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19783 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19784 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19785 ucmd_buf.uscsi_timeout = 60; 19786 19787 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19788 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19789 19790 switch (status) { 19791 case 0: 19792 break; /* Success! */ 19793 case EIO: 19794 switch (ucmd_buf.uscsi_status) { 19795 case STATUS_RESERVATION_CONFLICT: 19796 status = EACCES; 19797 break; 19798 case STATUS_CHECK: 19799 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19800 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19801 status = ENOTSUP; 19802 } 19803 break; 19804 default: 19805 break; 19806 } 19807 break; 19808 default: 19809 break; 19810 } 19811 19812 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 19813 19814 if (no_caller_buf == TRUE) { 19815 kmem_free(data_bufp, data_len); 19816 } 19817 19818 return (status); 19819 } 19820 19821 19822 /* 19823 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 19824 * 19825 * Description: This routine is the driver entry point for handling CD-ROM 19826 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 19827 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 19828 * device. 19829 * 19830 * Arguments: un - Pointer to soft state struct for the target. 19831 * usr_cmd SCSI-3 reservation facility command (one of 19832 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 19833 * SD_SCSI3_PREEMPTANDABORT) 19834 * usr_bufp - user provided pointer register, reserve descriptor or 19835 * preempt and abort structure (mhioc_register_t, 19836 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 19837 * 19838 * Return Code: 0 - Success 19839 * EACCES 19840 * ENOTSUP 19841 * errno return code from sd_send_scsi_cmd() 19842 * 19843 * Context: Can sleep. Does not return until command is completed. 
19844 */ 19845 19846 static int 19847 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 19848 uchar_t *usr_bufp) 19849 { 19850 struct scsi_extended_sense sense_buf; 19851 union scsi_cdb cdb; 19852 struct uscsi_cmd ucmd_buf; 19853 int status; 19854 uchar_t data_len = sizeof (sd_prout_t); 19855 sd_prout_t *prp; 19856 19857 ASSERT(un != NULL); 19858 ASSERT(!mutex_owned(SD_MUTEX(un))); 19859 ASSERT(data_len == 24); /* required by scsi spec */ 19860 19861 SD_TRACE(SD_LOG_IO, un, 19862 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 19863 19864 if (usr_bufp == NULL) { 19865 return (EINVAL); 19866 } 19867 19868 bzero(&cdb, sizeof (cdb)); 19869 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19870 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19871 prp = kmem_zalloc(data_len, KM_SLEEP); 19872 19873 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 19874 cdb.cdb_opaque[1] = usr_cmd; 19875 FORMG1COUNT(&cdb, data_len); 19876 19877 ucmd_buf.uscsi_cdb = (char *)&cdb; 19878 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19879 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 19880 ucmd_buf.uscsi_buflen = data_len; 19881 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19882 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19883 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19884 ucmd_buf.uscsi_timeout = 60; 19885 19886 switch (usr_cmd) { 19887 case SD_SCSI3_REGISTER: { 19888 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 19889 19890 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19891 bcopy(ptr->newkey.key, prp->service_key, 19892 MHIOC_RESV_KEY_SIZE); 19893 prp->aptpl = ptr->aptpl; 19894 break; 19895 } 19896 case SD_SCSI3_RESERVE: 19897 case SD_SCSI3_RELEASE: { 19898 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 19899 19900 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19901 prp->scope_address = BE_32(ptr->scope_specific_addr); 19902 cdb.cdb_opaque[2] = ptr->type; 19903 break; 19904 } 19905 case SD_SCSI3_PREEMPTANDABORT: { 19906 mhioc_preemptandabort_t *ptr = 19907 (mhioc_preemptandabort_t *)usr_bufp; 19908 19909 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19910 bcopy(ptr->victim_key.key, prp->service_key, 19911 MHIOC_RESV_KEY_SIZE); 19912 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 19913 cdb.cdb_opaque[2] = ptr->resvdesc.type; 19914 ucmd_buf.uscsi_flags |= USCSI_HEAD; 19915 break; 19916 } 19917 case SD_SCSI3_REGISTERANDIGNOREKEY: 19918 { 19919 mhioc_registerandignorekey_t *ptr; 19920 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 19921 bcopy(ptr->newkey.key, 19922 prp->service_key, MHIOC_RESV_KEY_SIZE); 19923 prp->aptpl = ptr->aptpl; 19924 break; 19925 } 19926 default: 19927 ASSERT(FALSE); 19928 break; 19929 } 19930 19931 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19932 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19933 19934 switch (status) { 19935 case 0: 19936 break; /* Success! 
 */
19937     case EIO:
19938         switch (ucmd_buf.uscsi_status) {
19939         case STATUS_RESERVATION_CONFLICT:
19940             status = EACCES;
19941             break;
19942         case STATUS_CHECK:
19943             if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19944                 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) {
19945                 status = ENOTSUP;
19946             }
19947             break;
19948         default:
19949             break;
19950         }
19951         break;
19952     default:
19953         break;
19954     }
19955
19956     kmem_free(prp, data_len);
19957     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
19958     return (status);
19959 }
19960
19961
19962 /*
19963  * Function: sd_send_scsi_SYNCHRONIZE_CACHE
19964  *
19965  * Description: Issues a scsi SYNCHRONIZE CACHE command to the target.
19966  *
19967  * Arguments: un - pointer to the target's soft state struct
19968  *
19969  * Return Code: 0 - success
19970  *              errno-type error code
19971  *
19972  * Context: kernel thread context only.
19973  */
19974
19975 static int
19976 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
19977 {
19978     struct sd_uscsi_info *uip;
19979     struct uscsi_cmd *uscmd;
19980     union scsi_cdb *cdb;
19981     struct buf *bp;
19982     int rval = 0;
19983
19984     SD_TRACE(SD_LOG_IO, un,
19985         "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);
19986
19987     ASSERT(un != NULL);
19988     ASSERT(!mutex_owned(SD_MUTEX(un)));
19989
19990     cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
19991     cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;
19992
19993     /*
19994      * First get some memory for the uscsi_cmd struct and cdb
19995      * and initialize for SYNCHRONIZE_CACHE cmd.
19996      */
19997     uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
19998     uscmd->uscsi_cdblen = CDB_GROUP1;
19999     uscmd->uscsi_cdb = (caddr_t)cdb;
20000     uscmd->uscsi_bufaddr = NULL;
20001     uscmd->uscsi_buflen = 0;
20002     uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
20003     uscmd->uscsi_rqlen = SENSE_LENGTH;
20004     uscmd->uscsi_rqresid = SENSE_LENGTH;
20005     uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20006     uscmd->uscsi_timeout = sd_io_time;
20007
20008     /*
20009      * Allocate an sd_uscsi_info struct and fill it with the info
20010      * needed by sd_initpkt_for_uscsi(). Then put the pointer into
20011      * b_private in the buf for sd_initpkt_for_uscsi(). Note that
20012      * since we allocate the buf here in this function, we do not
20013      * need to preserve the prior contents of b_private.
20014      * The sd_uscsi_info struct is also used by sd_uscsi_strategy().
20015      */
20016     uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
20017     uip->ui_flags = SD_PATH_DIRECT;
20018     uip->ui_cmdp = uscmd;
20019
20020     bp = getrbuf(KM_SLEEP);
20021     bp->b_private = uip;
20022
20023     /*
20024      * Set up the buffer to carry the uscsi request.
20025      */
20026     bp->b_flags = B_BUSY;
20027     bp->b_bcount = 0;
20028     bp->b_blkno = 0;
20029
20030     if (dkc != NULL) {
20031         bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
20032         uip->ui_dkc = *dkc;
20033     }
20034
20035     bp->b_edev = SD_GET_DEV(un);
20036     bp->b_dev = cmpdev(bp->b_edev);  /* maybe unnecessary? */
20037
20038     (void) sd_uscsi_strategy(bp);
20039
20040     /*
20041      * If this is a synchronous request, wait for completion.
20042      * If it is asynchronous, just return and let the b_iodone
20043      * callback clean up.
20044      * NOTE: On return, un_ncmds_in_driver will be decremented,
20045      * but it was also incremented in sd_uscsi_strategy(), so
20046      * we should be ok.
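 *
 * Illustrative sketch of an asynchronous flush by a kernel caller;
 * my_flush_done and my_state are hypothetical names, not part of
 * this driver:
 *
 *	struct dk_callback dkc;
 *
 *	dkc.dkc_callback = my_flush_done;
 *	dkc.dkc_cookie = my_state;
 *	(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc);
 *
 * The callback is invoked from the biodone handler with the final
 * errno-style status once the SYNCHRONIZE CACHE command completes.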
20047  */
20048     if (dkc == NULL) {
20049         (void) biowait(bp);
20050         rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
20051     }
20052
20053     return (rval);
20054 }
20055
20056
20057 static int
20058 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
20059 {
20060     struct sd_uscsi_info *uip;
20061     struct uscsi_cmd *uscmd;
20062     struct scsi_extended_sense *sense_buf;
20063     struct sd_lun *un;
20064     int status;
20065
20066     uip = (struct sd_uscsi_info *)(bp->b_private);
20067     ASSERT(uip != NULL);
20068
20069     uscmd = uip->ui_cmdp;
20070     ASSERT(uscmd != NULL);
20071
20072     sense_buf = (struct scsi_extended_sense *)uscmd->uscsi_rqbuf;
20073     ASSERT(sense_buf != NULL);
20074
20075     un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
20076     ASSERT(un != NULL);
20077
20078     status = geterror(bp);
20079     switch (status) {
20080     case 0:
20081         break;  /* Success! */
20082     case EIO:
20083         switch (uscmd->uscsi_status) {
20084         case STATUS_RESERVATION_CONFLICT:
20085             /* Ignore reservation conflict */
20086             status = 0;
20087             goto done;
20088
20089         case STATUS_CHECK:
20090             if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
20091                 (sense_buf->es_key == KEY_ILLEGAL_REQUEST)) {
20092                 /* Ignore Illegal Request error */
20093                 mutex_enter(SD_MUTEX(un));
20094                 un->un_f_sync_cache_unsupported = TRUE;
20095                 mutex_exit(SD_MUTEX(un));
20096                 status = ENOTSUP;
20097                 goto done;
20098             }
20099             break;
20100         default:
20101             break;
20102         }
20103         /* FALLTHRU */
20104     default:
20105         /* Ignore error if the media is not present */
20106         if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
20107             status = 0;
20108             goto done;
20109         }
20110         /* If we reach this, we had an error */
20111         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
20112             "SYNCHRONIZE CACHE command failed (%d)\n", status);
20113         break;
20114     }
20115
20116 done:
20117     if (uip->ui_dkc.dkc_callback != NULL) {
20118         (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
20119     }
20120
20121     ASSERT((bp->b_flags & B_REMAPPED) == 0);
20122     freerbuf(bp);
20123     kmem_free(uip, sizeof (struct sd_uscsi_info));
20124     kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
20125     kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
20126     kmem_free(uscmd, sizeof (struct uscsi_cmd));
20127
20128     return (status);
20129 }
20130
20131
20132 /*
20133  * Function: sd_send_scsi_GET_CONFIGURATION
20134  *
20135  * Description: Issues the get configuration command to the device.
20136  *              Called from sd_check_for_writable_cd & sd_get_media_info;
20137  *              the caller must ensure that buflen = SD_PROFILE_HEADER_LEN.
20138  * Arguments: un - pointer to softstate struct for the target.
20139  *            ucmdbuf - uscsi command buffer to be initialized and used.
20140  *            rqbuf - buffer for the request sense data.
20141  *            rqbuflen - size of the request sense buffer.
20142  *            bufaddr - buffer to receive the configuration data.
20143  *            buflen - size of the configuration data buffer.
20144  *
20145  * Return Code: 0 - Success
20146  *              errno return code from sd_send_scsi_cmd()
20147  *
20148  * Context: Can sleep. Does not return until command is completed.
20149  *
20150  */
20151
20152 static int
20153 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
20154     uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen)
20155 {
20156     char cdb[CDB_GROUP1];
20157     int status;
20158
20159     ASSERT(un != NULL);
20160     ASSERT(!mutex_owned(SD_MUTEX(un)));
20161     ASSERT(bufaddr != NULL);
20162     ASSERT(ucmdbuf != NULL);
20163     ASSERT(rqbuf != NULL);
20164
20165     SD_TRACE(SD_LOG_IO, un,
20166         "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
20167
20168     bzero(cdb, sizeof (cdb));
20169     bzero(ucmdbuf, sizeof (struct uscsi_cmd));
20170     bzero(rqbuf, rqbuflen);
20171     bzero(bufaddr, buflen);
20172
20173     /*
20174      * Set up cdb field for the get configuration command.
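 * Per the MMC specification (stated here for reference), the 10-byte
 * GET CONFIGURATION CDB is laid out as follows: byte 0 is the opcode
 * (0x46), the low two bits of byte 1 are the RT field (0x02 requests
 * the single feature descriptor named by bytes 2-3, the starting
 * feature number), and bytes 7-8 carry the allocation length. Only
 * the low allocation-length byte (cdb[8]) is set below, which is
 * sufficient for the small header-sized transfer used here.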
20175 */ 20176 cdb[0] = SCMD_GET_CONFIGURATION; 20177 cdb[1] = 0x02; /* Requested Type */ 20178 cdb[8] = SD_PROFILE_HEADER_LEN; 20179 ucmdbuf->uscsi_cdb = cdb; 20180 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20181 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20182 ucmdbuf->uscsi_buflen = buflen; 20183 ucmdbuf->uscsi_timeout = sd_io_time; 20184 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20185 ucmdbuf->uscsi_rqlen = rqbuflen; 20186 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20187 20188 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE, 20189 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 20190 20191 switch (status) { 20192 case 0: 20193 break; /* Success! */ 20194 case EIO: 20195 switch (ucmdbuf->uscsi_status) { 20196 case STATUS_RESERVATION_CONFLICT: 20197 status = EACCES; 20198 break; 20199 default: 20200 break; 20201 } 20202 break; 20203 default: 20204 break; 20205 } 20206 20207 if (status == 0) { 20208 SD_DUMP_MEMORY(un, SD_LOG_IO, 20209 "sd_send_scsi_GET_CONFIGURATION: data", 20210 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20211 } 20212 20213 SD_TRACE(SD_LOG_IO, un, 20214 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 20215 20216 return (status); 20217 } 20218 20219 /* 20220 * Function: sd_send_scsi_feature_GET_CONFIGURATION 20221 * 20222 * Description: Issues the get configuration command to the device to 20223 * retrieve a specfic feature. Called from 20224 * sd_check_for_writable_cd & sd_set_mmc_caps. 20225 * Arguments: un 20226 * ucmdbuf 20227 * rqbuf 20228 * rqbuflen 20229 * bufaddr 20230 * buflen 20231 * feature 20232 * 20233 * Return Code: 0 - Success 20234 * errno return code from sd_send_scsi_cmd() 20235 * 20236 * Context: Can sleep. Does not return until command is completed. 20237 * 20238 */ 20239 static int 20240 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 20241 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 20242 uchar_t *bufaddr, uint_t buflen, char feature) 20243 { 20244 char cdb[CDB_GROUP1]; 20245 int status; 20246 20247 ASSERT(un != NULL); 20248 ASSERT(!mutex_owned(SD_MUTEX(un))); 20249 ASSERT(bufaddr != NULL); 20250 ASSERT(ucmdbuf != NULL); 20251 ASSERT(rqbuf != NULL); 20252 20253 SD_TRACE(SD_LOG_IO, un, 20254 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 20255 20256 bzero(cdb, sizeof (cdb)); 20257 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20258 bzero(rqbuf, rqbuflen); 20259 bzero(bufaddr, buflen); 20260 20261 /* 20262 * Set up cdb field for the get configuration command. 20263 */ 20264 cdb[0] = SCMD_GET_CONFIGURATION; 20265 cdb[1] = 0x02; /* Requested Type */ 20266 cdb[3] = feature; 20267 cdb[8] = buflen; 20268 ucmdbuf->uscsi_cdb = cdb; 20269 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20270 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20271 ucmdbuf->uscsi_buflen = buflen; 20272 ucmdbuf->uscsi_timeout = sd_io_time; 20273 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20274 ucmdbuf->uscsi_rqlen = rqbuflen; 20275 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20276 20277 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE, 20278 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 20279 20280 switch (status) { 20281 case 0: 20282 break; /* Success! 
 */
20283     case EIO:
20284         switch (ucmdbuf->uscsi_status) {
20285         case STATUS_RESERVATION_CONFLICT:
20286             status = EACCES;
20287             break;
20288         default:
20289             break;
20290         }
20291         break;
20292     default:
20293         break;
20294     }
20295
20296     if (status == 0) {
20297         SD_DUMP_MEMORY(un, SD_LOG_IO,
20298             "sd_send_scsi_feature_GET_CONFIGURATION: data",
20299             (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
20300     }
20301
20302     SD_TRACE(SD_LOG_IO, un,
20303         "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
20304
20305     return (status);
20306 }
20307
20308
20309 /*
20310  * Function: sd_send_scsi_MODE_SENSE
20311  *
20312  * Description: Utility function for issuing a scsi MODE SENSE command.
20313  *              Note: This routine uses a consistent implementation for Group0,
20314  *              Group1, and Group2 commands across all platforms. ATAPI devices
20315  *              use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
20316  *
20317  * Arguments: un - pointer to the softstate struct for the target.
20318  *            cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte) or
20319  *                      CDB_GROUP[1|2] (10 byte)).
20320  *            bufaddr - buffer for page data retrieved from the target.
20321  *            buflen - size of page to be retrieved.
20322  *            page_code - page code of data to be retrieved from the target.
20323  *            path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20324  *                      the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20325  *                      to use the USCSI "direct" chain and bypass the normal
20326  *                      command waitq.
20327  *
20328  * Return Code: 0 - Success
20329  *              errno return code from sd_send_scsi_cmd()
20330  *
20331  * Context: Can sleep. Does not return until command is completed.
20332  */
20333
20334 static int
20335 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
20336     size_t buflen, uchar_t page_code, int path_flag)
20337 {
20338     struct scsi_extended_sense sense_buf;
20339     union scsi_cdb cdb;
20340     struct uscsi_cmd ucmd_buf;
20341     int status;
20342
20343     ASSERT(un != NULL);
20344     ASSERT(!mutex_owned(SD_MUTEX(un)));
20345     ASSERT(bufaddr != NULL);
20346     ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
20347         (cdbsize == CDB_GROUP2));
20348
20349     SD_TRACE(SD_LOG_IO, un,
20350         "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
20351
20352     bzero(&cdb, sizeof (cdb));
20353     bzero(&ucmd_buf, sizeof (ucmd_buf));
20354     bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20355     bzero(bufaddr, buflen);
20356
20357     if (cdbsize == CDB_GROUP0) {
20358         cdb.scc_cmd = SCMD_MODE_SENSE;
20359         cdb.cdb_opaque[2] = page_code;
20360         FORMG0COUNT(&cdb, buflen);
20361     } else {
20362         cdb.scc_cmd = SCMD_MODE_SENSE_G1;
20363         cdb.cdb_opaque[2] = page_code;
20364         FORMG1COUNT(&cdb, buflen);
20365     }
20366
20367     SD_FILL_SCSI1_LUN_CDB(un, &cdb);
20368
20369     ucmd_buf.uscsi_cdb = (char *)&cdb;
20370     ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
20371     ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20372     ucmd_buf.uscsi_buflen = buflen;
20373     ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20374     ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20375     ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20376     ucmd_buf.uscsi_timeout = 60;
20377
20378     status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
20379         UIO_SYSSPACE, UIO_SYSSPACE, path_flag);
20380
20381     switch (status) {
20382     case 0:
20383         break;  /* Success!
 */
20384     case EIO:
20385         switch (ucmd_buf.uscsi_status) {
20386         case STATUS_RESERVATION_CONFLICT:
20387             status = EACCES;
20388             break;
20389         default:
20390             break;
20391         }
20392         break;
20393     default:
20394         break;
20395     }
20396
20397     if (status == 0) {
20398         SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
20399             (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
20400     }
20401     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
20402
20403     return (status);
20404 }
20405
20406
20407 /*
20408  * Function: sd_send_scsi_MODE_SELECT
20409  *
20410  * Description: Utility function for issuing a scsi MODE SELECT command.
20411  *              Note: This routine uses a consistent implementation for Group0,
20412  *              Group1, and Group2 commands across all platforms. ATAPI devices
20413  *              use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
20414  *
20415  * Arguments: un - pointer to the softstate struct for the target.
20416  *            cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte) or
20417  *                      CDB_GROUP[1|2] (10 byte)).
20418  *            bufaddr - buffer containing the page data to be sent to the
 *                      target.
20419  *            buflen - size of the page data to be transferred.
20420  *            save_page - boolean to determine whether the SP (save page) bit
 *                      should be set.
20421  *            path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20422  *                      the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20423  *                      to use the USCSI "direct" chain and bypass the normal
20424  *                      command waitq.
20425  *
20426  * Return Code: 0 - Success
20427  *              errno return code from sd_send_scsi_cmd()
20428  *
20429  * Context: Can sleep. Does not return until command is completed.
20430  */
20431
20432 static int
20433 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
20434     size_t buflen, uchar_t save_page, int path_flag)
20435 {
20436     struct scsi_extended_sense sense_buf;
20437     union scsi_cdb cdb;
20438     struct uscsi_cmd ucmd_buf;
20439     int status;
20440
20441     ASSERT(un != NULL);
20442     ASSERT(!mutex_owned(SD_MUTEX(un)));
20443     ASSERT(bufaddr != NULL);
20444     ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
20445         (cdbsize == CDB_GROUP2));
20446
20447     SD_TRACE(SD_LOG_IO, un,
20448         "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
20449
20450     bzero(&cdb, sizeof (cdb));
20451     bzero(&ucmd_buf, sizeof (ucmd_buf));
20452     bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20453
20454     /* Set the PF bit for many third party drives */
20455     cdb.cdb_opaque[1] = 0x10;
20456
20457     /* Set the save page (SP) bit if requested */
20458     if (save_page == SD_SAVE_PAGE) {
20459         cdb.cdb_opaque[1] |= 0x01;
20460     }
20461
20462     if (cdbsize == CDB_GROUP0) {
20463         cdb.scc_cmd = SCMD_MODE_SELECT;
20464         FORMG0COUNT(&cdb, buflen);
20465     } else {
20466         cdb.scc_cmd = SCMD_MODE_SELECT_G1;
20467         FORMG1COUNT(&cdb, buflen);
20468     }
20469
20470     SD_FILL_SCSI1_LUN_CDB(un, &cdb);
20471
20472     ucmd_buf.uscsi_cdb = (char *)&cdb;
20473     ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
20474     ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20475     ucmd_buf.uscsi_buflen = buflen;
20476     ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20477     ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20478     ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
20479     ucmd_buf.uscsi_timeout = 60;
20480
20481     status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
20482         UIO_SYSSPACE, UIO_SYSSPACE, path_flag);
20483
20484     switch (status) {
20485     case 0:
20486         break;  /* Success!
 */
20487     case EIO:
20488         switch (ucmd_buf.uscsi_status) {
20489         case STATUS_RESERVATION_CONFLICT:
20490             status = EACCES;
20491             break;
20492         default:
20493             break;
20494         }
20495         break;
20496     default:
20497         break;
20498     }
20499
20500     if (status == 0) {
20501         SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
20502             (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
20503     }
20504     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
20505
20506     return (status);
20507 }
20508
20509
20510 /*
20511  * Function: sd_send_scsi_RDWR
20512  *
20513  * Description: Issue a scsi READ or WRITE command with the given parameters.
20514  *
20515  * Arguments: un: Pointer to the sd_lun struct for the target.
20516  *            cmd: SCMD_READ or SCMD_WRITE
20517  *            bufaddr: Address of caller's buffer to receive the RDWR data
20518  *            buflen: Length of caller's buffer to receive the RDWR data.
20519  *            start_block: Block number for the start of the RDWR operation.
20520  *                         (Assumes target-native block size.)
20523  *            path_flag: SD_PATH_DIRECT to use the USCSI "direct" chain and
20524  *                       the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20525  *                       to use the USCSI "direct" chain and bypass the normal
20526  *                       command waitq.
20527  *
20528  * Return Code: 0 - Success
20529  *              errno return code from sd_send_scsi_cmd()
20530  *
20531  * Context: Can sleep. Does not return until command is completed.
20532  */
20533
20534 static int
20535 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
20536     size_t buflen, daddr_t start_block, int path_flag)
20537 {
20538     struct scsi_extended_sense sense_buf;
20539     union scsi_cdb cdb;
20540     struct uscsi_cmd ucmd_buf;
20541     uint32_t block_count;
20542     int status;
20543     int cdbsize;
20544     uchar_t flag;
20545
20546     ASSERT(un != NULL);
20547     ASSERT(!mutex_owned(SD_MUTEX(un)));
20548     ASSERT(bufaddr != NULL);
20549     ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));
20550
20551     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);
20552
20553     if (un->un_f_tgt_blocksize_is_valid != TRUE) {
20554         return (EINVAL);
20555     }
20556
20557     mutex_enter(SD_MUTEX(un));
20558     block_count = SD_BYTES2TGTBLOCKS(un, buflen);
20559     mutex_exit(SD_MUTEX(un));
20560
20561     flag = (cmd == SCMD_READ) ?
USCSI_READ : USCSI_WRITE;
20562
20563     SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
20564         "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
20565         bufaddr, buflen, start_block, block_count);
20566
20567     bzero(&cdb, sizeof (cdb));
20568     bzero(&ucmd_buf, sizeof (ucmd_buf));
20569     bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20570
20571     /* Compute CDB size to use */
20572     if (start_block > 0xffffffff)
20573         cdbsize = CDB_GROUP4;
20574     else if ((start_block & 0xFFE00000) ||
20575         (un->un_f_cfg_is_atapi == TRUE))
20576         cdbsize = CDB_GROUP1;
20577     else
20578         cdbsize = CDB_GROUP0;
20579
20580     switch (cdbsize) {
20581     case CDB_GROUP0:  /* 6-byte CDBs */
20582         cdb.scc_cmd = cmd;
20583         FORMG0ADDR(&cdb, start_block);
20584         FORMG0COUNT(&cdb, block_count);
20585         break;
20586     case CDB_GROUP1:  /* 10-byte CDBs */
20587         cdb.scc_cmd = cmd | SCMD_GROUP1;
20588         FORMG1ADDR(&cdb, start_block);
20589         FORMG1COUNT(&cdb, block_count);
20590         break;
20591     case CDB_GROUP4:  /* 16-byte CDBs */
20592         cdb.scc_cmd = cmd | SCMD_GROUP4;
20593         FORMG4LONGADDR(&cdb, (uint64_t)start_block);
20594         FORMG4COUNT(&cdb, block_count);
20595         break;
20596     case CDB_GROUP5:  /* 12-byte CDBs (currently unsupported) */
20597     default:
20598         /* All others reserved */
20599         return (EINVAL);
20600     }
20601
20602     /* Set LUN bit(s) in CDB if this is a SCSI-1 device */
20603     SD_FILL_SCSI1_LUN_CDB(un, &cdb);
20604
20605     ucmd_buf.uscsi_cdb = (char *)&cdb;
20606     ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
20607     ucmd_buf.uscsi_bufaddr = bufaddr;
20608     ucmd_buf.uscsi_buflen = buflen;
20609     ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20610     ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20611     ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
20612     ucmd_buf.uscsi_timeout = 60;
20613     status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
20614         UIO_SYSSPACE, UIO_SYSSPACE, path_flag);
20615     switch (status) {
20616     case 0:
20617         break;  /* Success! */
20618     case EIO:
20619         switch (ucmd_buf.uscsi_status) {
20620         case STATUS_RESERVATION_CONFLICT:
20621             status = EACCES;
20622             break;
20623         default:
20624             break;
20625         }
20626         break;
20627     default:
20628         break;
20629     }
20630
20631     if (status == 0) {
20632         SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
20633             (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
20634     }
20635
20636     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");
20637
20638     return (status);
20639 }
20640
20641
20642 /*
20643  * Function: sd_send_scsi_LOG_SENSE
20644  *
20645  * Description: Issue a scsi LOG_SENSE command with the given parameters.
20646  *
20647  * Arguments: un: Pointer to the sd_lun struct for the target.
 *            bufaddr: buffer to receive the log page data.
 *            buflen: size of the data buffer, in bytes.
 *            page_code: log page to be retrieved.
 *            page_control: page control field of the CDB.
 *            param_ptr: parameter pointer; the first parameter code to
 *                       be returned.
 *            path_flag: SD_PATH_DIRECT to use the USCSI "direct" chain and
 *                       the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *                       to use the USCSI "direct" chain and bypass the normal
 *                       command waitq.
20648  *
20649  * Return Code: 0 - Success
20650  *              errno return code from sd_send_scsi_cmd()
20651  *
20652  * Context: Can sleep. Does not return until command is completed.
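 *
 * Example: an illustrative sketch only (not part of the original
 * driver); the buffer size and page control value are assumptions:
 *
 *	uchar_t	logbuf[256];
 *	int	rc;
 *
 *	rc = sd_send_scsi_LOG_SENSE(un, logbuf, sizeof (logbuf),
 *	    TEMPERATURE_PAGE, 1, 0, SD_PATH_DIRECT);
 *
 * A page_control value of 1 requests current cumulative values, and a
 * param_ptr of 0 returns the page's parameters from the beginning.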
20653  */
20654
20655 static int
20656 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen,
20657     uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
20658     int path_flag)
20659
20660 {
20661     struct scsi_extended_sense sense_buf;
20662     union scsi_cdb cdb;
20663     struct uscsi_cmd ucmd_buf;
20664     int status;
20665
20666     ASSERT(un != NULL);
20667     ASSERT(!mutex_owned(SD_MUTEX(un)));
20668
20669     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);
20670
20671     bzero(&cdb, sizeof (cdb));
20672     bzero(&ucmd_buf, sizeof (ucmd_buf));
20673     bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20674
20675     cdb.scc_cmd = SCMD_LOG_SENSE_G1;
20676     cdb.cdb_opaque[2] = (page_control << 6) | page_code;
20677     cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
20678     cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
20679     FORMG1COUNT(&cdb, buflen);
20680
20681     ucmd_buf.uscsi_cdb = (char *)&cdb;
20682     ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20683     ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20684     ucmd_buf.uscsi_buflen = buflen;
20685     ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20686     ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20687     ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20688     ucmd_buf.uscsi_timeout = 60;
20689
20690     status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
20691         UIO_SYSSPACE, UIO_SYSSPACE, path_flag);
20692
20693     switch (status) {
20694     case 0:
20695         break;
20696     case EIO:
20697         switch (ucmd_buf.uscsi_status) {
20698         case STATUS_RESERVATION_CONFLICT:
20699             status = EACCES;
20700             break;
20701         case STATUS_CHECK:
20702             if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20703                 (sense_buf.es_key == KEY_ILLEGAL_REQUEST) &&
20704                 (sense_buf.es_add_code == 0x24)) {
20705                 /*
20706                  * ASC 0x24: INVALID FIELD IN CDB
20707                  */
20708                 switch (page_code) {
20709                 case START_STOP_CYCLE_PAGE:
20710                     /*
20711                      * The start stop cycle counter is
20712                      * implemented as page 0x31 in earlier
20713                      * generation disks. In newer generation
20714                      * disks it is implemented as page 0xE.
20715                      * To handle both cases, if an attempt
20716                      * to read log page 0xE fails, we retry
20717                      * the request using page 0x31.
20718                      *
20719                      * The network storage BU has committed
20720                      * to maintaining page 0x31 for this
20721                      * purpose and will not implement any
20722                      * other page with page code 0x31 until
20723                      * all disks transition to the standard
20724                      * page.
20725                      */
20726                     mutex_enter(SD_MUTEX(un));
20727                     un->un_start_stop_cycle_page =
20728                         START_STOP_CYCLE_VU_PAGE;
20729                     cdb.cdb_opaque[2] =
20730                         (char)(page_control << 6) |
20731                         un->un_start_stop_cycle_page;
20732                     mutex_exit(SD_MUTEX(un));
20733                     status = sd_send_scsi_cmd(
20734                         SD_GET_DEV(un), &ucmd_buf,
20735                         UIO_SYSSPACE, UIO_SYSSPACE,
20736                         UIO_SYSSPACE, path_flag);
20737
20738                     break;
20739                 case TEMPERATURE_PAGE:
20740                     status = ENOTTY;
20741                     break;
20742                 default:
20743                     break;
20744                 }
20745             }
20746             break;
20747         default:
20748             break;
20749         }
20750         break;
20751     default:
20752         break;
20753     }
20754
20755     if (status == 0) {
20756         SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
20757             (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
20758     }
20759
20760     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");
20761
20762     return (status);
20763 }
20764
20765
20766 /*
20767  * Function: sdioctl
20768  *
20769  * Description: Driver's ioctl(9e) entry point function.
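 *
 * For orientation, applications reach this entry point via ioctl(2)
 * on a disk device node. An illustrative user-space sketch (the
 * device path is an assumption for illustration):
 *
 *	#include <sys/dkio.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);
 *	struct dk_cinfo cinfo;
 *
 *	if (ioctl(fd, DKIOCINFO, &cinfo) == 0)
 *		(void) printf("ctype %d cnum %d\n",
 *		    cinfo.dki_ctype, cinfo.dki_cnum);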
20771 * 20772 * Arguments: dev - device number 20773 * cmd - ioctl operation to be performed 20774 * arg - user argument, contains data to be set or reference 20775 * parameter for get 20776 * flag - bit flag, indicating open settings, 32/64 bit type 20777 * cred_p - user credential pointer 20778 * rval_p - calling process return value (OPT) 20779 * 20780 * Return Code: EINVAL 20781 * ENOTTY 20782 * ENXIO 20783 * EIO 20784 * EFAULT 20785 * ENOTSUP 20786 * EPERM 20787 * 20788 * Context: Called from the device switch at normal priority. 20789 */ 20790 20791 static int 20792 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 20793 { 20794 struct sd_lun *un = NULL; 20795 int geom_validated = FALSE; 20796 int err = 0; 20797 int i = 0; 20798 cred_t *cr; 20799 20800 /* 20801 * All device accesses go thru sdstrategy where we check on suspend 20802 * status 20803 */ 20804 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20805 return (ENXIO); 20806 } 20807 20808 ASSERT(!mutex_owned(SD_MUTEX(un))); 20809 20810 /* 20811 * Moved this wait from sd_uscsi_strategy to here for 20812 * reasons of deadlock prevention. Internal driver commands, 20813 * specifically those to change a devices power level, result 20814 * in a call to sd_uscsi_strategy. 20815 */ 20816 mutex_enter(SD_MUTEX(un)); 20817 while ((un->un_state == SD_STATE_SUSPENDED) || 20818 (un->un_state == SD_STATE_PM_CHANGING)) { 20819 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 20820 } 20821 /* 20822 * Twiddling the counter here protects commands from now 20823 * through to the top of sd_uscsi_strategy. Without the 20824 * counter inc. a power down, for example, could get in 20825 * after the above check for state is made and before 20826 * execution gets to the top of sd_uscsi_strategy. 20827 * That would cause problems. 
20828 */ 20829 un->un_ncmds_in_driver++; 20830 20831 if ((un->un_f_geometry_is_valid == FALSE) && 20832 (flag & (FNDELAY | FNONBLOCK))) { 20833 switch (cmd) { 20834 case CDROMPAUSE: 20835 case CDROMRESUME: 20836 case CDROMPLAYMSF: 20837 case CDROMPLAYTRKIND: 20838 case CDROMREADTOCHDR: 20839 case CDROMREADTOCENTRY: 20840 case CDROMSTOP: 20841 case CDROMSTART: 20842 case CDROMVOLCTRL: 20843 case CDROMSUBCHNL: 20844 case CDROMREADMODE2: 20845 case CDROMREADMODE1: 20846 case CDROMREADOFFSET: 20847 case CDROMSBLKMODE: 20848 case CDROMGBLKMODE: 20849 case CDROMGDRVSPEED: 20850 case CDROMSDRVSPEED: 20851 case CDROMCDDA: 20852 case CDROMCDXA: 20853 case CDROMSUBCODE: 20854 if (!ISCD(un)) { 20855 un->un_ncmds_in_driver--; 20856 ASSERT(un->un_ncmds_in_driver >= 0); 20857 mutex_exit(SD_MUTEX(un)); 20858 return (ENOTTY); 20859 } 20860 break; 20861 case FDEJECT: 20862 case DKIOCEJECT: 20863 case CDROMEJECT: 20864 if (!ISREMOVABLE(un)) { 20865 un->un_ncmds_in_driver--; 20866 ASSERT(un->un_ncmds_in_driver >= 0); 20867 mutex_exit(SD_MUTEX(un)); 20868 return (ENOTTY); 20869 } 20870 break; 20871 case DKIOCSVTOC: 20872 case DKIOCSETEFI: 20873 case DKIOCSMBOOT: 20874 case DKIOCFLUSHWRITECACHE: 20875 mutex_exit(SD_MUTEX(un)); 20876 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20877 if (err != 0) { 20878 mutex_enter(SD_MUTEX(un)); 20879 un->un_ncmds_in_driver--; 20880 ASSERT(un->un_ncmds_in_driver >= 0); 20881 mutex_exit(SD_MUTEX(un)); 20882 return (EIO); 20883 } 20884 mutex_enter(SD_MUTEX(un)); 20885 /* FALLTHROUGH */ 20886 case DKIOCREMOVABLE: 20887 case DKIOCINFO: 20888 case DKIOCGMEDIAINFO: 20889 case MHIOCENFAILFAST: 20890 case MHIOCSTATUS: 20891 case MHIOCTKOWN: 20892 case MHIOCRELEASE: 20893 case MHIOCGRP_INKEYS: 20894 case MHIOCGRP_INRESV: 20895 case MHIOCGRP_REGISTER: 20896 case MHIOCGRP_RESERVE: 20897 case MHIOCGRP_PREEMPTANDABORT: 20898 case MHIOCGRP_REGISTERANDIGNOREKEY: 20899 case CDROMCLOSETRAY: 20900 case USCSICMD: 20901 goto skip_ready_valid; 20902 default: 20903 break; 20904 } 20905 20906 mutex_exit(SD_MUTEX(un)); 20907 err = sd_ready_and_valid(un); 20908 mutex_enter(SD_MUTEX(un)); 20909 if (err == SD_READY_NOT_VALID) { 20910 switch (cmd) { 20911 case DKIOCGAPART: 20912 case DKIOCGGEOM: 20913 case DKIOCSGEOM: 20914 case DKIOCGVTOC: 20915 case DKIOCSVTOC: 20916 case DKIOCSAPART: 20917 case DKIOCG_PHYGEOM: 20918 case DKIOCG_VIRTGEOM: 20919 err = ENOTSUP; 20920 un->un_ncmds_in_driver--; 20921 ASSERT(un->un_ncmds_in_driver >= 0); 20922 mutex_exit(SD_MUTEX(un)); 20923 return (err); 20924 } 20925 } 20926 if (err != SD_READY_VALID) { 20927 switch (cmd) { 20928 case DKIOCSTATE: 20929 case CDROMGDRVSPEED: 20930 case CDROMSDRVSPEED: 20931 case FDEJECT: /* for eject command */ 20932 case DKIOCEJECT: 20933 case CDROMEJECT: 20934 case DKIOCGETEFI: 20935 case DKIOCSGEOM: 20936 case DKIOCREMOVABLE: 20937 case DKIOCSAPART: 20938 case DKIOCSETEFI: 20939 break; 20940 default: 20941 if (ISREMOVABLE(un)) { 20942 err = ENXIO; 20943 } else { 20944 /* Do not map EACCES to EIO */ 20945 if (err != EACCES) 20946 err = EIO; 20947 } 20948 un->un_ncmds_in_driver--; 20949 ASSERT(un->un_ncmds_in_driver >= 0); 20950 mutex_exit(SD_MUTEX(un)); 20951 return (err); 20952 } 20953 } 20954 geom_validated = TRUE; 20955 } 20956 if ((un->un_f_geometry_is_valid == TRUE) && 20957 (un->un_solaris_size > 0)) { 20958 /* 20959 * the "geometry_is_valid" flag could be true if we 20960 * have an fdisk table but no Solaris partition 20961 */ 20962 if (un->un_vtoc.v_sanity != VTOC_SANE) { 20963 /* it is EFI, so return ENOTSUP for these */ 20964 
switch (cmd) { 20965 case DKIOCGAPART: 20966 case DKIOCGGEOM: 20967 case DKIOCGVTOC: 20968 case DKIOCSVTOC: 20969 case DKIOCSAPART: 20970 err = ENOTSUP; 20971 un->un_ncmds_in_driver--; 20972 ASSERT(un->un_ncmds_in_driver >= 0); 20973 mutex_exit(SD_MUTEX(un)); 20974 return (err); 20975 } 20976 } 20977 } 20978 20979 skip_ready_valid: 20980 mutex_exit(SD_MUTEX(un)); 20981 20982 switch (cmd) { 20983 case DKIOCINFO: 20984 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20985 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20986 break; 20987 20988 case DKIOCGMEDIAINFO: 20989 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20990 err = sd_get_media_info(dev, (caddr_t)arg, flag); 20991 break; 20992 20993 case DKIOCGGEOM: 20994 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGGEOM\n"); 20995 err = sd_dkio_get_geometry(dev, (caddr_t)arg, flag, 20996 geom_validated); 20997 break; 20998 20999 case DKIOCSGEOM: 21000 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSGEOM\n"); 21001 err = sd_dkio_set_geometry(dev, (caddr_t)arg, flag); 21002 break; 21003 21004 case DKIOCGAPART: 21005 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGAPART\n"); 21006 err = sd_dkio_get_partition(dev, (caddr_t)arg, flag, 21007 geom_validated); 21008 break; 21009 21010 case DKIOCSAPART: 21011 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSAPART\n"); 21012 err = sd_dkio_set_partition(dev, (caddr_t)arg, flag); 21013 break; 21014 21015 case DKIOCGVTOC: 21016 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGVTOC\n"); 21017 err = sd_dkio_get_vtoc(dev, (caddr_t)arg, flag, 21018 geom_validated); 21019 break; 21020 21021 case DKIOCGETEFI: 21022 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGETEFI\n"); 21023 err = sd_dkio_get_efi(dev, (caddr_t)arg, flag); 21024 break; 21025 21026 case DKIOCPARTITION: 21027 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTITION\n"); 21028 err = sd_dkio_partition(dev, (caddr_t)arg, flag); 21029 break; 21030 21031 case DKIOCSVTOC: 21032 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSVTOC\n"); 21033 err = sd_dkio_set_vtoc(dev, (caddr_t)arg, flag); 21034 break; 21035 21036 case DKIOCSETEFI: 21037 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSETEFI\n"); 21038 err = sd_dkio_set_efi(dev, (caddr_t)arg, flag); 21039 break; 21040 21041 case DKIOCGMBOOT: 21042 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMBOOT\n"); 21043 err = sd_dkio_get_mboot(dev, (caddr_t)arg, flag); 21044 break; 21045 21046 case DKIOCSMBOOT: 21047 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSMBOOT\n"); 21048 err = sd_dkio_set_mboot(dev, (caddr_t)arg, flag); 21049 break; 21050 21051 case DKIOCLOCK: 21052 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 21053 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21054 SD_PATH_STANDARD); 21055 break; 21056 21057 case DKIOCUNLOCK: 21058 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 21059 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 21060 SD_PATH_STANDARD); 21061 break; 21062 21063 case DKIOCSTATE: { 21064 enum dkio_state state; 21065 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 21066 21067 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 21068 err = EFAULT; 21069 } else { 21070 err = sd_check_media(dev, state); 21071 if (err == 0) { 21072 if (ddi_copyout(&un->un_mediastate, (void *)arg, 21073 sizeof (int), flag) != 0) 21074 err = EFAULT; 21075 } 21076 } 21077 break; 21078 } 21079 21080 case DKIOCREMOVABLE: 21081 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 21082 if (ISREMOVABLE(un)) { 21083 i = 1; 21084 } else { 21085 i = 0; 21086 } 21087 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21088 err = EFAULT; 21089 } else { 21090 err = 0; 21091 } 21092 break; 21093 21094 case DKIOCGTEMPERATURE: 21095 
SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 21096 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 21097 break; 21098 21099 case MHIOCENFAILFAST: 21100 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 21101 if ((err = drv_priv(cred_p)) == 0) { 21102 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 21103 } 21104 break; 21105 21106 case MHIOCTKOWN: 21107 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 21108 if ((err = drv_priv(cred_p)) == 0) { 21109 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 21110 } 21111 break; 21112 21113 case MHIOCRELEASE: 21114 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 21115 if ((err = drv_priv(cred_p)) == 0) { 21116 err = sd_mhdioc_release(dev); 21117 } 21118 break; 21119 21120 case MHIOCSTATUS: 21121 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 21122 if ((err = drv_priv(cred_p)) == 0) { 21123 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 21124 case 0: 21125 err = 0; 21126 break; 21127 case EACCES: 21128 *rval_p = 1; 21129 err = 0; 21130 break; 21131 default: 21132 err = EIO; 21133 break; 21134 } 21135 } 21136 break; 21137 21138 case MHIOCQRESERVE: 21139 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 21140 if ((err = drv_priv(cred_p)) == 0) { 21141 err = sd_reserve_release(dev, SD_RESERVE); 21142 } 21143 break; 21144 21145 case MHIOCREREGISTERDEVID: 21146 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 21147 if (drv_priv(cred_p) == EPERM) { 21148 err = EPERM; 21149 } else if (ISREMOVABLE(un) || ISCD(un)) { 21150 err = ENOTTY; 21151 } else { 21152 err = sd_mhdioc_register_devid(dev); 21153 } 21154 break; 21155 21156 case MHIOCGRP_INKEYS: 21157 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 21158 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21159 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21160 err = ENOTSUP; 21161 } else { 21162 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 21163 flag); 21164 } 21165 } 21166 break; 21167 21168 case MHIOCGRP_INRESV: 21169 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 21170 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21171 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21172 err = ENOTSUP; 21173 } else { 21174 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 21175 } 21176 } 21177 break; 21178 21179 case MHIOCGRP_REGISTER: 21180 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 21181 if ((err = drv_priv(cred_p)) != EPERM) { 21182 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21183 err = ENOTSUP; 21184 } else if (arg != NULL) { 21185 mhioc_register_t reg; 21186 if (ddi_copyin((void *)arg, ®, 21187 sizeof (mhioc_register_t), flag) != 0) { 21188 err = EFAULT; 21189 } else { 21190 err = 21191 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21192 un, SD_SCSI3_REGISTER, 21193 (uchar_t *)®); 21194 } 21195 } 21196 } 21197 break; 21198 21199 case MHIOCGRP_RESERVE: 21200 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 21201 if ((err = drv_priv(cred_p)) != EPERM) { 21202 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21203 err = ENOTSUP; 21204 } else if (arg != NULL) { 21205 mhioc_resv_desc_t resv_desc; 21206 if (ddi_copyin((void *)arg, &resv_desc, 21207 sizeof (mhioc_resv_desc_t), flag) != 0) { 21208 err = EFAULT; 21209 } else { 21210 err = 21211 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21212 un, SD_SCSI3_RESERVE, 21213 (uchar_t *)&resv_desc); 21214 } 21215 } 21216 } 21217 break; 21218 21219 case MHIOCGRP_PREEMPTANDABORT: 21220 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 21221 if ((err = drv_priv(cred_p)) != EPERM) { 21222 if (un->un_reservation_type == 
SD_SCSI2_RESERVATION) {
21223             err = ENOTSUP;
21224         } else if (arg != NULL) {
21225             mhioc_preemptandabort_t preempt_abort;
21226             if (ddi_copyin((void *)arg, &preempt_abort,
21227                 sizeof (mhioc_preemptandabort_t),
21228                 flag) != 0) {
21229                 err = EFAULT;
21230             } else {
21231                 err =
21232                     sd_send_scsi_PERSISTENT_RESERVE_OUT(
21233                     un, SD_SCSI3_PREEMPTANDABORT,
21234                     (uchar_t *)&preempt_abort);
21235             }
21236         }
21237     }
21238     break;
21239
21240 case MHIOCGRP_REGISTERANDIGNOREKEY:
21241     SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
21242     if ((err = drv_priv(cred_p)) != EPERM) {
21243         if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
21244             err = ENOTSUP;
21245         } else if (arg != NULL) {
21246             mhioc_registerandignorekey_t r_and_i;
21247             if (ddi_copyin((void *)arg, (void *)&r_and_i,
21248                 sizeof (mhioc_registerandignorekey_t),
21249                 flag) != 0) {
21250                 err = EFAULT;
21251             } else {
21252                 err =
21253                     sd_send_scsi_PERSISTENT_RESERVE_OUT(
21254                     un, SD_SCSI3_REGISTERANDIGNOREKEY,
21255                     (uchar_t *)&r_and_i);
21256             }
21257         }
21258     }
21259     break;
21260
21261 case USCSICMD:
21262     SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
21263     cr = ddi_get_cred();
21264     if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
21265         err = EPERM;
21266     } else {
21267         err = sd_uscsi_ioctl(dev, (caddr_t)arg, flag);
21268     }
21269     break;
21270
21271 case CDROMPAUSE:
21272 case CDROMRESUME:
21273     SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
21274     if (!ISCD(un)) {
21275         err = ENOTTY;
21276     } else {
21277         err = sr_pause_resume(dev, cmd);
21278     }
21279     break;
21280
21281 case CDROMPLAYMSF:
21282     SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
21283     if (!ISCD(un)) {
21284         err = ENOTTY;
21285     } else {
21286         err = sr_play_msf(dev, (caddr_t)arg, flag);
21287     }
21288     break;
21289
21290 case CDROMPLAYTRKIND:
21291     SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
21292 #if defined(__i386) || defined(__amd64)
21293     /*
21294      * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
21295      */
21296     if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
21297 #else
21298     if (!ISCD(un)) {
21299 #endif
21300         err = ENOTTY;
21301     } else {
21302         err = sr_play_trkind(dev, (caddr_t)arg, flag);
21303     }
21304     break;
21305
21306 case CDROMREADTOCHDR:
21307     SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
21308     if (!ISCD(un)) {
21309         err = ENOTTY;
21310     } else {
21311         err = sr_read_tochdr(dev, (caddr_t)arg, flag);
21312     }
21313     break;
21314
21315 case CDROMREADTOCENTRY:
21316     SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
21317     if (!ISCD(un)) {
21318         err = ENOTTY;
21319     } else {
21320         err = sr_read_tocentry(dev, (caddr_t)arg, flag);
21321     }
21322     break;
21323
21324 case CDROMSTOP:
21325     SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
21326     if (!ISCD(un)) {
21327         err = ENOTTY;
21328     } else {
21329         err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP,
21330             SD_PATH_STANDARD);
21331     }
21332     break;
21333
21334 case CDROMSTART:
21335     SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
21336     if (!ISCD(un)) {
21337         err = ENOTTY;
21338     } else {
21339         err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
21340             SD_PATH_STANDARD);
21341     }
21342     break;
21343
21344 case CDROMCLOSETRAY:
21345     SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
21346     if (!ISCD(un)) {
21347         err = ENOTTY;
21348     } else {
21349         err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE,
21350             SD_PATH_STANDARD);
21351     }
21352     break;
21353
21354 case FDEJECT:  /* for eject command */
21355 case DKIOCEJECT:
21356 case CDROMEJECT:
21357     SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
21358     if
(!ISREMOVABLE(un)) {
21359         err = ENOTTY;
21360     } else {
21361         err = sr_eject(dev);
21362     }
21363     break;
21364
21365 case CDROMVOLCTRL:
21366     SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
21367     if (!ISCD(un)) {
21368         err = ENOTTY;
21369     } else {
21370         err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
21371     }
21372     break;
21373
21374 case CDROMSUBCHNL:
21375     SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
21376     if (!ISCD(un)) {
21377         err = ENOTTY;
21378     } else {
21379         err = sr_read_subchannel(dev, (caddr_t)arg, flag);
21380     }
21381     break;
21382
21383 case CDROMREADMODE2:
21384     SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
21385     if (!ISCD(un)) {
21386         err = ENOTTY;
21387     } else if (un->un_f_cfg_is_atapi == TRUE) {
21388         /*
21389          * If the drive supports READ CD, use that instead of
21390          * switching the LBA size via a MODE SELECT
21391          * Block Descriptor
21392          */
21393         err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
21394     } else {
21395         err = sr_read_mode2(dev, (caddr_t)arg, flag);
21396     }
21397     break;
21398
21399 case CDROMREADMODE1:
21400     SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
21401     if (!ISCD(un)) {
21402         err = ENOTTY;
21403     } else {
21404         err = sr_read_mode1(dev, (caddr_t)arg, flag);
21405     }
21406     break;
21407
21408 case CDROMREADOFFSET:
21409     SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
21410     if (!ISCD(un)) {
21411         err = ENOTTY;
21412     } else {
21413         err = sr_read_sony_session_offset(dev, (caddr_t)arg,
21414             flag);
21415     }
21416     break;
21417
21418 case CDROMSBLKMODE:
21419     SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
21420     /*
21421      * There is no means of changing the block size on ATAPI
21422      * drives, so return ENOTTY if the drive is ATAPI.
21423      */
21424     if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
21425         err = ENOTTY;
21426     } else if (un->un_f_mmc_cap == TRUE) {
21427
21428         /*
21429          * MMC devices do not support changing the
21430          * logical block size.
21431          *
21432          * Note: EINVAL is returned instead of ENOTTY to
21433          * maintain consistency with the original mmc
21434          * driver update.
21435          */
21436         err = EINVAL;
21437     } else {
21438         mutex_enter(SD_MUTEX(un));
21439         if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
21440             (un->un_ncmds_in_transport > 0)) {
21441             mutex_exit(SD_MUTEX(un));
21442             err = EINVAL;
21443         } else {
21444             mutex_exit(SD_MUTEX(un));
21445             err = sr_change_blkmode(dev, cmd, arg, flag);
21446         }
21447     }
21448     break;
21449
21450 case CDROMGBLKMODE:
21451     SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
21452     if (!ISCD(un)) {
21453         err = ENOTTY;
21454     } else if ((un->un_f_cfg_is_atapi != FALSE) &&
21455         (un->un_f_blockcount_is_valid != FALSE)) {
21456         /*
21457          * The drive is ATAPI, so return the target block
21458          * size, since we cannot change the block size on
21459          * ATAPI drives. Used primarily to detect whether
21460          * an ATAPI CD-ROM is present.
21461          */
21462         if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
21463             sizeof (int), flag) != 0) {
21464             err = EFAULT;
21465         } else {
21466             err = 0;
21467         }
21468
21469     } else {
21470         /*
21471          * Drive supports changing block sizes via a Mode
21472          * Select.
21473          */
21474         err = sr_change_blkmode(dev, cmd, arg, flag);
21475     }
21476     break;
21477
21478 case CDROMGDRVSPEED:
21479 case CDROMSDRVSPEED:
21480     SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
21481     if (!ISCD(un)) {
21482         err = ENOTTY;
21483     } else if (un->un_f_mmc_cap == TRUE) {
21484         /*
21485          * Note: In the future the driver implementation
21486          * for getting and
21487          * setting cd speed should entail:
21488          * 1) If non-mmc try the Toshiba mode page
21489          *    (sr_change_speed)
21490          * 2) If mmc but no support for Real Time Streaming try
21491          *    the SET CD SPEED (0xBB) command
21492          *    (sr_atapi_change_speed)
21493          * 3) If mmc and support for Real Time Streaming
21494          *    try the GET PERFORMANCE and SET STREAMING
21495          *    commands (not yet implemented, 4380808)
21496          */
21497         /*
21498          * As per the recent MMC spec, CD-ROM speed is variable
21499          * and changes with LBA. Since there is no such
21500          * thing as a single drive speed now, fail this ioctl.
21501          *
21502          * Note: EINVAL is returned for consistency with the
21503          * original implementation, which supported getting
21504          * the drive speed of mmc devices but not setting
21505          * it. Thus EINVAL would be returned
21506          * if a set request was made for an mmc device.
21507          * We no longer support get or set speed for
21508          * mmc, but we need to remain consistent with regard
21509          * to the error code returned.
21510          */
21511         err = EINVAL;
21512     } else if (un->un_f_cfg_is_atapi == TRUE) {
21513         err = sr_atapi_change_speed(dev, cmd, arg, flag);
21514     } else {
21515         err = sr_change_speed(dev, cmd, arg, flag);
21516     }
21517     break;
21518
21519 case CDROMCDDA:
21520     SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
21521     if (!ISCD(un)) {
21522         err = ENOTTY;
21523     } else {
21524         err = sr_read_cdda(dev, (void *)arg, flag);
21525     }
21526     break;
21527
21528 case CDROMCDXA:
21529     SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
21530     if (!ISCD(un)) {
21531         err = ENOTTY;
21532     } else {
21533         err = sr_read_cdxa(dev, (caddr_t)arg, flag);
21534     }
21535     break;
21536
21537 case CDROMSUBCODE:
21538     SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
21539     if (!ISCD(un)) {
21540         err = ENOTTY;
21541     } else {
21542         err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
21543     }
21544     break;
21545
21546 case DKIOCPARTINFO: {
21547     /*
21548      * Return parameters describing the selected disk slice.
21549      * Note: this ioctl is for the intel platform only.
21550      */
21551 #if defined(__i386) || defined(__amd64)
21552     int part;
21553
21554     SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n");
21555     part = SDPART(dev);
21556
21557     /* don't check un_solaris_size for pN */
21558     if (part < P0_RAW_DISK && un->un_solaris_size == 0) {
21559         err = EIO;
21560     } else {
21561         struct part_info p;
21562
21563         p.p_start = (daddr_t)un->un_offset[part];
21564         p.p_length = (int)un->un_map[part].dkl_nblk;
21565 #ifdef _MULTI_DATAMODEL
21566         switch (ddi_model_convert_from(flag & FMODELS)) {
21567         case DDI_MODEL_ILP32:
21568         {
21569             struct part_info32 p32;
21570
21571             p32.p_start = (daddr32_t)p.p_start;
21572             p32.p_length = p.p_length;
21573             if (ddi_copyout(&p32, (void *)arg,
21574                 sizeof (p32), flag))
21575                 err = EFAULT;
21576             break;
21577         }
21578
21579         case DDI_MODEL_NONE:
21580         {
21581             if (ddi_copyout(&p, (void *)arg, sizeof (p),
21582                 flag))
21583                 err = EFAULT;
21584             break;
21585         }
21586         }
21587 #else /* !
_MULTI_DATAMODEL */
21588         if (ddi_copyout(&p, (void *)arg, sizeof (p), flag))
21589             err = EFAULT;
21590 #endif /* _MULTI_DATAMODEL */
21591     }
21592 #else
21593     SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n");
21594     err = ENOTTY;
21595 #endif
21596     break;
21597 }
21598
21599 case DKIOCG_PHYGEOM: {
21600     /* Return the driver's notion of the media physical geometry */
21601 #if defined(__i386) || defined(__amd64)
21602     struct dk_geom disk_geom;
21603     struct dk_geom *dkgp = &disk_geom;
21604
21605     SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n");
21606     mutex_enter(SD_MUTEX(un));
21607
21608     if (un->un_g.dkg_nhead != 0 &&
21609         un->un_g.dkg_nsect != 0) {
21610         /*
21611          * We succeeded in getting a geometry, but
21612          * right now it is being reported as just the
21613          * Solaris fdisk partition, just like for
21614          * DKIOCGGEOM. We need to change that to be
21615          * correct for the entire disk now.
21616          */
21617         bcopy(&un->un_g, dkgp, sizeof (*dkgp));
21618         dkgp->dkg_acyl = 0;
21619         dkgp->dkg_ncyl = un->un_blockcount /
21620             (dkgp->dkg_nhead * dkgp->dkg_nsect);
21621     } else {
21622         bzero(dkgp, sizeof (struct dk_geom));
21623         /*
21624          * This disk does not have a Solaris VTOC,
21625          * so we must present a physical geometry
21626          * that will remain consistent regardless
21627          * of how the disk is used. This will ensure
21628          * that the geometry does not change regardless
21629          * of the fdisk partition type (i.e. EFI, FAT32,
21630          * Solaris, etc.).
21631          */
21632         if (ISCD(un)) {
21633             dkgp->dkg_nhead = un->un_pgeom.g_nhead;
21634             dkgp->dkg_nsect = un->un_pgeom.g_nsect;
21635             dkgp->dkg_ncyl = un->un_pgeom.g_ncyl;
21636             dkgp->dkg_acyl = un->un_pgeom.g_acyl;
21637         } else {
21638             sd_convert_geometry(un->un_blockcount, dkgp);
21639             dkgp->dkg_acyl = 0;
21640             dkgp->dkg_ncyl = un->un_blockcount /
21641                 (dkgp->dkg_nhead * dkgp->dkg_nsect);
21642         }
21643     }
21644     dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl;
21645
21646     if (ddi_copyout(dkgp, (void *)arg,
21647         sizeof (struct dk_geom), flag)) {
21648         mutex_exit(SD_MUTEX(un));
21649         err = EFAULT;
21650     } else {
21651         mutex_exit(SD_MUTEX(un));
21652         err = 0;
21653     }
21654 #else
21655     SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n");
21656     err = ENOTTY;
21657 #endif
21658     break;
21659 }
21660
21661 case DKIOCG_VIRTGEOM: {
21662     /* Return the driver's notion of the media's logical geometry */
21663 #if defined(__i386) || defined(__amd64)
21664     struct dk_geom disk_geom;
21665     struct dk_geom *dkgp = &disk_geom;
21666
21667     SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n");
21668     mutex_enter(SD_MUTEX(un));
21669     /*
21670      * If there is no HBA geometry available, or
21671      * if the HBA returned us something that doesn't
21672      * really fit into an Int 13/function 8 geometry
21673      * result, just fail the ioctl. See PSARC 1998/313.
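 * (For reference: a BIOS Int 13h/function 8 style geometry can express
 * at most 1024 cylinders, 255 heads, and 63 sectors per track, which
 * is why the cylinder count is bounds-checked below.)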
21674 */ 21675 if (un->un_lgeom.g_nhead == 0 || 21676 un->un_lgeom.g_nsect == 0 || 21677 un->un_lgeom.g_ncyl > 1024) { 21678 mutex_exit(SD_MUTEX(un)); 21679 err = EINVAL; 21680 } else { 21681 dkgp->dkg_ncyl = un->un_lgeom.g_ncyl; 21682 dkgp->dkg_acyl = un->un_lgeom.g_acyl; 21683 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21684 dkgp->dkg_nhead = un->un_lgeom.g_nhead; 21685 dkgp->dkg_nsect = un->un_lgeom.g_nsect; 21686 21687 if (ddi_copyout(dkgp, (void *)arg, 21688 sizeof (struct dk_geom), flag)) { 21689 mutex_exit(SD_MUTEX(un)); 21690 err = EFAULT; 21691 } else { 21692 mutex_exit(SD_MUTEX(un)); 21693 err = 0; 21694 } 21695 } 21696 #else 21697 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21698 err = ENOTTY; 21699 #endif 21700 break; 21701 } 21702 #ifdef SDDEBUG 21703 /* RESET/ABORTS testing ioctls */ 21704 case DKIOCRESET: { 21705 int reset_level; 21706 21707 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21708 err = EFAULT; 21709 } else { 21710 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21711 "reset_level = 0x%lx\n", reset_level); 21712 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 21713 err = 0; 21714 } else { 21715 err = EIO; 21716 } 21717 } 21718 break; 21719 } 21720 21721 case DKIOCABORT: 21722 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 21723 if (scsi_abort(SD_ADDRESS(un), NULL)) { 21724 err = 0; 21725 } else { 21726 err = EIO; 21727 } 21728 break; 21729 #endif 21730 21731 #ifdef SD_FAULT_INJECTION 21732 /* SDIOC FaultInjection testing ioctls */ 21733 case SDIOCSTART: 21734 case SDIOCSTOP: 21735 case SDIOCINSERTPKT: 21736 case SDIOCINSERTXB: 21737 case SDIOCINSERTUN: 21738 case SDIOCINSERTARQ: 21739 case SDIOCPUSH: 21740 case SDIOCRETRIEVE: 21741 case SDIOCRUN: 21742 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 21743 "SDIOC detected cmd:0x%X:\n", cmd); 21744 /* call error generator */ 21745 sd_faultinjection_ioctl(cmd, arg, un); 21746 err = 0; 21747 break; 21748 21749 #endif /* SD_FAULT_INJECTION */ 21750 21751 case DKIOCFLUSHWRITECACHE: 21752 { 21753 struct dk_callback *dkc = (struct dk_callback *)arg; 21754 21755 mutex_enter(SD_MUTEX(un)); 21756 if (un->un_f_sync_cache_unsupported || 21757 ! un->un_f_write_cache_enabled) { 21758 err = un->un_f_sync_cache_unsupported ? 21759 ENOTSUP : 0; 21760 mutex_exit(SD_MUTEX(un)); 21761 if ((flag & FKIOCTL) && dkc != NULL && 21762 dkc->dkc_callback != NULL) { 21763 (*dkc->dkc_callback)(dkc->dkc_cookie, 21764 err); 21765 /* 21766 * Did callback and reported error. 21767 * Since we did a callback, ioctl 21768 * should return 0. 21769 */ 21770 err = 0; 21771 } 21772 break; 21773 } 21774 mutex_exit(SD_MUTEX(un)); 21775 21776 if ((flag & FKIOCTL) && dkc != NULL && 21777 dkc->dkc_callback != NULL) { 21778 /* async SYNC CACHE request */ 21779 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 21780 } else { 21781 /* synchronous SYNC CACHE request */ 21782 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 21783 } 21784 } 21785 break; 21786 21787 default: 21788 err = ENOTTY; 21789 break; 21790 } 21791 mutex_enter(SD_MUTEX(un)); 21792 un->un_ncmds_in_driver--; 21793 ASSERT(un->un_ncmds_in_driver >= 0); 21794 mutex_exit(SD_MUTEX(un)); 21795 21796 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 21797 return (err); 21798 } 21799 21800 21801 /* 21802 * Function: sd_uscsi_ioctl 21803 * 21804 * Description: This routine is the driver entry point for handling USCSI ioctl 21805 * requests (USCSICMD). 
21806 * 21807 * Arguments: dev - the device number 21808 * arg - user provided scsi command 21809 * flag - this argument is a pass through to ddi_copyxxx() 21810 * directly from the mode argument of ioctl(). 21811 * 21812 * Return Code: code returned by sd_send_scsi_cmd 21813 * ENXIO 21814 * EFAULT 21815 * EAGAIN 21816 */ 21817 21818 static int 21819 sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag) 21820 { 21821 #ifdef _MULTI_DATAMODEL 21822 /* 21823 * For use when a 32 bit app makes a call into a 21824 * 64 bit ioctl 21825 */ 21826 struct uscsi_cmd32 uscsi_cmd_32_for_64; 21827 struct uscsi_cmd32 *ucmd32 = &uscsi_cmd_32_for_64; 21828 model_t model; 21829 #endif /* _MULTI_DATAMODEL */ 21830 struct uscsi_cmd *scmd = NULL; 21831 struct sd_lun *un = NULL; 21832 enum uio_seg uioseg; 21833 char cdb[CDB_GROUP0]; 21834 int rval = 0; 21835 21836 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21837 return (ENXIO); 21838 } 21839 21840 SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: entry: un:0x%p\n", un); 21841 21842 scmd = (struct uscsi_cmd *) 21843 kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21844 21845 #ifdef _MULTI_DATAMODEL 21846 switch (model = ddi_model_convert_from(flag & FMODELS)) { 21847 case DDI_MODEL_ILP32: 21848 { 21849 if (ddi_copyin((void *)arg, ucmd32, sizeof (*ucmd32), flag)) { 21850 rval = EFAULT; 21851 goto done; 21852 } 21853 /* 21854 * Convert the ILP32 uscsi data from the 21855 * application to LP64 for internal use. 21856 */ 21857 uscsi_cmd32touscsi_cmd(ucmd32, scmd); 21858 break; 21859 } 21860 case DDI_MODEL_NONE: 21861 if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) { 21862 rval = EFAULT; 21863 goto done; 21864 } 21865 break; 21866 } 21867 #else /* ! _MULTI_DATAMODEL */ 21868 if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) { 21869 rval = EFAULT; 21870 goto done; 21871 } 21872 #endif /* _MULTI_DATAMODEL */ 21873 21874 scmd->uscsi_flags &= ~USCSI_NOINTR; 21875 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : UIO_USERSPACE; 21876 if (un->un_f_format_in_progress == TRUE) { 21877 rval = EAGAIN; 21878 goto done; 21879 } 21880 21881 /* 21882 * Gotta do the ddi_copyin() here on the uscsi_cdb so that 21883 * we will have a valid cdb[0] to test. 21884 */ 21885 if ((ddi_copyin(scmd->uscsi_cdb, cdb, CDB_GROUP0, flag) == 0) && 21886 (cdb[0] == SCMD_FORMAT)) { 21887 SD_TRACE(SD_LOG_IOCTL, un, 21888 "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]); 21889 mutex_enter(SD_MUTEX(un)); 21890 un->un_f_format_in_progress = TRUE; 21891 mutex_exit(SD_MUTEX(un)); 21892 rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg, 21893 SD_PATH_STANDARD); 21894 mutex_enter(SD_MUTEX(un)); 21895 un->un_f_format_in_progress = FALSE; 21896 mutex_exit(SD_MUTEX(un)); 21897 } else { 21898 SD_TRACE(SD_LOG_IOCTL, un, 21899 "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]); 21900 /* 21901 * It's OK to fall into here even if the ddi_copyin() 21902 * on the uscsi_cdb above fails, because sd_send_scsi_cmd() 21903 * does this same copyin and will return the EFAULT 21904 * if it fails. 
		 */
		rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg,
		    SD_PATH_STANDARD);
	}
#ifdef _MULTI_DATAMODEL
	switch (model) {
	case DDI_MODEL_ILP32:
		/*
		 * Convert back to ILP32 before copyout to the
		 * application.
		 */
		uscsi_cmdtouscsi_cmd32(scmd, ucmd32);
		if (ddi_copyout(ucmd32, (void *)arg, sizeof (*ucmd32), flag)) {
			if (rval != 0) {
				rval = EFAULT;
			}
		}
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) {
			if (rval != 0) {
				rval = EFAULT;
			}
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) {
		if (rval != 0) {
			rval = EFAULT;
		}
	}
#endif /* _MULTI_DATAMODEL */
done:
	kmem_free(scmd, sizeof (struct uscsi_cmd));

	SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sd_dkio_ctrl_info
 *
 * Description: This routine is the driver entry point for handling controller
 *		information ioctl requests (DKIOCINFO).
 *
 * Arguments: dev  - the device number
 *	      arg  - pointer to user provided dk_cinfo structure
 *		     specifying the controller type and attributes.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	struct dk_cinfo	*info;
	dev_info_t	*pdip;
	int		lun, tgt;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	info = (struct dk_cinfo *)
	    kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);

	switch (un->un_ctype) {
	case CTYPE_CDROM:
		info->dki_ctype = DKC_CDROM;
		break;
	default:
		info->dki_ctype = DKC_SCSI_CCS;
		break;
	}
	pdip = ddi_get_parent(SD_DEVINFO(un));
	info->dki_cnum = ddi_get_instance(pdip);
	if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
		(void) strcpy(info->dki_cname, ddi_get_name(pdip));
	} else {
		(void) strncpy(info->dki_cname, ddi_node_name(pdip),
		    DK_DEVLEN - 1);
	}

	lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);

	/* Unit Information */
	info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
	info->dki_slave = ((tgt << 3) | lun);
	(void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
	    DK_DEVLEN - 1);
	info->dki_flags = DKI_FMTVOL;
	info->dki_partition = SDPART(dev);

	/* Max Transfer size of this device in blocks */
	info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
	info->dki_addr = 0;
	info->dki_space = 0;
	info->dki_prio = 0;
	info->dki_vec = 0;

	if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (EFAULT);
	} else {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (0);
	}
}


/*
 * Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
 *
 * Arguments: dev  - the device number
 *	      arg  - pointer to user provided dk_minfo structure
 *		     specifying the media type, logical block size and
 *		     drive capacity.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	struct dk_minfo		media_info;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	/* Issue a TUR to determine if the drive is ready with media present */
	rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		media_info.dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN);

			if (rtn) {
				/*
				 * Failed for other than an illegal request
				 * or command not supported
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto done;
					}
				}
			} else {
				/*
				 * The GET CONFIGURATION command succeeded
				 * so set the media type according to the
				 * returned data
				 */
				media_info.dki_media_type = out_data[6];
				media_info.dki_media_type <<= 8;
				media_info.dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * The profile list is not available, so we attempt to identify
		 * the media type based on the inquiry data
		 */
		sinq = un->un_sd->sd_inq;
		if (sinq->inq_qual == 0) {
			/* This is a direct access device */
			media_info.dki_media_type = DK_FIXED_DISK;

			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					media_info.dki_media_type = DK_ZIP;
				} else if (
				    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					media_info.dki_media_type = DK_JAZ;
				}
			}
		} else {
			/* Not a CD or direct access so return unknown media */
			media_info.dki_media_type = DK_UNKNOWN;
		}
	}

	/* Now read the capacity so we can provide the lbasize and capacity */
	switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize,
	    SD_PATH_DIRECT)) {
	case 0:
		break;
	case EACCES:
		rval = EACCES;
		goto done;
	default:
		rval = EIO;
		goto done;
	}

	media_info.dki_lbsize = lbasize;
	media_capacity = capacity;

	/*
	 * sd_send_scsi_READ_CAPACITY() reports capacity in
	 * un->un_sys_blocksize chunks, so convert it into
	 * lbasize chunks here.
	 */
	media_capacity *= un->un_sys_blocksize;
	media_capacity /= lbasize;
	media_info.dki_capacity = media_capacity;

	if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) {
		rval = EFAULT;
		/* Keep the goto in case code is added below in the future. */
		goto done;
	}
done:
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}


/*
 * Function: sd_dkio_get_geometry
 *
 * Description: This routine is the driver entry point for handling user
 *		requests to get the device geometry (DKIOCGGEOM).
 *
 * Arguments: dev  - the device number
 *	      arg  - pointer to user provided dk_geom structure specifying
 *		     the controller's notion of the current geometry.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *	      geom_validated - flag indicating if the device geometry has been
 *			       previously validated in the sdioctl routine.
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag, int geom_validated)
{
	struct sd_lun	*un = NULL;
	struct dk_geom	*tmp_geom = NULL;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#if defined(__i386) || defined(__amd64)
	if (un->un_solaris_size == 0) {
		return (EIO);
	}
#endif
	if (geom_validated == FALSE) {
		/*
		 * sd_validate_geometry does not spin a disk up
		 * if it was spun down. We need to make sure it
		 * is ready.
		 */
		if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) {
			return (rval);
		}
		mutex_enter(SD_MUTEX(un));
		rval = sd_validate_geometry(un, SD_PATH_DIRECT);
		mutex_exit(SD_MUTEX(un));
	}
	if (rval)
		return (rval);

	/*
	 * Make a local copy of the soft state geometry to avoid some potential
	 * race conditions associated with holding the mutex and updating the
	 * write_reinstruct value.
	 */
	tmp_geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP);
	mutex_enter(SD_MUTEX(un));
	bcopy(&un->un_g, tmp_geom, sizeof (struct dk_geom));
	mutex_exit(SD_MUTEX(un));

	if (tmp_geom->dkg_write_reinstruct == 0) {
		tmp_geom->dkg_write_reinstruct =
		    (int)((int)(tmp_geom->dkg_nsect * tmp_geom->dkg_rpm *
		    sd_rot_delay) / (int)60000);
	}

	rval = ddi_copyout(tmp_geom, (void *)arg, sizeof (struct dk_geom),
	    flag);
	if (rval != 0) {
		rval = EFAULT;
	}

	kmem_free(tmp_geom, sizeof (struct dk_geom));
	return (rval);
}


/*
 * Function: sd_dkio_set_geometry
 *
 * Description: This routine is the driver entry point for handling user
 *		requests to set the device geometry (DKIOCSGEOM). The actual
 *		device geometry is not updated, just the driver "notion" of it.
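 *
 *		An illustrative userland sketch (not part of the driver;
 *		the device path is an assumption):
 *
 *			struct dk_geom g;
 *			int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);
 *
 *			if (ioctl(fd, DKIOCGGEOM, &g) == 0) {
 *				g.dkg_intrlv = 1;
 *				(void) ioctl(fd, DKIOCSGEOM, &g);
 *			}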
22259 * 22260 * Arguments: dev - the device number 22261 * arg - pointer to user provided dk_geom structure used to set 22262 * the controller's notion of the current geometry. 22263 * flag - this argument is a pass through to ddi_copyxxx() 22264 * directly from the mode argument of ioctl(). 22265 * 22266 * Return Code: 0 22267 * EFAULT 22268 * ENXIO 22269 * EIO 22270 */ 22271 22272 static int 22273 sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag) 22274 { 22275 struct sd_lun *un = NULL; 22276 struct dk_geom *tmp_geom; 22277 struct dk_map *lp; 22278 int rval = 0; 22279 int i; 22280 22281 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22282 return (ENXIO); 22283 } 22284 22285 #if defined(__i386) || defined(__amd64) 22286 if (un->un_solaris_size == 0) { 22287 return (EIO); 22288 } 22289 #endif 22290 /* 22291 * We need to copy the user specified geometry into local 22292 * storage and then update the softstate. We don't want to hold 22293 * the mutex and copyin directly from the user to the soft state 22294 */ 22295 tmp_geom = (struct dk_geom *) 22296 kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); 22297 rval = ddi_copyin(arg, tmp_geom, sizeof (struct dk_geom), flag); 22298 if (rval != 0) { 22299 kmem_free(tmp_geom, sizeof (struct dk_geom)); 22300 return (EFAULT); 22301 } 22302 22303 mutex_enter(SD_MUTEX(un)); 22304 bcopy(tmp_geom, &un->un_g, sizeof (struct dk_geom)); 22305 for (i = 0; i < NDKMAP; i++) { 22306 lp = &un->un_map[i]; 22307 un->un_offset[i] = 22308 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 22309 #if defined(__i386) || defined(__amd64) 22310 un->un_offset[i] += un->un_solaris_offset; 22311 #endif 22312 } 22313 un->un_f_geometry_is_valid = FALSE; 22314 mutex_exit(SD_MUTEX(un)); 22315 kmem_free(tmp_geom, sizeof (struct dk_geom)); 22316 22317 return (rval); 22318 } 22319 22320 22321 /* 22322 * Function: sd_dkio_get_partition 22323 * 22324 * Description: This routine is the driver entry point for handling user 22325 * requests to get the partition table (DKIOCGAPART). 22326 * 22327 * Arguments: dev - the device number 22328 * arg - pointer to user provided dk_allmap structure specifying 22329 * the controller's notion of the current partition table. 22330 * flag - this argument is a pass through to ddi_copyxxx() 22331 * directly from the mode argument of ioctl(). 22332 * geom_validated - flag indicating if the device geometry has been 22333 * previously validated in the sdioctl routine. 22334 * 22335 * Return Code: 0 22336 * EFAULT 22337 * ENXIO 22338 * EIO 22339 */ 22340 22341 static int 22342 sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag, int geom_validated) 22343 { 22344 struct sd_lun *un = NULL; 22345 int rval = 0; 22346 int size; 22347 22348 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22349 return (ENXIO); 22350 } 22351 22352 #if defined(__i386) || defined(__amd64) 22353 if (un->un_solaris_size == 0) { 22354 return (EIO); 22355 } 22356 #endif 22357 /* 22358 * Make sure the geometry is valid before getting the partition 22359 * information. 22360 */ 22361 mutex_enter(SD_MUTEX(un)); 22362 if (geom_validated == FALSE) { 22363 /* 22364 * sd_validate_geometry does not spin a disk up 22365 * if it was spun down. We need to make sure it 22366 * is ready before validating the geometry. 
22367 */ 22368 mutex_exit(SD_MUTEX(un)); 22369 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 22370 return (rval); 22371 } 22372 mutex_enter(SD_MUTEX(un)); 22373 22374 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) { 22375 mutex_exit(SD_MUTEX(un)); 22376 return (rval); 22377 } 22378 } 22379 mutex_exit(SD_MUTEX(un)); 22380 22381 #ifdef _MULTI_DATAMODEL 22382 switch (ddi_model_convert_from(flag & FMODELS)) { 22383 case DDI_MODEL_ILP32: { 22384 struct dk_map32 dk_map32[NDKMAP]; 22385 int i; 22386 22387 for (i = 0; i < NDKMAP; i++) { 22388 dk_map32[i].dkl_cylno = un->un_map[i].dkl_cylno; 22389 dk_map32[i].dkl_nblk = un->un_map[i].dkl_nblk; 22390 } 22391 size = NDKMAP * sizeof (struct dk_map32); 22392 rval = ddi_copyout(dk_map32, (void *)arg, size, flag); 22393 if (rval != 0) { 22394 rval = EFAULT; 22395 } 22396 break; 22397 } 22398 case DDI_MODEL_NONE: 22399 size = NDKMAP * sizeof (struct dk_map); 22400 rval = ddi_copyout(un->un_map, (void *)arg, size, flag); 22401 if (rval != 0) { 22402 rval = EFAULT; 22403 } 22404 break; 22405 } 22406 #else /* ! _MULTI_DATAMODEL */ 22407 size = NDKMAP * sizeof (struct dk_map); 22408 rval = ddi_copyout(un->un_map, (void *)arg, size, flag); 22409 if (rval != 0) { 22410 rval = EFAULT; 22411 } 22412 #endif /* _MULTI_DATAMODEL */ 22413 return (rval); 22414 } 22415 22416 22417 /* 22418 * Function: sd_dkio_set_partition 22419 * 22420 * Description: This routine is the driver entry point for handling user 22421 * requests to set the partition table (DKIOCSAPART). The actual 22422 * device partition is not updated. 22423 * 22424 * Arguments: dev - the device number 22425 * arg - pointer to user provided dk_allmap structure used to set 22426 * the controller's notion of the partition table. 22427 * flag - this argument is a pass through to ddi_copyxxx() 22428 * directly from the mode argument of ioctl(). 22429 * 22430 * Return Code: 0 22431 * EINVAL 22432 * EFAULT 22433 * ENXIO 22434 * EIO 22435 */ 22436 22437 static int 22438 sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag) 22439 { 22440 struct sd_lun *un = NULL; 22441 struct dk_map dk_map[NDKMAP]; 22442 struct dk_map *lp; 22443 int rval = 0; 22444 int size; 22445 int i; 22446 #if defined(_SUNOS_VTOC_16) 22447 struct dkl_partition *vp; 22448 #endif 22449 22450 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22451 return (ENXIO); 22452 } 22453 22454 /* 22455 * Set the map for all logical partitions. We lock 22456 * the priority just to make sure an interrupt doesn't 22457 * come in while the map is half updated. 
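	 * (The map is protected by SD_MUTEX(un) on this code path. Also
	 * note that disks with more than DK_MAX_BLOCKS blocks are
	 * expected to carry an EFI label rather than a VTOC, which is
	 * why such requests are rejected with ENOTSUP just below.)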
22458 */ 22459 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_solaris_size)) 22460 mutex_enter(SD_MUTEX(un)); 22461 if (un->un_blockcount > DK_MAX_BLOCKS) { 22462 mutex_exit(SD_MUTEX(un)); 22463 return (ENOTSUP); 22464 } 22465 mutex_exit(SD_MUTEX(un)); 22466 if (un->un_solaris_size == 0) { 22467 return (EIO); 22468 } 22469 22470 #ifdef _MULTI_DATAMODEL 22471 switch (ddi_model_convert_from(flag & FMODELS)) { 22472 case DDI_MODEL_ILP32: { 22473 struct dk_map32 dk_map32[NDKMAP]; 22474 22475 size = NDKMAP * sizeof (struct dk_map32); 22476 rval = ddi_copyin((void *)arg, dk_map32, size, flag); 22477 if (rval != 0) { 22478 return (EFAULT); 22479 } 22480 for (i = 0; i < NDKMAP; i++) { 22481 dk_map[i].dkl_cylno = dk_map32[i].dkl_cylno; 22482 dk_map[i].dkl_nblk = dk_map32[i].dkl_nblk; 22483 } 22484 break; 22485 } 22486 case DDI_MODEL_NONE: 22487 size = NDKMAP * sizeof (struct dk_map); 22488 rval = ddi_copyin((void *)arg, dk_map, size, flag); 22489 if (rval != 0) { 22490 return (EFAULT); 22491 } 22492 break; 22493 } 22494 #else /* ! _MULTI_DATAMODEL */ 22495 size = NDKMAP * sizeof (struct dk_map); 22496 rval = ddi_copyin((void *)arg, dk_map, size, flag); 22497 if (rval != 0) { 22498 return (EFAULT); 22499 } 22500 #endif /* _MULTI_DATAMODEL */ 22501 22502 mutex_enter(SD_MUTEX(un)); 22503 /* Note: The size used in this bcopy is set based upon the data model */ 22504 bcopy(dk_map, un->un_map, size); 22505 #if defined(_SUNOS_VTOC_16) 22506 vp = (struct dkl_partition *)&(un->un_vtoc); 22507 #endif /* defined(_SUNOS_VTOC_16) */ 22508 for (i = 0; i < NDKMAP; i++) { 22509 lp = &un->un_map[i]; 22510 un->un_offset[i] = 22511 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 22512 #if defined(_SUNOS_VTOC_16) 22513 vp->p_start = un->un_offset[i]; 22514 vp->p_size = lp->dkl_nblk; 22515 vp++; 22516 #endif /* defined(_SUNOS_VTOC_16) */ 22517 #if defined(__i386) || defined(__amd64) 22518 un->un_offset[i] += un->un_solaris_offset; 22519 #endif 22520 } 22521 mutex_exit(SD_MUTEX(un)); 22522 return (rval); 22523 } 22524 22525 22526 /* 22527 * Function: sd_dkio_get_vtoc 22528 * 22529 * Description: This routine is the driver entry point for handling user 22530 * requests to get the current volume table of contents 22531 * (DKIOCGVTOC). 22532 * 22533 * Arguments: dev - the device number 22534 * arg - pointer to user provided vtoc structure specifying 22535 * the current vtoc. 22536 * flag - this argument is a pass through to ddi_copyxxx() 22537 * directly from the mode argument of ioctl(). 22538 * geom_validated - flag indicating if the device geometry has been 22539 * previously validated in the sdioctl routine. 22540 * 22541 * Return Code: 0 22542 * EFAULT 22543 * ENXIO 22544 * EIO 22545 */ 22546 22547 static int 22548 sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag, int geom_validated) 22549 { 22550 struct sd_lun *un = NULL; 22551 #if defined(_SUNOS_VTOC_8) 22552 struct vtoc user_vtoc; 22553 #endif /* defined(_SUNOS_VTOC_8) */ 22554 int rval = 0; 22555 22556 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22557 return (ENXIO); 22558 } 22559 22560 mutex_enter(SD_MUTEX(un)); 22561 if (geom_validated == FALSE) { 22562 /* 22563 * sd_validate_geometry does not spin a disk up 22564 * if it was spun down. We need to make sure it 22565 * is ready. 
22566 */ 22567 mutex_exit(SD_MUTEX(un)); 22568 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 22569 return (rval); 22570 } 22571 mutex_enter(SD_MUTEX(un)); 22572 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) { 22573 mutex_exit(SD_MUTEX(un)); 22574 return (rval); 22575 } 22576 } 22577 22578 #if defined(_SUNOS_VTOC_8) 22579 sd_build_user_vtoc(un, &user_vtoc); 22580 mutex_exit(SD_MUTEX(un)); 22581 22582 #ifdef _MULTI_DATAMODEL 22583 switch (ddi_model_convert_from(flag & FMODELS)) { 22584 case DDI_MODEL_ILP32: { 22585 struct vtoc32 user_vtoc32; 22586 22587 vtoctovtoc32(user_vtoc, user_vtoc32); 22588 if (ddi_copyout(&user_vtoc32, (void *)arg, 22589 sizeof (struct vtoc32), flag)) { 22590 return (EFAULT); 22591 } 22592 break; 22593 } 22594 22595 case DDI_MODEL_NONE: 22596 if (ddi_copyout(&user_vtoc, (void *)arg, 22597 sizeof (struct vtoc), flag)) { 22598 return (EFAULT); 22599 } 22600 break; 22601 } 22602 #else /* ! _MULTI_DATAMODEL */ 22603 if (ddi_copyout(&user_vtoc, (void *)arg, sizeof (struct vtoc), flag)) { 22604 return (EFAULT); 22605 } 22606 #endif /* _MULTI_DATAMODEL */ 22607 22608 #elif defined(_SUNOS_VTOC_16) 22609 mutex_exit(SD_MUTEX(un)); 22610 22611 #ifdef _MULTI_DATAMODEL 22612 /* 22613 * The un_vtoc structure is a "struct dk_vtoc" which is always 22614 * 32-bit to maintain compatibility with existing on-disk 22615 * structures. Thus, we need to convert the structure when copying 22616 * it out to a datamodel-dependent "struct vtoc" in a 64-bit 22617 * program. If the target is a 32-bit program, then no conversion 22618 * is necessary. 22619 */ 22620 /* LINTED: logical expression always true: op "||" */ 22621 ASSERT(sizeof (un->un_vtoc) == sizeof (struct vtoc32)); 22622 switch (ddi_model_convert_from(flag & FMODELS)) { 22623 case DDI_MODEL_ILP32: 22624 if (ddi_copyout(&(un->un_vtoc), (void *)arg, 22625 sizeof (un->un_vtoc), flag)) { 22626 return (EFAULT); 22627 } 22628 break; 22629 22630 case DDI_MODEL_NONE: { 22631 struct vtoc user_vtoc; 22632 22633 vtoc32tovtoc(un->un_vtoc, user_vtoc); 22634 if (ddi_copyout(&user_vtoc, (void *)arg, 22635 sizeof (struct vtoc), flag)) { 22636 return (EFAULT); 22637 } 22638 break; 22639 } 22640 } 22641 #else /* ! _MULTI_DATAMODEL */ 22642 if (ddi_copyout(&(un->un_vtoc), (void *)arg, sizeof (un->un_vtoc), 22643 flag)) { 22644 return (EFAULT); 22645 } 22646 #endif /* _MULTI_DATAMODEL */ 22647 #else 22648 #error "No VTOC format defined." 
22649 #endif 22650 22651 return (rval); 22652 } 22653 22654 static int 22655 sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag) 22656 { 22657 struct sd_lun *un = NULL; 22658 dk_efi_t user_efi; 22659 int rval = 0; 22660 void *buffer; 22661 22662 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) 22663 return (ENXIO); 22664 22665 if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag)) 22666 return (EFAULT); 22667 22668 user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64; 22669 22670 if ((user_efi.dki_length % un->un_tgt_blocksize) || 22671 (user_efi.dki_length > un->un_max_xfer_size)) 22672 return (EINVAL); 22673 22674 buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP); 22675 rval = sd_send_scsi_READ(un, buffer, user_efi.dki_length, 22676 user_efi.dki_lba, SD_PATH_DIRECT); 22677 if (rval == 0 && ddi_copyout(buffer, user_efi.dki_data, 22678 user_efi.dki_length, flag) != 0) 22679 rval = EFAULT; 22680 22681 kmem_free(buffer, user_efi.dki_length); 22682 return (rval); 22683 } 22684 22685 /* 22686 * Function: sd_build_user_vtoc 22687 * 22688 * Description: This routine populates a pass by reference variable with the 22689 * current volume table of contents. 22690 * 22691 * Arguments: un - driver soft state (unit) structure 22692 * user_vtoc - pointer to vtoc structure to be populated 22693 */ 22694 22695 static void 22696 sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc) 22697 { 22698 struct dk_map2 *lpart; 22699 struct dk_map *lmap; 22700 struct partition *vpart; 22701 int nblks; 22702 int i; 22703 22704 ASSERT(mutex_owned(SD_MUTEX(un))); 22705 22706 /* 22707 * Return vtoc structure fields in the provided VTOC area, addressed 22708 * by *vtoc. 22709 */ 22710 bzero(user_vtoc, sizeof (struct vtoc)); 22711 user_vtoc->v_bootinfo[0] = un->un_vtoc.v_bootinfo[0]; 22712 user_vtoc->v_bootinfo[1] = un->un_vtoc.v_bootinfo[1]; 22713 user_vtoc->v_bootinfo[2] = un->un_vtoc.v_bootinfo[2]; 22714 user_vtoc->v_sanity = VTOC_SANE; 22715 user_vtoc->v_version = un->un_vtoc.v_version; 22716 bcopy(un->un_vtoc.v_volume, user_vtoc->v_volume, LEN_DKL_VVOL); 22717 user_vtoc->v_sectorsz = un->un_sys_blocksize; 22718 user_vtoc->v_nparts = un->un_vtoc.v_nparts; 22719 bcopy(un->un_vtoc.v_reserved, user_vtoc->v_reserved, 22720 sizeof (un->un_vtoc.v_reserved)); 22721 /* 22722 * Convert partitioning information. 22723 * 22724 * Note the conversion from starting cylinder number 22725 * to starting sector number. 
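	 *
	 * For example, with dkg_nhead = 16 and dkg_nsect = 63 (so that
	 * nblks = 16 * 63 = 1008), a map entry starting at cylinder 100
	 * is reported with p_start = 100 * 1008 = 100800 (values chosen
	 * for illustration only).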
22726 */ 22727 lmap = un->un_map; 22728 lpart = (struct dk_map2 *)un->un_vtoc.v_part; 22729 vpart = user_vtoc->v_part; 22730 22731 nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead; 22732 22733 for (i = 0; i < V_NUMPAR; i++) { 22734 vpart->p_tag = lpart->p_tag; 22735 vpart->p_flag = lpart->p_flag; 22736 vpart->p_start = lmap->dkl_cylno * nblks; 22737 vpart->p_size = lmap->dkl_nblk; 22738 lmap++; 22739 lpart++; 22740 vpart++; 22741 22742 /* (4364927) */ 22743 user_vtoc->timestamp[i] = (time_t)un->un_vtoc.v_timestamp[i]; 22744 } 22745 22746 bcopy(un->un_asciilabel, user_vtoc->v_asciilabel, LEN_DKL_ASCII); 22747 } 22748 22749 static int 22750 sd_dkio_partition(dev_t dev, caddr_t arg, int flag) 22751 { 22752 struct sd_lun *un = NULL; 22753 struct partition64 p64; 22754 int rval = 0; 22755 uint_t nparts; 22756 efi_gpe_t *partitions; 22757 efi_gpt_t *buffer; 22758 diskaddr_t gpe_lba; 22759 22760 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22761 return (ENXIO); 22762 } 22763 22764 if (ddi_copyin((const void *)arg, &p64, 22765 sizeof (struct partition64), flag)) { 22766 return (EFAULT); 22767 } 22768 22769 buffer = kmem_alloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP); 22770 rval = sd_send_scsi_READ(un, buffer, DEV_BSIZE, 22771 1, SD_PATH_DIRECT); 22772 if (rval != 0) 22773 goto done_error; 22774 22775 sd_swap_efi_gpt(buffer); 22776 22777 if ((rval = sd_validate_efi(buffer)) != 0) 22778 goto done_error; 22779 22780 nparts = buffer->efi_gpt_NumberOfPartitionEntries; 22781 gpe_lba = buffer->efi_gpt_PartitionEntryLBA; 22782 if (p64.p_partno > nparts) { 22783 /* couldn't find it */ 22784 rval = ESRCH; 22785 goto done_error; 22786 } 22787 /* 22788 * if we're dealing with a partition that's out of the normal 22789 * 16K block, adjust accordingly 22790 */ 22791 gpe_lba += p64.p_partno / sizeof (efi_gpe_t); 22792 rval = sd_send_scsi_READ(un, buffer, EFI_MIN_ARRAY_SIZE, 22793 gpe_lba, SD_PATH_DIRECT); 22794 if (rval) { 22795 goto done_error; 22796 } 22797 partitions = (efi_gpe_t *)buffer; 22798 22799 sd_swap_efi_gpe(nparts, partitions); 22800 22801 partitions += p64.p_partno; 22802 bcopy(&partitions->efi_gpe_PartitionTypeGUID, &p64.p_type, 22803 sizeof (struct uuid)); 22804 p64.p_start = partitions->efi_gpe_StartingLBA; 22805 p64.p_size = partitions->efi_gpe_EndingLBA - 22806 p64.p_start + 1; 22807 22808 if (ddi_copyout(&p64, (void *)arg, sizeof (struct partition64), flag)) 22809 rval = EFAULT; 22810 22811 done_error: 22812 kmem_free(buffer, EFI_MIN_ARRAY_SIZE); 22813 return (rval); 22814 } 22815 22816 22817 /* 22818 * Function: sd_dkio_set_vtoc 22819 * 22820 * Description: This routine is the driver entry point for handling user 22821 * requests to set the current volume table of contents 22822 * (DKIOCSVTOC). 22823 * 22824 * Arguments: dev - the device number 22825 * arg - pointer to user provided vtoc structure used to set the 22826 * current vtoc. 22827 * flag - this argument is a pass through to ddi_copyxxx() 22828 * directly from the mode argument of ioctl(). 
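 *
 *		An illustrative userland sketch (not part of the driver;
 *		the device path and tag value are assumptions):
 *
 *			struct vtoc vt;
 *			int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);
 *
 *			if (ioctl(fd, DKIOCGVTOC, &vt) == 0) {
 *				vt.v_part[0].p_tag = V_USR;
 *				(void) ioctl(fd, DKIOCSVTOC, &vt);
 *			}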
22829 * 22830 * Return Code: 0 22831 * EFAULT 22832 * ENXIO 22833 * EINVAL 22834 * ENOTSUP 22835 */ 22836 22837 static int 22838 sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag) 22839 { 22840 struct sd_lun *un = NULL; 22841 struct vtoc user_vtoc; 22842 int rval = 0; 22843 22844 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22845 return (ENXIO); 22846 } 22847 22848 #if defined(__i386) || defined(__amd64) 22849 if (un->un_tgt_blocksize != un->un_sys_blocksize) { 22850 return (EINVAL); 22851 } 22852 #endif 22853 22854 #ifdef _MULTI_DATAMODEL 22855 switch (ddi_model_convert_from(flag & FMODELS)) { 22856 case DDI_MODEL_ILP32: { 22857 struct vtoc32 user_vtoc32; 22858 22859 if (ddi_copyin((const void *)arg, &user_vtoc32, 22860 sizeof (struct vtoc32), flag)) { 22861 return (EFAULT); 22862 } 22863 vtoc32tovtoc(user_vtoc32, user_vtoc); 22864 break; 22865 } 22866 22867 case DDI_MODEL_NONE: 22868 if (ddi_copyin((const void *)arg, &user_vtoc, 22869 sizeof (struct vtoc), flag)) { 22870 return (EFAULT); 22871 } 22872 break; 22873 } 22874 #else /* ! _MULTI_DATAMODEL */ 22875 if (ddi_copyin((const void *)arg, &user_vtoc, 22876 sizeof (struct vtoc), flag)) { 22877 return (EFAULT); 22878 } 22879 #endif /* _MULTI_DATAMODEL */ 22880 22881 mutex_enter(SD_MUTEX(un)); 22882 if (un->un_blockcount > DK_MAX_BLOCKS) { 22883 mutex_exit(SD_MUTEX(un)); 22884 return (ENOTSUP); 22885 } 22886 if (un->un_g.dkg_ncyl == 0) { 22887 mutex_exit(SD_MUTEX(un)); 22888 return (EINVAL); 22889 } 22890 22891 mutex_exit(SD_MUTEX(un)); 22892 sd_clear_efi(un); 22893 ddi_remove_minor_node(SD_DEVINFO(un), "wd"); 22894 ddi_remove_minor_node(SD_DEVINFO(un), "wd,raw"); 22895 (void) ddi_create_minor_node(SD_DEVINFO(un), "h", 22896 S_IFBLK, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 22897 un->un_node_type, NULL); 22898 (void) ddi_create_minor_node(SD_DEVINFO(un), "h,raw", 22899 S_IFCHR, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 22900 un->un_node_type, NULL); 22901 mutex_enter(SD_MUTEX(un)); 22902 22903 if ((rval = sd_build_label_vtoc(un, &user_vtoc)) == 0) { 22904 if ((rval = sd_write_label(dev)) == 0) { 22905 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) 22906 != 0) { 22907 SD_ERROR(SD_LOG_IOCTL_DKIO, un, 22908 "sd_dkio_set_vtoc: " 22909 "Failed validate geometry\n"); 22910 } 22911 } 22912 } 22913 22914 /* 22915 * If sd_build_label_vtoc, or sd_write_label failed above write the 22916 * devid anyway, what can it hurt? Also preserve the device id by 22917 * writing to the disk acyl for the case where a devid has been 22918 * fabricated. 22919 */ 22920 if (!ISREMOVABLE(un) && !ISCD(un) && 22921 (un->un_f_opt_fab_devid == TRUE)) { 22922 if (un->un_devid == NULL) { 22923 sd_register_devid(un, SD_DEVINFO(un), 22924 SD_TARGET_IS_UNRESERVED); 22925 } else { 22926 /* 22927 * The device id for this disk has been 22928 * fabricated. Fabricated device id's are 22929 * managed by storing them in the last 2 22930 * available sectors on the drive. The device 22931 * id must be preserved by writing it back out 22932 * to this location. 22933 */ 22934 if (sd_write_deviceid(un) != 0) { 22935 ddi_devid_free(un->un_devid); 22936 un->un_devid = NULL; 22937 } 22938 } 22939 } 22940 mutex_exit(SD_MUTEX(un)); 22941 return (rval); 22942 } 22943 22944 22945 /* 22946 * Function: sd_build_label_vtoc 22947 * 22948 * Description: This routine updates the driver soft state current volume table 22949 * of contents based on a user specified vtoc. 
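 *
 *		(This routine is effectively the inverse of
 *		sd_build_user_vtoc().)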
22950 * 22951 * Arguments: un - driver soft state (unit) structure 22952 * user_vtoc - pointer to vtoc structure specifying vtoc to be used 22953 * to update the driver soft state. 22954 * 22955 * Return Code: 0 22956 * EINVAL 22957 */ 22958 22959 static int 22960 sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc) 22961 { 22962 struct dk_map *lmap; 22963 struct partition *vpart; 22964 int nblks; 22965 #if defined(_SUNOS_VTOC_8) 22966 int ncyl; 22967 struct dk_map2 *lpart; 22968 #endif /* defined(_SUNOS_VTOC_8) */ 22969 int i; 22970 22971 ASSERT(mutex_owned(SD_MUTEX(un))); 22972 22973 /* Sanity-check the vtoc */ 22974 if (user_vtoc->v_sanity != VTOC_SANE || 22975 user_vtoc->v_sectorsz != un->un_sys_blocksize || 22976 user_vtoc->v_nparts != V_NUMPAR) { 22977 return (EINVAL); 22978 } 22979 22980 nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead; 22981 if (nblks == 0) { 22982 return (EINVAL); 22983 } 22984 22985 #if defined(_SUNOS_VTOC_8) 22986 vpart = user_vtoc->v_part; 22987 for (i = 0; i < V_NUMPAR; i++) { 22988 if ((vpart->p_start % nblks) != 0) { 22989 return (EINVAL); 22990 } 22991 ncyl = vpart->p_start / nblks; 22992 ncyl += vpart->p_size / nblks; 22993 if ((vpart->p_size % nblks) != 0) { 22994 ncyl++; 22995 } 22996 if (ncyl > (int)un->un_g.dkg_ncyl) { 22997 return (EINVAL); 22998 } 22999 vpart++; 23000 } 23001 #endif /* defined(_SUNOS_VTOC_8) */ 23002 23003 /* Put appropriate vtoc structure fields into the disk label */ 23004 #if defined(_SUNOS_VTOC_16) 23005 /* 23006 * The vtoc is always a 32bit data structure to maintain the 23007 * on-disk format. Convert "in place" instead of bcopying it. 23008 */ 23009 vtoctovtoc32((*user_vtoc), (*((struct vtoc32 *)&(un->un_vtoc)))); 23010 23011 /* 23012 * in the 16-slice vtoc, starting sectors are expressed in 23013 * numbers *relative* to the start of the Solaris fdisk partition. 23014 */ 23015 lmap = un->un_map; 23016 vpart = user_vtoc->v_part; 23017 23018 for (i = 0; i < (int)user_vtoc->v_nparts; i++, lmap++, vpart++) { 23019 lmap->dkl_cylno = vpart->p_start / nblks; 23020 lmap->dkl_nblk = vpart->p_size; 23021 } 23022 23023 #elif defined(_SUNOS_VTOC_8) 23024 23025 un->un_vtoc.v_bootinfo[0] = (uint32_t)user_vtoc->v_bootinfo[0]; 23026 un->un_vtoc.v_bootinfo[1] = (uint32_t)user_vtoc->v_bootinfo[1]; 23027 un->un_vtoc.v_bootinfo[2] = (uint32_t)user_vtoc->v_bootinfo[2]; 23028 23029 un->un_vtoc.v_sanity = (uint32_t)user_vtoc->v_sanity; 23030 un->un_vtoc.v_version = (uint32_t)user_vtoc->v_version; 23031 23032 bcopy(user_vtoc->v_volume, un->un_vtoc.v_volume, LEN_DKL_VVOL); 23033 23034 un->un_vtoc.v_nparts = user_vtoc->v_nparts; 23035 23036 bcopy(user_vtoc->v_reserved, un->un_vtoc.v_reserved, 23037 sizeof (un->un_vtoc.v_reserved)); 23038 23039 /* 23040 * Note the conversion from starting sector number 23041 * to starting cylinder number. 23042 * Return error if division results in a remainder. 
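	 *
	 * For example, with nblks = 1008 (16 heads * 63 sectors per
	 * track), p_start = 2016 maps to cylinder 2, while p_start =
	 * 2000 leaves a remainder and is rejected with EINVAL by the
	 * sanity checks earlier in this routine (illustrative values).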
	 */
	lmap = un->un_map;
	lpart = un->un_vtoc.v_part;
	vpart = user_vtoc->v_part;

	for (i = 0; i < (int)user_vtoc->v_nparts; i++) {
		lpart->p_tag  = vpart->p_tag;
		lpart->p_flag = vpart->p_flag;
		lmap->dkl_cylno = vpart->p_start / nblks;
		lmap->dkl_nblk = vpart->p_size;

		lmap++;
		lpart++;
		vpart++;

		/* (4387723) */
#ifdef _LP64
		if (user_vtoc->timestamp[i] > TIME32_MAX) {
			un->un_vtoc.v_timestamp[i] = TIME32_MAX;
		} else {
			un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
		}
#else
		un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
#endif
	}

	bcopy(user_vtoc->v_asciilabel, un->un_asciilabel, LEN_DKL_ASCII);
#else
#error "No VTOC format defined."
#endif
	return (0);
}

/*
 * Function: sd_clear_efi
 *
 * Description: This routine clears all EFI labels.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: void
 */

static void
sd_clear_efi(struct sd_lun *un)
{
	efi_gpt_t	*gpt;
	uint_t		lbasize;
	uint64_t	cap;
	int		rval;

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	gpt = kmem_alloc(sizeof (efi_gpt_t), KM_SLEEP);

	if (sd_send_scsi_READ(un, gpt, DEV_BSIZE, 1, SD_PATH_DIRECT) != 0) {
		goto done;
	}

	sd_swap_efi_gpt(gpt);
	rval = sd_validate_efi(gpt);
	if (rval == 0) {
		/* clear primary */
		bzero(gpt, sizeof (efi_gpt_t));
		if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE, 1,
		    SD_PATH_DIRECT))) {
			SD_INFO(SD_LOG_IO_PARTITION, un,
			    "sd_clear_efi: clear primary label failed\n");
		}
	}
	/* the backup */
	rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize,
	    SD_PATH_DIRECT);
	if (rval) {
		goto done;
	}
	if ((rval = sd_send_scsi_READ(un, gpt, lbasize,
	    cap - 1, SD_PATH_DIRECT)) != 0) {
		goto done;
	}
	sd_swap_efi_gpt(gpt);
	rval = sd_validate_efi(gpt);
	if (rval == 0) {
		/* clear backup */
		SD_TRACE(SD_LOG_IOCTL, un, "sd_clear_efi clear backup@%lu\n",
		    cap - 1);
		bzero(gpt, sizeof (efi_gpt_t));
		if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE,
		    cap - 1, SD_PATH_DIRECT))) {
			SD_INFO(SD_LOG_IO_PARTITION, un,
			    "sd_clear_efi: clear backup label failed\n");
		}
	}

done:
	kmem_free(gpt, sizeof (efi_gpt_t));
}

/*
 * Function: sd_set_vtoc
 *
 * Description: This routine writes the disk label to the primary label
 *		location and to the appropriate backup label locations.
 *
 * Arguments: un  - driver soft state (unit) structure
 *	      dkl - the label (struct dk_label) to be written
 *
 * Return Code: 0 for success, or the error returned by
 *		sd_send_scsi_READ()/sd_send_scsi_WRITE().
 */

static int
sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl)
{
	void	*shadow_buf;
	uint_t	label_addr;
	int	sec;
	int	blk;
	int	head;
	int	cyl;
	int	rval;

#if defined(__i386) || defined(__amd64)
	label_addr = un->un_solaris_offset + DK_LABEL_LOC;
#else
	/* Write the primary label at block 0 of the solaris partition. */
	label_addr = 0;
#endif

	if (NOT_DEVBSIZE(un)) {
		shadow_buf = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP);
		/*
		 * Read the target's first block.
23175 */ 23176 if ((rval = sd_send_scsi_READ(un, shadow_buf, 23177 un->un_tgt_blocksize, label_addr, 23178 SD_PATH_STANDARD)) != 0) { 23179 goto exit; 23180 } 23181 /* 23182 * Copy the contents of the label into the shadow buffer 23183 * which is of the size of target block size. 23184 */ 23185 bcopy(dkl, shadow_buf, sizeof (struct dk_label)); 23186 } 23187 23188 /* Write the primary label */ 23189 if (NOT_DEVBSIZE(un)) { 23190 rval = sd_send_scsi_WRITE(un, shadow_buf, un->un_tgt_blocksize, 23191 label_addr, SD_PATH_STANDARD); 23192 } else { 23193 rval = sd_send_scsi_WRITE(un, dkl, un->un_sys_blocksize, 23194 label_addr, SD_PATH_STANDARD); 23195 } 23196 if (rval != 0) { 23197 return (rval); 23198 } 23199 23200 /* 23201 * Calculate where the backup labels go. They are always on 23202 * the last alternate cylinder, but some older drives put them 23203 * on head 2 instead of the last head. They are always on the 23204 * first 5 odd sectors of the appropriate track. 23205 * 23206 * We have no choice at this point, but to believe that the 23207 * disk label is valid. Use the geometry of the disk 23208 * as described in the label. 23209 */ 23210 cyl = dkl->dkl_ncyl + dkl->dkl_acyl - 1; 23211 head = dkl->dkl_nhead - 1; 23212 23213 /* 23214 * Write and verify the backup labels. Make sure we don't try to 23215 * write past the last cylinder. 23216 */ 23217 for (sec = 1; ((sec < 5 * 2 + 1) && (sec < dkl->dkl_nsect)); sec += 2) { 23218 blk = (daddr_t)( 23219 (cyl * ((dkl->dkl_nhead * dkl->dkl_nsect) - dkl->dkl_apc)) + 23220 (head * dkl->dkl_nsect) + sec); 23221 #if defined(__i386) || defined(__amd64) 23222 blk += un->un_solaris_offset; 23223 #endif 23224 if (NOT_DEVBSIZE(un)) { 23225 uint64_t tblk; 23226 /* 23227 * Need to read the block first for read modify write. 23228 */ 23229 tblk = (uint64_t)blk; 23230 blk = (int)((tblk * un->un_sys_blocksize) / 23231 un->un_tgt_blocksize); 23232 if ((rval = sd_send_scsi_READ(un, shadow_buf, 23233 un->un_tgt_blocksize, blk, 23234 SD_PATH_STANDARD)) != 0) { 23235 goto exit; 23236 } 23237 /* 23238 * Modify the shadow buffer with the label. 23239 */ 23240 bcopy(dkl, shadow_buf, sizeof (struct dk_label)); 23241 rval = sd_send_scsi_WRITE(un, shadow_buf, 23242 un->un_tgt_blocksize, blk, SD_PATH_STANDARD); 23243 } else { 23244 rval = sd_send_scsi_WRITE(un, dkl, un->un_sys_blocksize, 23245 blk, SD_PATH_STANDARD); 23246 SD_INFO(SD_LOG_IO_PARTITION, un, 23247 "sd_set_vtoc: wrote backup label %d\n", blk); 23248 } 23249 if (rval != 0) { 23250 goto exit; 23251 } 23252 } 23253 exit: 23254 if (NOT_DEVBSIZE(un)) { 23255 kmem_free(shadow_buf, un->un_tgt_blocksize); 23256 } 23257 return (rval); 23258 } 23259 23260 /* 23261 * Function: sd_clear_vtoc 23262 * 23263 * Description: This routine clears out the VTOC labels. 
23264 * 23265 * Arguments: un - driver soft state (unit) structure 23266 * 23267 * Return: void 23268 */ 23269 23270 static void 23271 sd_clear_vtoc(struct sd_lun *un) 23272 { 23273 struct dk_label *dkl; 23274 23275 mutex_exit(SD_MUTEX(un)); 23276 dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP); 23277 mutex_enter(SD_MUTEX(un)); 23278 /* 23279 * sd_set_vtoc uses these fields in order to figure out 23280 * where to overwrite the backup labels 23281 */ 23282 dkl->dkl_apc = un->un_g.dkg_apc; 23283 dkl->dkl_ncyl = un->un_g.dkg_ncyl; 23284 dkl->dkl_acyl = un->un_g.dkg_acyl; 23285 dkl->dkl_nhead = un->un_g.dkg_nhead; 23286 dkl->dkl_nsect = un->un_g.dkg_nsect; 23287 mutex_exit(SD_MUTEX(un)); 23288 (void) sd_set_vtoc(un, dkl); 23289 kmem_free(dkl, sizeof (struct dk_label)); 23290 23291 mutex_enter(SD_MUTEX(un)); 23292 } 23293 23294 /* 23295 * Function: sd_write_label 23296 * 23297 * Description: This routine will validate and write the driver soft state vtoc 23298 * contents to the device. 23299 * 23300 * Arguments: dev - the device number 23301 * 23302 * Return Code: the code returned by sd_send_scsi_cmd() 23303 * 0 23304 * EINVAL 23305 * ENXIO 23306 * ENOMEM 23307 */ 23308 23309 static int 23310 sd_write_label(dev_t dev) 23311 { 23312 struct sd_lun *un; 23313 struct dk_label *dkl; 23314 short sum; 23315 short *sp; 23316 int i; 23317 int rval; 23318 23319 if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) || 23320 (un->un_state == SD_STATE_OFFLINE)) { 23321 return (ENXIO); 23322 } 23323 ASSERT(mutex_owned(SD_MUTEX(un))); 23324 mutex_exit(SD_MUTEX(un)); 23325 dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP); 23326 mutex_enter(SD_MUTEX(un)); 23327 23328 bcopy(&un->un_vtoc, &dkl->dkl_vtoc, sizeof (struct dk_vtoc)); 23329 dkl->dkl_rpm = un->un_g.dkg_rpm; 23330 dkl->dkl_pcyl = un->un_g.dkg_pcyl; 23331 dkl->dkl_apc = un->un_g.dkg_apc; 23332 dkl->dkl_intrlv = un->un_g.dkg_intrlv; 23333 dkl->dkl_ncyl = un->un_g.dkg_ncyl; 23334 dkl->dkl_acyl = un->un_g.dkg_acyl; 23335 dkl->dkl_nhead = un->un_g.dkg_nhead; 23336 dkl->dkl_nsect = un->un_g.dkg_nsect; 23337 23338 #if defined(_SUNOS_VTOC_8) 23339 dkl->dkl_obs1 = un->un_g.dkg_obs1; 23340 dkl->dkl_obs2 = un->un_g.dkg_obs2; 23341 dkl->dkl_obs3 = un->un_g.dkg_obs3; 23342 for (i = 0; i < NDKMAP; i++) { 23343 dkl->dkl_map[i].dkl_cylno = un->un_map[i].dkl_cylno; 23344 dkl->dkl_map[i].dkl_nblk = un->un_map[i].dkl_nblk; 23345 } 23346 bcopy(un->un_asciilabel, dkl->dkl_asciilabel, LEN_DKL_ASCII); 23347 #elif defined(_SUNOS_VTOC_16) 23348 dkl->dkl_skew = un->un_dkg_skew; 23349 #else 23350 #error "No VTOC format defined." 
23351 #endif 23352 23353 dkl->dkl_magic = DKL_MAGIC; 23354 dkl->dkl_write_reinstruct = un->un_g.dkg_write_reinstruct; 23355 dkl->dkl_read_reinstruct = un->un_g.dkg_read_reinstruct; 23356 23357 /* Construct checksum for the new disk label */ 23358 sum = 0; 23359 sp = (short *)dkl; 23360 i = sizeof (struct dk_label) / sizeof (short); 23361 while (i--) { 23362 sum ^= *sp++; 23363 } 23364 dkl->dkl_cksum = sum; 23365 23366 mutex_exit(SD_MUTEX(un)); 23367 23368 rval = sd_set_vtoc(un, dkl); 23369 exit: 23370 kmem_free(dkl, sizeof (struct dk_label)); 23371 mutex_enter(SD_MUTEX(un)); 23372 return (rval); 23373 } 23374 23375 static int 23376 sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag) 23377 { 23378 struct sd_lun *un = NULL; 23379 dk_efi_t user_efi; 23380 int rval = 0; 23381 void *buffer; 23382 23383 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) 23384 return (ENXIO); 23385 23386 if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag)) 23387 return (EFAULT); 23388 23389 user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64; 23390 23391 if ((user_efi.dki_length % un->un_tgt_blocksize) || 23392 (user_efi.dki_length > un->un_max_xfer_size)) 23393 return (EINVAL); 23394 23395 buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP); 23396 if (ddi_copyin(user_efi.dki_data, buffer, user_efi.dki_length, flag)) { 23397 rval = EFAULT; 23398 } else { 23399 /* 23400 * let's clear the vtoc labels and clear the softstate 23401 * vtoc. 23402 */ 23403 mutex_enter(SD_MUTEX(un)); 23404 if (un->un_vtoc.v_sanity == VTOC_SANE) { 23405 SD_TRACE(SD_LOG_IO_PARTITION, un, 23406 "sd_dkio_set_efi: CLEAR VTOC\n"); 23407 sd_clear_vtoc(un); 23408 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 23409 mutex_exit(SD_MUTEX(un)); 23410 ddi_remove_minor_node(SD_DEVINFO(un), "h"); 23411 ddi_remove_minor_node(SD_DEVINFO(un), "h,raw"); 23412 (void) ddi_create_minor_node(SD_DEVINFO(un), "wd", 23413 S_IFBLK, 23414 (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 23415 un->un_node_type, NULL); 23416 (void) ddi_create_minor_node(SD_DEVINFO(un), "wd,raw", 23417 S_IFCHR, 23418 (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 23419 un->un_node_type, NULL); 23420 } else 23421 mutex_exit(SD_MUTEX(un)); 23422 rval = sd_send_scsi_WRITE(un, buffer, user_efi.dki_length, 23423 user_efi.dki_lba, SD_PATH_DIRECT); 23424 if (rval == 0) { 23425 mutex_enter(SD_MUTEX(un)); 23426 un->un_f_geometry_is_valid = FALSE; 23427 mutex_exit(SD_MUTEX(un)); 23428 } 23429 } 23430 kmem_free(buffer, user_efi.dki_length); 23431 return (rval); 23432 } 23433 23434 /* 23435 * Function: sd_dkio_get_mboot 23436 * 23437 * Description: This routine is the driver entry point for handling user 23438 * requests to get the current device mboot (DKIOCGMBOOT) 23439 * 23440 * Arguments: dev - the device number 23441 * arg - pointer to user provided mboot structure specifying 23442 * the current mboot. 23443 * flag - this argument is a pass through to ddi_copyxxx() 23444 * directly from the mode argument of ioctl(). 
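 *
 *		An illustrative userland sketch (not part of the driver;
 *		the device path is an assumption):
 *
 *			struct mboot mb;
 *			int fd = open("/dev/rdsk/c0t0d0p0", O_RDONLY);
 *
 *			if (ioctl(fd, DKIOCGMBOOT, &mb) == 0)
 *				(void) printf("mboot signature 0x%x\n",
 *				    mb.signature);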
23445 * 23446 * Return Code: 0 23447 * EINVAL 23448 * EFAULT 23449 * ENXIO 23450 */ 23451 23452 static int 23453 sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag) 23454 { 23455 struct sd_lun *un; 23456 struct mboot *mboot; 23457 int rval; 23458 size_t buffer_size; 23459 23460 if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) || 23461 (un->un_state == SD_STATE_OFFLINE)) { 23462 return (ENXIO); 23463 } 23464 23465 #if defined(_SUNOS_VTOC_8) 23466 if ((!ISREMOVABLE(un)) || (arg == NULL)) { 23467 #elif defined(_SUNOS_VTOC_16) 23468 if (arg == NULL) { 23469 #endif 23470 return (EINVAL); 23471 } 23472 23473 /* 23474 * Read the mboot block, located at absolute block 0 on the target. 23475 */ 23476 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct mboot)); 23477 23478 SD_TRACE(SD_LOG_IO_PARTITION, un, 23479 "sd_dkio_get_mboot: allocation size: 0x%x\n", buffer_size); 23480 23481 mboot = kmem_zalloc(buffer_size, KM_SLEEP); 23482 if ((rval = sd_send_scsi_READ(un, mboot, buffer_size, 0, 23483 SD_PATH_STANDARD)) == 0) { 23484 if (ddi_copyout(mboot, (void *)arg, 23485 sizeof (struct mboot), flag) != 0) { 23486 rval = EFAULT; 23487 } 23488 } 23489 kmem_free(mboot, buffer_size); 23490 return (rval); 23491 } 23492 23493 23494 /* 23495 * Function: sd_dkio_set_mboot 23496 * 23497 * Description: This routine is the driver entry point for handling user 23498 * requests to validate and set the device master boot 23499 * (DKIOCSMBOOT). 23500 * 23501 * Arguments: dev - the device number 23502 * arg - pointer to user provided mboot structure used to set the 23503 * master boot. 23504 * flag - this argument is a pass through to ddi_copyxxx() 23505 * directly from the mode argument of ioctl(). 23506 * 23507 * Return Code: 0 23508 * EINVAL 23509 * EFAULT 23510 * ENXIO 23511 */ 23512 23513 static int 23514 sd_dkio_set_mboot(dev_t dev, caddr_t arg, int flag) 23515 { 23516 struct sd_lun *un = NULL; 23517 struct mboot *mboot = NULL; 23518 int rval; 23519 ushort_t magic; 23520 23521 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23522 return (ENXIO); 23523 } 23524 23525 ASSERT(!mutex_owned(SD_MUTEX(un))); 23526 23527 #if defined(_SUNOS_VTOC_8) 23528 if (!ISREMOVABLE(un)) { 23529 return (EINVAL); 23530 } 23531 #endif 23532 23533 if (arg == NULL) { 23534 return (EINVAL); 23535 } 23536 23537 mboot = kmem_zalloc(sizeof (struct mboot), KM_SLEEP); 23538 23539 if (ddi_copyin((const void *)arg, mboot, 23540 sizeof (struct mboot), flag) != 0) { 23541 kmem_free(mboot, (size_t)(sizeof (struct mboot))); 23542 return (EFAULT); 23543 } 23544 23545 /* Is this really a master boot record? */ 23546 magic = LE_16(mboot->signature); 23547 if (magic != MBB_MAGIC) { 23548 kmem_free(mboot, (size_t)(sizeof (struct mboot))); 23549 return (EINVAL); 23550 } 23551 23552 rval = sd_send_scsi_WRITE(un, mboot, un->un_sys_blocksize, 0, 23553 SD_PATH_STANDARD); 23554 23555 mutex_enter(SD_MUTEX(un)); 23556 #if defined(__i386) || defined(__amd64) 23557 if (rval == 0) { 23558 /* 23559 * mboot has been written successfully. 23560 * update the fdisk and vtoc tables in memory 23561 */ 23562 rval = sd_update_fdisk_and_vtoc(un); 23563 if ((un->un_f_geometry_is_valid == FALSE) || (rval != 0)) { 23564 mutex_exit(SD_MUTEX(un)); 23565 kmem_free(mboot, (size_t)(sizeof (struct mboot))); 23566 return (rval); 23567 } 23568 } 23569 23570 /* 23571 * If the mboot write fails, write the devid anyway, what can it hurt? 23572 * Also preserve the device id by writing to the disk acyl for the case 23573 * where a devid has been fabricated. 
	 */
	if (!ISREMOVABLE(un) && !ISCD(un) &&
	    (un->un_f_opt_fab_devid == TRUE)) {
		if (un->un_devid == NULL) {
			sd_register_devid(un, SD_DEVINFO(un),
			    SD_TARGET_IS_UNRESERVED);
		} else {
			/*
			 * The device id for this disk has been
			 * fabricated. Fabricated device id's are
			 * managed by storing them in the last 2
			 * available sectors on the drive. The device
			 * id must be preserved by writing it back out
			 * to this location.
			 */
			if (sd_write_deviceid(un) != 0) {
				ddi_devid_free(un->un_devid);
				un->un_devid = NULL;
			}
		}
	}
#else
	if (rval == 0) {
		/*
		 * mboot has been written successfully.
		 * set up the default geometry and VTOC
		 */
		if (un->un_blockcount <= DK_MAX_BLOCKS)
			sd_setup_default_geometry(un);
	}
#endif
	mutex_exit(SD_MUTEX(un));
	kmem_free(mboot, (size_t)(sizeof (struct mboot)));
	return (rval);
}


/*
 * Function: sd_setup_default_geometry
 *
 * Description: This local utility routine sets the default geometry as part of
 *		setting the device mboot.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Note: This may be redundant with sd_build_default_label.
 */

static void
sd_setup_default_geometry(struct sd_lun *un)
{
	/* zero out the soft state geometry and partition table. */
	bzero(&un->un_g, sizeof (struct dk_geom));
	bzero(&un->un_vtoc, sizeof (struct dk_vtoc));
	bzero(un->un_map, NDKMAP * (sizeof (struct dk_map)));
	un->un_asciilabel[0] = '\0';

	/*
	 * For the rpm, we use the minimum for the disk.
	 * For the head, cylinder and number of sectors per track:
	 * if the capacity is <= 1GB, use nhead = 64 and nsect = 32;
	 * else use nhead = 255 and nsect = 63.
	 * Note: the capacity should be equal to the C*H*S values.
	 * This will cause some truncation of size due to round off
	 * errors. For CD-ROMs, this truncation can have adverse side
	 * effects, so we return ncyl and nhead as 1. The nsect will
	 * overflow for most CD-ROMs, as nsect is of type ushort.
	 */
	if (ISCD(un)) {
		un->un_g.dkg_ncyl = 1;
		un->un_g.dkg_nhead = 1;
		un->un_g.dkg_nsect = un->un_blockcount;
	} else {
		if (un->un_blockcount <= 0x1000) {
			/* Needed for unlabeled SCSI floppies. */
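			/*
			 * For example, a 1.44MB floppy reports 2880
			 * blocks, giving dkg_nsect = 2880 / (2 * 80) = 18
			 * sectors per track (illustrative figures).
			 */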
			un->un_g.dkg_nhead = 2;
			un->un_g.dkg_ncyl = 80;
			un->un_g.dkg_pcyl = 80;
			un->un_g.dkg_nsect = un->un_blockcount / (2 * 80);
		} else if (un->un_blockcount <= 0x200000) {
			un->un_g.dkg_nhead = 64;
			un->un_g.dkg_nsect = 32;
			un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32);
		} else {
			un->un_g.dkg_nhead = 255;
			un->un_g.dkg_nsect = 63;
			un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63);
		}
		un->un_blockcount = un->un_g.dkg_ncyl *
		    un->un_g.dkg_nhead * un->un_g.dkg_nsect;
	}
	un->un_g.dkg_acyl = 0;
	un->un_g.dkg_bcyl = 0;
	un->un_g.dkg_intrlv = 1;
	un->un_g.dkg_rpm = 200;
	un->un_g.dkg_read_reinstruct = 0;
	un->un_g.dkg_write_reinstruct = 0;
	if (un->un_g.dkg_pcyl == 0) {
		un->un_g.dkg_pcyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl;
	}

	un->un_map['a'-'a'].dkl_cylno = 0;
	un->un_map['a'-'a'].dkl_nblk = un->un_blockcount;
	un->un_map['c'-'a'].dkl_cylno = 0;
	un->un_map['c'-'a'].dkl_nblk = un->un_blockcount;
	un->un_f_geometry_is_valid = FALSE;
}


#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_update_fdisk_and_vtoc
 *
 * Description: This local utility routine updates the device fdisk and vtoc
 *		as part of setting the device mboot.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 for success or errno-type return code.
 *
 * Note: x86: This looks like a duplicate of sd_validate_geometry(), but
 *	 these did exist separately in x86 sd.c!!!
 */

static int
sd_update_fdisk_and_vtoc(struct sd_lun *un)
{
	static char	labelstring[128];
	static char	buf[256];
	char		*label = 0;
	int		count;
	int		label_rc = 0;
	int		gvalid = un->un_f_geometry_is_valid;
	int		fdisk_rval;
	int		lbasize;
	int		capacity;

	ASSERT(mutex_owned(SD_MUTEX(un)));

	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		return (EINVAL);
	}

	if (un->un_f_blockcount_is_valid == FALSE) {
		return (EINVAL);
	}

#if defined(_SUNOS_VTOC_16)
	/*
	 * Set up the "whole disk" fdisk partition; this should always
	 * exist, regardless of whether the disk contains an fdisk table
	 * or vtoc.
	 */
	un->un_map[P0_RAW_DISK].dkl_cylno = 0;
	un->un_map[P0_RAW_DISK].dkl_nblk = un->un_blockcount;
#endif	/* defined(_SUNOS_VTOC_16) */

	/*
	 * Copy the lbasize and capacity so that if they're reset while
	 * we're not holding the SD_MUTEX(un), we will continue to use
	 * valid values after the SD_MUTEX(un) is reacquired.
	 */
	lbasize = un->un_tgt_blocksize;
	capacity = un->un_blockcount;

	/*
	 * Refresh the logical and physical geometry caches.
	 * (data from mode sense format/rigid disk geometry pages,
	 * and scsi_ifgetcap("geometry").)
	 */
	sd_resync_geom_caches(un, capacity, lbasize, SD_PATH_DIRECT);

	/*
	 * Only DIRECT ACCESS devices will have Sun labels.
23750 * CDs supposedly have a Sun label, too. 23751 */ 23752 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) { 23753 fdisk_rval = sd_read_fdisk(un, capacity, lbasize, 23754 SD_PATH_DIRECT); 23755 if (fdisk_rval == SD_CMD_FAILURE) { 23756 ASSERT(mutex_owned(SD_MUTEX(un))); 23757 return (EIO); 23758 } 23759 23760 if (fdisk_rval == SD_CMD_RESERVATION_CONFLICT) { 23761 ASSERT(mutex_owned(SD_MUTEX(un))); 23762 return (EACCES); 23763 } 23764 23765 if (un->un_solaris_size <= DK_LABEL_LOC) { 23766 /* 23767 * Found an fdisk table but no Solaris partition entry, 23768 * so don't call sd_uselabel() and don't create 23769 * a default label. 23770 */ 23771 label_rc = 0; 23772 un->un_f_geometry_is_valid = TRUE; 23773 goto no_solaris_partition; 23774 } 23775 23776 #if defined(_SUNOS_VTOC_8) 23777 label = (char *)un->un_asciilabel; 23778 #elif defined(_SUNOS_VTOC_16) 23779 label = (char *)un->un_vtoc.v_asciilabel; 23780 #else 23781 #error "No VTOC format defined." 23782 #endif 23783 } else if (capacity < 0) { 23784 ASSERT(mutex_owned(SD_MUTEX(un))); 23785 return (EINVAL); 23786 } 23787 23788 /* 23789 * For removable media, we reach here if we have found a 23790 * SOLARIS PARTITION. 23791 * If un_f_geometry_is_valid is FALSE, it indicates that the SOLARIS 23792 * PARTITION has changed from the previous one; hence we set up a 23793 * default VTOC in this case. 23794 */ 23795 if (un->un_f_geometry_is_valid == FALSE) { 23796 sd_build_default_label(un); 23797 label_rc = 0; 23798 } 23799 23800 no_solaris_partition: 23801 if ((!ISREMOVABLE(un) || 23802 (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) && 23803 (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) { 23804 /* 23805 * Print out a message indicating who and what we are. 23806 * We do this only when we happen to really validate the 23807 * geometry. We may call sd_validate_geometry() at other 23808 * times, e.g. for ioctl()s like Get VTOC, in which case we 23809 * don't want to print the label. 23810 * If the geometry is valid, print the label string; 23811 * else print the vendor and product info, if available. 23812 */ 23813 if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) { 23814 SD_INFO(SD_LOG_IOCTL_DKIO, un, "?<%s>\n", label); 23815 } else { 23816 mutex_enter(&sd_label_mutex); 23817 sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX, 23818 labelstring); 23819 sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX, 23820 &labelstring[64]); 23821 (void) sprintf(buf, "?Vendor '%s', product '%s'", 23822 labelstring, &labelstring[64]); 23823 if (un->un_f_blockcount_is_valid == TRUE) { 23824 (void) sprintf(&buf[strlen(buf)], 23825 ", %" PRIu64 " %u byte blocks\n", 23826 un->un_blockcount, 23827 un->un_tgt_blocksize); 23828 } else { 23829 (void) sprintf(&buf[strlen(buf)], 23830 ", (unknown capacity)\n"); 23831 } 23832 SD_INFO(SD_LOG_IOCTL_DKIO, un, buf); 23833 mutex_exit(&sd_label_mutex); 23834 } 23835 } 23836 23837 #if defined(_SUNOS_VTOC_16) 23838 /* 23839 * If we have valid geometry, set up the remaining fdisk partitions. 23840 * Note that dkl_cylno is not used for the fdisk map entries, so 23841 * we set it to an entirely bogus value.
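 *
 * For the NDKMAP loop below, an illustrative computation with
 * hypothetical numbers: under _SUNOS_VTOC_8 a slice whose dkl_cylno is
 * 2 on a disk with the default 255-head/63-sector geometry gets
 *
 *	un_offset = 255 * 63 * 2 = 32130 blocks,
 *
 * while under _SUNOS_VTOC_16 the offset is simply the partition's
 * p_start biased by un_solaris_offset.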
23842 */ 23843 for (count = 0; count < FD_NUMPART; count++) { 23844 un->un_map[FDISK_P1 + count].dkl_cylno = -1; 23845 un->un_map[FDISK_P1 + count].dkl_nblk = 23846 un->un_fmap[count].fmap_nblk; 23847 un->un_offset[FDISK_P1 + count] = 23848 un->un_fmap[count].fmap_start; 23849 } 23850 #endif 23851 23852 for (count = 0; count < NDKMAP; count++) { 23853 #if defined(_SUNOS_VTOC_8) 23854 struct dk_map *lp = &un->un_map[count]; 23855 un->un_offset[count] = 23856 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 23857 #elif defined(_SUNOS_VTOC_16) 23858 struct dkl_partition *vp = &un->un_vtoc.v_part[count]; 23859 un->un_offset[count] = vp->p_start + un->un_solaris_offset; 23860 #else 23861 #error "No VTOC format defined." 23862 #endif 23863 } 23864 23865 ASSERT(mutex_owned(SD_MUTEX(un))); 23866 return (label_rc); 23867 } 23868 #endif 23869 23870 23871 /* 23872 * Function: sd_check_media 23873 * 23874 * Description: This utility routine implements the functionality for the 23875 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 23876 * driver state changes from that specified by the user 23877 * (inserted or ejected). For example, if the user specifies 23878 * DKIO_EJECTED and the current media state is inserted this 23879 * routine will immediately return DKIO_INSERTED. However, if the 23880 * current media state is not inserted the user thread will be 23881 * blocked until the drive state changes. If DKIO_NONE is specified 23882 * the user thread will block until a drive state change occurs. 23883 * 23884 * Arguments: dev - the device number 23885 * state - user pointer to a dkio_state, updated with the current 23886 * drive state at return. 23887 * 23888 * Return Code: ENXIO 23889 * EIO 23890 * EAGAIN 23891 * EINTR 23892 */ 23893 23894 static int 23895 sd_check_media(dev_t dev, enum dkio_state state) 23896 { 23897 struct sd_lun *un = NULL; 23898 enum dkio_state prev_state; 23899 opaque_t token = NULL; 23900 int rval = 0; 23901 23902 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23903 return (ENXIO); 23904 } 23905 23906 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 23907 23908 mutex_enter(SD_MUTEX(un)); 23909 23910 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 23911 "state=%x, mediastate=%x\n", state, un->un_mediastate); 23912 23913 prev_state = un->un_mediastate; 23914 23915 /* is there anything to do? */ 23916 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 23917 /* 23918 * submit the request to the scsi_watch service; 23919 * scsi_media_watch_cb() does the real work 23920 */ 23921 mutex_exit(SD_MUTEX(un)); 23922 23923 /* 23924 * This change handles the case where a scsi watch request is 23925 * added to a device that is powered down. To accomplish this 23926 * we power up the device before adding the scsi watch request, 23927 * since the scsi watch sends a TUR directly to the device 23928 * which the device cannot handle if it is powered down. 23929 */ 23930 if (sd_pm_entry(un) != DDI_SUCCESS) { 23931 mutex_enter(SD_MUTEX(un)); 23932 goto done; 23933 } 23934 23935 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 23936 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23937 (caddr_t)dev); 23938 23939 sd_pm_exit(un); 23940 23941 mutex_enter(SD_MUTEX(un)); 23942 if (token == NULL) { 23943 rval = EAGAIN; 23944 goto done; 23945 } 23946 23947 /* 23948 * This is a special case IOCTL that doesn't return 23949 * until the media state changes. 
Routine sdpower 23950 * knows about and handles this, so don't count it 23951 * as an active cmd in the driver, which would 23952 * keep the device busy to the pm framework. 23953 * If the count isn't decremented, the device can't 23954 * be powered down. 23955 */ 23956 un->un_ncmds_in_driver--; 23957 ASSERT(un->un_ncmds_in_driver >= 0); 23958 23959 /* 23960 * If a prior request had been made, this will be the same 23961 * token, as scsi_watch was designed that way. 23962 */ 23963 un->un_swr_token = token; 23964 un->un_specified_mediastate = state; 23965 23966 /* 23967 * Now wait for the media change. 23968 * We will not be signalled unless mediastate == state, but it is 23969 * still better to test for this condition, since there is a 23970 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED. 23971 */ 23972 SD_TRACE(SD_LOG_COMMON, un, 23973 "sd_check_media: waiting for media state change\n"); 23974 while (un->un_mediastate == state) { 23975 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 23976 SD_TRACE(SD_LOG_COMMON, un, 23977 "sd_check_media: waiting for media state " 23978 "was interrupted\n"); 23979 un->un_ncmds_in_driver++; 23980 rval = EINTR; 23981 goto done; 23982 } 23983 SD_TRACE(SD_LOG_COMMON, un, 23984 "sd_check_media: received signal, state=%x\n", 23985 un->un_mediastate); 23986 } 23987 /* 23988 * Increment the counter to indicate the device once again 23989 * has an active outstanding cmd. 23990 */ 23991 un->un_ncmds_in_driver++; 23992 } 23993 23994 /* Invalidate geometry */ 23995 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 23996 sr_ejected(un); 23997 } 23998 23999 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 24000 uint64_t capacity; 24001 uint_t lbasize; 24002 24003 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 24004 mutex_exit(SD_MUTEX(un)); 24005 /* 24006 * Since the following routines use SD_PATH_DIRECT, we must 24007 * call PM directly before the upcoming disk accesses. This 24008 * may cause the disk to be powered up and spun up. 24009 */ 24010 24011 if (sd_pm_entry(un) == DDI_SUCCESS) { 24012 rval = sd_send_scsi_READ_CAPACITY(un, 24013 &capacity, 24014 &lbasize, SD_PATH_DIRECT); 24015 if (rval != 0) { 24016 sd_pm_exit(un); 24017 mutex_enter(SD_MUTEX(un)); 24018 goto done; 24019 } 24020 } else { 24021 rval = EIO; 24022 mutex_enter(SD_MUTEX(un)); 24023 goto done; 24024 } 24025 mutex_enter(SD_MUTEX(un)); 24026 24027 sd_update_block_info(un, lbasize, capacity); 24028 24029 un->un_f_geometry_is_valid = FALSE; 24030 (void) sd_validate_geometry(un, SD_PATH_DIRECT); 24031 24032 mutex_exit(SD_MUTEX(un)); 24033 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 24034 SD_PATH_DIRECT); 24035 sd_pm_exit(un); 24036 24037 mutex_enter(SD_MUTEX(un)); 24038 } 24039 done: 24040 un->un_f_watcht_stopped = FALSE; 24041 if (un->un_swr_token) { 24042 /* 24043 * Use of this local token and the mutex ensures that we avoid 24044 * some race conditions associated with terminating the 24045 * scsi watch. 24046 */ 24047 token = un->un_swr_token; 24048 un->un_swr_token = (opaque_t)NULL; 24049 mutex_exit(SD_MUTEX(un)); 24050 (void) scsi_watch_request_terminate(token, 24051 SCSI_WATCH_TERMINATE_WAIT); 24052 mutex_enter(SD_MUTEX(un)); 24053 } 24054 24055 /* 24056 * Update the capacity kstat value if there was no media previously 24057 * (capacity kstat is 0) and media has been inserted 24058 * (un_f_blockcount_is_valid == TRUE). 24059 * This is a more generic way than checking for ISREMOVABLE.
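 *
 * As an illustration of the interface this routine serves, a user
 * program typically drives DKIOCSTATE as below (sketch only; the
 * device path is hypothetical):
 *
 *	int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY | O_NDELAY);
 *	enum dkio_state state = DKIO_NONE;
 *	while (ioctl(fd, DKIOCSTATE, &state) == 0) {
 *		if (state == DKIO_INSERTED)
 *			break;
 *	}
 *
 * Each call blocks until the media state differs from the value passed
 * in, which is exactly the handshake implemented above.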
24060 */ 24061 if (un->un_errstats) { 24062 struct sd_errstats *stp = NULL; 24063 24064 stp = (struct sd_errstats *)un->un_errstats->ks_data; 24065 if ((stp->sd_capacity.value.ui64 == 0) && 24066 (un->un_f_blockcount_is_valid == TRUE)) { 24067 stp->sd_capacity.value.ui64 = 24068 (uint64_t)((uint64_t)un->un_blockcount * 24069 un->un_sys_blocksize); 24070 } 24071 } 24072 mutex_exit(SD_MUTEX(un)); 24073 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 24074 return (rval); 24075 } 24076 24077 24078 /* 24079 * Function: sd_delayed_cv_broadcast 24080 * 24081 * Description: Delayed cv_broadcast to allow the target to recover from media 24082 * insertion. 24083 * 24084 * Arguments: arg - driver soft state (unit) structure 24085 */ 24086 24087 static void 24088 sd_delayed_cv_broadcast(void *arg) 24089 { 24090 struct sd_lun *un = arg; 24091 24092 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 24093 24094 mutex_enter(SD_MUTEX(un)); 24095 un->un_dcvb_timeid = NULL; 24096 cv_broadcast(&un->un_state_cv); 24097 mutex_exit(SD_MUTEX(un)); 24098 } 24099 24100 24101 /* 24102 * Function: sd_media_watch_cb 24103 * 24104 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 24105 * routine processes the TUR sense data and updates the driver 24106 * state if a transition has occurred. The user thread 24107 * (sd_check_media) is then signalled. 24108 * 24109 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24110 * among multiple watches that share this callback function 24111 * resultp - scsi watch facility result packet containing scsi 24112 * packet, status byte and sense data 24113 * 24114 * Return Code: 0 for success, -1 for failure 24115 */ 24116 24117 static int 24118 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24119 { 24120 struct sd_lun *un; 24121 struct scsi_status *statusp = resultp->statusp; 24122 struct scsi_extended_sense *sensep = resultp->sensep; 24123 enum dkio_state state = DKIO_NONE; 24124 dev_t dev = (dev_t)arg; 24125 uchar_t actual_sense_length; 24126 24127 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24128 return (-1); 24129 } 24130 actual_sense_length = resultp->actual_sense_length; 24131 24132 mutex_enter(SD_MUTEX(un)); 24133 SD_TRACE(SD_LOG_COMMON, un, 24134 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 24135 *((char *)statusp), (void *)sensep, actual_sense_length); 24136 24137 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 24138 un->un_mediastate = DKIO_DEV_GONE; 24139 cv_broadcast(&un->un_state_cv); 24140 mutex_exit(SD_MUTEX(un)); 24141 24142 return (0); 24143 } 24144 24145 /* 24146 * If there was a check condition, then sensep points to valid sense 24147 * data; if the status was not a check condition but a reservation or 24148 * busy status, then the new state is DKIO_NONE. 24149 */ 24150 if (sensep != NULL) { 24151 SD_INFO(SD_LOG_COMMON, un, 24152 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 24153 sensep->es_key, sensep->es_add_code, sensep->es_qual_code); 24154 /* This routine only uses up to 13 bytes of sense data. */ 24155 if (actual_sense_length >= 13) { 24156 if (sensep->es_key == KEY_UNIT_ATTENTION) { 24157 if (sensep->es_add_code == 0x28) { 24158 state = DKIO_INSERTED; 24159 } 24160 } else { 24161 /* 24162 * A 02/04/02 (NOT READY, initializing 24163 * command required) means that the host 24164 * should send a start command. Explicitly 24165 * leave the media state as is 24166 * (inserted), since the media is present 24167 * and the host has stopped the device for PM reasons.
The next true read/write 24168 * to this media will bring the 24169 * device back to the right state for 24170 * media access. 24171 */ 24172 if ((sensep->es_key == KEY_NOT_READY) && 24173 (sensep->es_add_code == 0x3a)) { 24174 state = DKIO_EJECTED; 24175 } 24176 24177 /* 24178 * If the drive is busy with an operation 24179 * or a long write, keep the media in an 24180 * inserted state. 24181 */ 24182 24183 if ((sensep->es_key == KEY_NOT_READY) && 24184 (sensep->es_add_code == 0x04) && 24185 ((sensep->es_qual_code == 0x02) || 24186 (sensep->es_qual_code == 0x07) || 24187 (sensep->es_qual_code == 0x08))) { 24188 state = DKIO_INSERTED; 24189 } 24190 } 24191 } 24192 } else if ((*((char *)statusp) == STATUS_GOOD) && 24193 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 24194 state = DKIO_INSERTED; 24195 } 24196 24197 SD_TRACE(SD_LOG_COMMON, un, 24198 "sd_media_watch_cb: state=%x, specified=%x\n", 24199 state, un->un_specified_mediastate); 24200 24201 /* 24202 * Now signal the waiting thread if this is *not* the specified state; 24203 * delay the signal if the state is DKIO_INSERTED, to allow the target 24204 * to recover. 24205 */ 24206 if (state != un->un_specified_mediastate) { 24207 un->un_mediastate = state; 24208 if (state == DKIO_INSERTED) { 24209 /* 24210 * Delay the signal to give the drive a chance 24211 * to do what it apparently needs to do. 24212 */ 24213 SD_TRACE(SD_LOG_COMMON, un, 24214 "sd_media_watch_cb: delayed cv_broadcast\n"); 24215 if (un->un_dcvb_timeid == NULL) { 24216 un->un_dcvb_timeid = 24217 timeout(sd_delayed_cv_broadcast, un, 24218 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 24219 } 24220 } else { 24221 SD_TRACE(SD_LOG_COMMON, un, 24222 "sd_media_watch_cb: immediate cv_broadcast\n"); 24223 cv_broadcast(&un->un_state_cv); 24224 } 24225 } 24226 mutex_exit(SD_MUTEX(un)); 24227 return (0); 24228 } 24229 24230 24231 /* 24232 * Function: sd_dkio_get_temp 24233 * 24234 * Description: This routine is the driver entry point for handling ioctl 24235 * requests to get the disk temperature. 24236 * 24237 * Arguments: dev - the device number 24238 * arg - pointer to user provided dk_temperature structure. 24239 * flag - this argument is a pass through to ddi_copyxxx() 24240 * directly from the mode argument of ioctl(). 24241 * 24242 * Return Code: 0 24243 * EFAULT 24244 * ENXIO 24245 * EAGAIN 24246 */ 24247 24248 static int 24249 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 24250 { 24251 struct sd_lun *un = NULL; 24252 struct dk_temperature *dktemp = NULL; 24253 uchar_t *temperature_page; 24254 int rval = 0; 24255 int path_flag = SD_PATH_STANDARD; 24256 24257 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24258 return (ENXIO); 24259 } 24260 24261 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 24262 24263 /* Copyin the disk temp argument to get the user flags */ 24264 if (ddi_copyin((void *)arg, dktemp, 24265 sizeof (struct dk_temperature), flag) != 0) { 24266 rval = EFAULT; 24267 goto done; 24268 } 24269 24270 /* Initialize the temperature to invalid. */ 24271 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24272 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24273 24274 /* 24275 * Note: Investigate removing the "bypass pm" semantic. 24276 * Can we just bypass PM always?
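 *
 * For reference, a sketch of how a caller exercises that semantic from
 * user level (illustrative only; error handling omitted):
 *
 *	struct dk_temperature dkt;
 *	bzero(&dkt, sizeof (dkt));
 *	dkt.dkt_flags = DKT_BYPASS_PM;
 *	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
 *	    dkt.dkt_cur_temp != DKT_INVALID_TEMP)
 *		(void) printf("%d C\n", dkt.dkt_cur_temp);
 *
 * With DKT_BYPASS_PM set, a drive sitting in low power is not spun up;
 * the request fails with EAGAIN instead, as coded below.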
24277 */ 24278 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 24279 path_flag = SD_PATH_DIRECT; 24280 ASSERT(!mutex_owned(&un->un_pm_mutex)); 24281 mutex_enter(&un->un_pm_mutex); 24282 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 24283 /* 24284 * If DKT_BYPASS_PM is set and the drive happens to be 24285 * in low power mode, we cannot wake it up; we need to 24286 * return EAGAIN. 24287 */ 24288 mutex_exit(&un->un_pm_mutex); 24289 rval = EAGAIN; 24290 goto done; 24291 } else { 24292 /* 24293 * Indicate to PM that the device is busy. This is 24294 * required to avoid a race, i.e. the ioctl is issuing a 24295 * command and the pm framework brings down the device 24296 * to low power mode (possible power cut-off on some 24297 * platforms). 24298 */ 24299 mutex_exit(&un->un_pm_mutex); 24300 if (sd_pm_entry(un) != DDI_SUCCESS) { 24301 rval = EAGAIN; 24302 goto done; 24303 } 24304 } 24305 } 24306 24307 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 24308 24309 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 24310 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 24311 goto done2; 24312 } 24313 24314 /* 24315 * For the current temperature, verify that the parameter length is 24316 * 0x02 and the parameter code is 0x00. 24317 */ 24318 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 24319 (temperature_page[5] == 0x00)) { 24320 if (temperature_page[9] == 0xFF) { 24321 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24322 } else { 24323 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 24324 } 24325 } 24326 24327 /* 24328 * For the reference temperature, verify that the parameter 24329 * length is 0x02 and the parameter code is 0x01. 24330 */ 24331 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 24332 (temperature_page[11] == 0x01)) { 24333 if (temperature_page[15] == 0xFF) { 24334 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24335 } else { 24336 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 24337 } 24338 } 24339 24340 /* Do the copyout regardless of the temperature command's status. */ 24341 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 24342 flag) != 0) { 24343 rval = EFAULT; 24344 } 24345 24346 done2: 24347 if (path_flag == SD_PATH_DIRECT) { 24348 sd_pm_exit(un); 24349 } 24350 24351 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 24352 done: 24353 if (dktemp != NULL) { 24354 kmem_free(dktemp, sizeof (struct dk_temperature)); 24355 } 24356 24357 return (rval); 24358 } 24359 24360 24361 /* 24362 * Function: sd_log_page_supported 24363 * 24364 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 24365 * supported log pages. 24366 * 24367 * Arguments: un - driver soft state (unit) structure 24368 * log_page - the log page to look for in the list of 24369 * supported log pages 24370 * Return Code: -1 - on error (log sense is optional and may not be supported). 24371 * 0 - log page not found. 24372 * 1 - log page found. 24373 */ 24374 24375 static int 24376 sd_log_page_supported(struct sd_lun *un, int log_page) 24377 { 24378 uchar_t *log_page_data; 24379 int i; 24380 int match = 0; 24381 int log_size; 24382 24383 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 24384 24385 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 24386 SD_PATH_DIRECT) != 0) { 24387 SD_ERROR(SD_LOG_COMMON, un, 24388 "sd_log_page_supported: failed log page retrieval\n"); 24389 kmem_free(log_page_data, 0xFF); 24390 return (-1); 24391 } 24392 log_size = log_page_data[3]; 24393 24394 /* 24395 * The list of supported log pages starts at the fourth byte.
Check 24396 * until we run out of log pages or a match is found. 24397 */ 24398 for (i = 4; (i < (log_size + 4)) && !match; i++) { 24399 if (log_page_data[i] == log_page) { 24400 match++; 24401 } 24402 } 24403 kmem_free(log_page_data, 0xFF); 24404 return (match); 24405 } 24406 24407 24408 /* 24409 * Function: sd_mhdioc_failfast 24410 * 24411 * Description: This routine is the driver entry point for handling ioctl 24412 * requests to enable/disable the multihost failfast option. 24413 * (MHIOCENFAILFAST) 24414 * 24415 * Arguments: dev - the device number 24416 * arg - user specified probing interval. 24417 * flag - this argument is a pass through to ddi_copyxxx() 24418 * directly from the mode argument of ioctl(). 24419 * 24420 * Return Code: 0 24421 * EFAULT 24422 * ENXIO 24423 */ 24424 24425 static int 24426 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 24427 { 24428 struct sd_lun *un = NULL; 24429 int mh_time; 24430 int rval = 0; 24431 24432 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24433 return (ENXIO); 24434 } 24435 24436 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24437 return (EFAULT); 24438 24439 if (mh_time) { 24440 mutex_enter(SD_MUTEX(un)); 24441 un->un_resvd_status |= SD_FAILFAST; 24442 mutex_exit(SD_MUTEX(un)); 24443 /* 24444 * If mh_time is INT_MAX, then this ioctl is being used for 24445 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 24446 */ 24447 if (mh_time != INT_MAX) { 24448 rval = sd_check_mhd(dev, mh_time); 24449 } 24450 } else { 24451 (void) sd_check_mhd(dev, 0); 24452 mutex_enter(SD_MUTEX(un)); 24453 un->un_resvd_status &= ~SD_FAILFAST; 24454 mutex_exit(SD_MUTEX(un)); 24455 } 24456 return (rval); 24457 } 24458 24459 24460 /* 24461 * Function: sd_mhdioc_takeown 24462 * 24463 * Description: This routine is the driver entry point for handling ioctl 24464 * requests to forcefully acquire exclusive access rights to the 24465 * multihost disk (MHIOCTKOWN). 24466 * 24467 * Arguments: dev - the device number 24468 * arg - user provided structure specifying the delay 24469 * parameters in milliseconds 24470 * flag - this argument is a pass through to ddi_copyxxx() 24471 * directly from the mode argument of ioctl(). 24472 * 24473 * Return Code: 0 24474 * EFAULT 24475 * ENXIO 24476 */ 24477 24478 static int 24479 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24480 { 24481 struct sd_lun *un = NULL; 24482 struct mhioctkown *tkown = NULL; 24483 int rval = 0; 24484 24485 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24486 return (ENXIO); 24487 } 24488 24489 if (arg != NULL) { 24490 tkown = (struct mhioctkown *) 24491 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24492 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24493 if (rval != 0) { 24494 rval = EFAULT; 24495 goto error; 24496 } 24497 } 24498 24499 rval = sd_take_ownership(dev, tkown); 24500 mutex_enter(SD_MUTEX(un)); 24501 if (rval == 0) { 24502 un->un_resvd_status |= SD_RESERVE; 24503 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24504 sd_reinstate_resv_delay = 24505 tkown->reinstate_resv_delay * 1000; 24506 } else { 24507 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24508 } 24509 /* 24510 * Give the scsi_watch routine interval set by 24511 * the MHIOCENFAILFAST ioctl precedence here. 
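 *
 * For context, the MHIOCTKOWN ioctl served here is typically driven
 * from user level as below (sketch only; the delay values are
 * illustrative milliseconds, and passing 0 keeps the driver default):
 *
 *	struct mhioctkown tk;
 *	bzero(&tk, sizeof (tk));
 *	tk.reinstate_resv_delay = 1000;
 *	tk.min_ownership_delay = 6000;
 *	tk.max_ownership_delay = 30000;
 *	rv = ioctl(fd, MHIOCTKOWN, &tk);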
24512 */ 24513 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24514 mutex_exit(SD_MUTEX(un)); 24515 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 24516 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24517 "sd_mhdioc_takeown : %d\n", 24518 sd_reinstate_resv_delay); 24519 } else { 24520 mutex_exit(SD_MUTEX(un)); 24521 } 24522 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24523 sd_mhd_reset_notify_cb, (caddr_t)un); 24524 } else { 24525 un->un_resvd_status &= ~SD_RESERVE; 24526 mutex_exit(SD_MUTEX(un)); 24527 } 24528 24529 error: 24530 if (tkown != NULL) { 24531 kmem_free(tkown, sizeof (struct mhioctkown)); 24532 } 24533 return (rval); 24534 } 24535 24536 24537 /* 24538 * Function: sd_mhdioc_release 24539 * 24540 * Description: This routine is the driver entry point for handling ioctl 24541 * requests to release exclusive access rights to the multihost 24542 * disk (MHIOCRELEASE). 24543 * 24544 * Arguments: dev - the device number 24545 * 24546 * Return Code: 0 24547 * ENXIO 24548 */ 24549 24550 static int 24551 sd_mhdioc_release(dev_t dev) 24552 { 24553 struct sd_lun *un = NULL; 24554 timeout_id_t resvd_timeid_save; 24555 int resvd_status_save; 24556 int rval = 0; 24557 24558 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24559 return (ENXIO); 24560 } 24561 24562 mutex_enter(SD_MUTEX(un)); 24563 resvd_status_save = un->un_resvd_status; 24564 un->un_resvd_status &= 24565 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24566 if (un->un_resvd_timeid) { 24567 resvd_timeid_save = un->un_resvd_timeid; 24568 un->un_resvd_timeid = NULL; 24569 mutex_exit(SD_MUTEX(un)); 24570 (void) untimeout(resvd_timeid_save); 24571 } else { 24572 mutex_exit(SD_MUTEX(un)); 24573 } 24574 24575 /* 24576 * destroy any pending timeout thread that may be attempting to 24577 * reinstate reservation on this device. 24578 */ 24579 sd_rmv_resv_reclaim_req(dev); 24580 24581 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24582 mutex_enter(SD_MUTEX(un)); 24583 if ((un->un_mhd_token) && 24584 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24585 mutex_exit(SD_MUTEX(un)); 24586 (void) sd_check_mhd(dev, 0); 24587 } else { 24588 mutex_exit(SD_MUTEX(un)); 24589 } 24590 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24591 sd_mhd_reset_notify_cb, (caddr_t)un); 24592 } else { 24593 /* 24594 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24595 */ 24596 mutex_enter(SD_MUTEX(un)); 24597 un->un_resvd_status = resvd_status_save; 24598 mutex_exit(SD_MUTEX(un)); 24599 } 24600 return (rval); 24601 } 24602 24603 24604 /* 24605 * Function: sd_mhdioc_register_devid 24606 * 24607 * Description: This routine is the driver entry point for handling ioctl 24608 * requests to register the device id (MHIOCREREGISTERDEVID). 
24609 * 24610 * Note: The implementation for this ioctl has been updated to 24611 * be consistent with the original PSARC case (1999/357) 24612 * (4375899, 4241671, 4220005) 24613 * 24614 * Arguments: dev - the device number 24615 * 24616 * Return Code: 0 24617 * ENXIO 24618 */ 24619 24620 static int 24621 sd_mhdioc_register_devid(dev_t dev) 24622 { 24623 struct sd_lun *un = NULL; 24624 int rval = 0; 24625 24626 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24627 return (ENXIO); 24628 } 24629 24630 ASSERT(!mutex_owned(SD_MUTEX(un))); 24631 24632 mutex_enter(SD_MUTEX(un)); 24633 24634 /* If a devid already exists, de-register it */ 24635 if (un->un_devid != NULL) { 24636 ddi_devid_unregister(SD_DEVINFO(un)); 24637 /* 24638 * After unregistering the devid, we need to free the devid memory. 24639 */ 24640 ddi_devid_free(un->un_devid); 24641 un->un_devid = NULL; 24642 } 24643 24644 /* Check for reservation conflict */ 24645 mutex_exit(SD_MUTEX(un)); 24646 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 24647 mutex_enter(SD_MUTEX(un)); 24648 24649 switch (rval) { 24650 case 0: 24651 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24652 break; 24653 case EACCES: 24654 break; 24655 default: 24656 rval = EIO; 24657 } 24658 24659 mutex_exit(SD_MUTEX(un)); 24660 return (rval); 24661 } 24662 24663 24664 /* 24665 * Function: sd_mhdioc_inkeys 24666 * 24667 * Description: This routine is the driver entry point for handling ioctl 24668 * requests to issue the SCSI-3 Persistent In Read Keys command 24669 * to the device (MHIOCGRP_INKEYS). 24670 * 24671 * Arguments: dev - the device number 24672 * arg - user provided in_keys structure 24673 * flag - this argument is a pass through to ddi_copyxxx() 24674 * directly from the mode argument of ioctl(). 24675 * 24676 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 24677 * ENXIO 24678 * EFAULT 24679 */ 24680 24681 static int 24682 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 24683 { 24684 struct sd_lun *un; 24685 mhioc_inkeys_t inkeys; 24686 int rval = 0; 24687 24688 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24689 return (ENXIO); 24690 } 24691 24692 #ifdef _MULTI_DATAMODEL 24693 switch (ddi_model_convert_from(flag & FMODELS)) { 24694 case DDI_MODEL_ILP32: { 24695 struct mhioc_inkeys32 inkeys32; 24696 24697 if (ddi_copyin(arg, &inkeys32, 24698 sizeof (struct mhioc_inkeys32), flag) != 0) { 24699 return (EFAULT); 24700 } 24701 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 24702 if ((rval = sd_persistent_reservation_in_read_keys(un, 24703 &inkeys, flag)) != 0) { 24704 return (rval); 24705 } 24706 inkeys32.generation = inkeys.generation; 24707 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 24708 flag) != 0) { 24709 return (EFAULT); 24710 } 24711 break; 24712 } 24713 case DDI_MODEL_NONE: 24714 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 24715 flag) != 0) { 24716 return (EFAULT); 24717 } 24718 if ((rval = sd_persistent_reservation_in_read_keys(un, 24719 &inkeys, flag)) != 0) { 24720 return (rval); 24721 } 24722 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 24723 flag) != 0) { 24724 return (EFAULT); 24725 } 24726 break; 24727 } 24728 24729 #else /* !
_MULTI_DATAMODEL */ 24730 24731 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 24732 return (EFAULT); 24733 } 24734 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 24735 if (rval != 0) { 24736 return (rval); 24737 } 24738 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 24739 return (EFAULT); 24740 } 24741 24742 #endif /* _MULTI_DATAMODEL */ 24743 24744 return (rval); 24745 } 24746 24747 24748 /* 24749 * Function: sd_mhdioc_inresv 24750 * 24751 * Description: This routine is the driver entry point for handling ioctl 24752 * requests to issue the SCSI-3 Persistent In Read Reservations 24753 * command to the device (MHIOCGRP_INRESV). 24754 * 24755 * Arguments: dev - the device number 24756 * arg - user provided in_resv structure 24757 * flag - this argument is a pass through to ddi_copyxxx() 24758 * directly from the mode argument of ioctl(). 24759 * 24760 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24761 * ENXIO 24762 * EFAULT 24763 */ 24764 24765 static int 24766 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24767 { 24768 struct sd_lun *un; 24769 mhioc_inresvs_t inresvs; 24770 int rval = 0; 24771 24772 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24773 return (ENXIO); 24774 } 24775 24776 #ifdef _MULTI_DATAMODEL 24777 24778 switch (ddi_model_convert_from(flag & FMODELS)) { 24779 case DDI_MODEL_ILP32: { 24780 struct mhioc_inresvs32 inresvs32; 24781 24782 if (ddi_copyin(arg, &inresvs32, 24783 sizeof (struct mhioc_inresvs32), flag) != 0) { 24784 return (EFAULT); 24785 } 24786 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24787 if ((rval = sd_persistent_reservation_in_read_resv(un, 24788 &inresvs, flag)) != 0) { 24789 return (rval); 24790 } 24791 inresvs32.generation = inresvs.generation; 24792 if (ddi_copyout(&inresvs32, arg, 24793 sizeof (struct mhioc_inresvs32), flag) != 0) { 24794 return (EFAULT); 24795 } 24796 break; 24797 } 24798 case DDI_MODEL_NONE: 24799 if (ddi_copyin(arg, &inresvs, 24800 sizeof (mhioc_inresvs_t), flag) != 0) { 24801 return (EFAULT); 24802 } 24803 if ((rval = sd_persistent_reservation_in_read_resv(un, 24804 &inresvs, flag)) != 0) { 24805 return (rval); 24806 } 24807 if (ddi_copyout(&inresvs, arg, 24808 sizeof (mhioc_inresvs_t), flag) != 0) { 24809 return (EFAULT); 24810 } 24811 break; 24812 } 24813 24814 #else /* ! _MULTI_DATAMODEL */ 24815 24816 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24817 return (EFAULT); 24818 } 24819 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24820 if (rval != 0) { 24821 return (rval); 24822 } 24823 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24824 return (EFAULT); 24825 } 24826 24827 #endif /* ! _MULTI_DATAMODEL */ 24828 24829 return (rval); 24830 } 24831 24832 24833 /* 24834 * The following routines support the clustering functionality described below 24835 * and implement lost reservation reclaim functionality. 24836 * 24837 * Clustering 24838 * ---------- 24839 * The clustering code uses two different, independent forms of SCSI 24840 * reservation: traditional SCSI-2 Reserve/Release and the newer SCSI-3 24841 * Persistent Group Reservations. For any particular disk, it will use either 24842 * SCSI-2 or SCSI-3 PGR, but never both at the same time for the same disk. 24843 * 24844 * SCSI-2 24845 * The cluster software takes ownership of a multi-hosted disk by issuing the 24846 * MHIOCTKOWN ioctl to the disk driver.
It releases ownership by issuing the 24847 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl: a cluster 24848 * host, just after taking ownership of the disk with the MHIOCTKOWN ioctl, then 24849 * issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the driver. 24850 * The meaning of failfast is that if the driver (on this host) ever encounters the 24851 * scsi error return code RESERVATION_CONFLICT from the device, it should 24852 * immediately panic the host. The motivation for this ioctl is that if this 24853 * host does encounter reservation conflict, the underlying cause is that some 24854 * other host of the cluster has decided that this host is no longer in the 24855 * cluster and has seized control of the disks for itself. Since this host is no 24856 * longer in the cluster, it ought to panic itself. The MHIOCENFAILFAST ioctl 24857 * does two things: 24858 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 24859 * error to panic the host 24860 * (b) it sets up a periodic timer to test whether this host still has 24861 * "access" (in that no other host has reserved the device): if the 24862 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 24863 * purpose of that periodic timer is to handle scenarios where the host is 24864 * otherwise temporarily quiescent, temporarily doing no real i/o. 24865 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 24866 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 24867 * the device itself. 24868 * 24869 * SCSI-3 PGR 24870 * A direct semantic implementation of the SCSI-3 Persistent Reservation 24871 * facility is supported through the shared multihost disk ioctls 24872 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 24873 * MHIOCGRP_PREEMPTANDABORT). 24874 * 24875 * Reservation Reclaim: 24876 * -------------------- 24877 * To support the lost reservation reclaim operations, this driver creates a 24878 * single thread to handle reinstating reservations on all devices that have 24879 * lost their reservations. sd_resv_reclaim_requests are logged for all devices 24880 * that have lost reservations when the scsi watch facility calls back 24881 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 24882 * requests to regain the lost reservations. 24883 */ 24884 24885 /* 24886 * Function: sd_check_mhd() 24887 * 24888 * Description: This function sets up and submits a scsi watch request or 24889 * terminates an existing watch request. This routine is used in 24890 * support of reservation reclaim. 24891 * 24892 * Arguments: dev - the device 'dev_t' is used for context to discriminate 24893 * among multiple watches that share the callback function 24894 * interval - the number of milliseconds specifying the watch 24895 * interval for issuing TEST UNIT READY commands. If 24896 * set to 0 the watch should be terminated. If the 24897 * interval is set to 0 and if the device is required 24898 * to hold reservation while disabling failfast, the 24899 * watch is restarted with an interval of 24900 * reinstate_resv_delay.
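 *
 * As an illustration of how this routine is reached: the
 * MHIOCENFAILFAST handler (sd_mhdioc_failfast, above) copies in a
 * plain int giving the probe interval, so a user program would issue,
 * with hypothetical values,
 *
 *	int mh_time = 2000;
 *	(void) ioctl(fd, MHIOCENFAILFAST, &mh_time);
 *
 * which then arrives here as sd_check_mhd(dev, 2000).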
24901 * 24902 * Return Code: 0 - Successful submit/terminate of scsi watch request 24903 * ENXIO - Indicates an invalid device was specified 24904 * EAGAIN - Unable to submit the scsi watch request 24905 */ 24906 24907 static int 24908 sd_check_mhd(dev_t dev, int interval) 24909 { 24910 struct sd_lun *un; 24911 opaque_t token; 24912 24913 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24914 return (ENXIO); 24915 } 24916 24917 /* is this a watch termination request? */ 24918 if (interval == 0) { 24919 mutex_enter(SD_MUTEX(un)); 24920 /* if there is an existing watch task then terminate it */ 24921 if (un->un_mhd_token) { 24922 token = un->un_mhd_token; 24923 un->un_mhd_token = NULL; 24924 mutex_exit(SD_MUTEX(un)); 24925 (void) scsi_watch_request_terminate(token, 24926 SCSI_WATCH_TERMINATE_WAIT); 24927 mutex_enter(SD_MUTEX(un)); 24928 } else { 24929 mutex_exit(SD_MUTEX(un)); 24930 /* 24931 * Note: If we return here we don't check for the 24932 * failfast case. This is the original legacy 24933 * implementation, but perhaps we should be checking 24934 * the failfast case. 24935 */ 24936 return (0); 24937 } 24938 /* 24939 * If the device is required to hold reservation while 24940 * disabling failfast, we need to restart the scsi_watch 24941 * routine with an interval of reinstate_resv_delay. 24942 */ 24943 if (un->un_resvd_status & SD_RESERVE) { 24944 interval = sd_reinstate_resv_delay/1000; 24945 } else { 24946 /* no failfast so bail */ 24947 mutex_exit(SD_MUTEX(un)); 24948 return (0); 24949 } 24950 mutex_exit(SD_MUTEX(un)); 24951 } 24952 24953 /* 24954 * adjust the minimum time interval to 1 second, 24955 * and convert from msecs to usecs 24956 */ 24957 if (interval > 0 && interval < 1000) { 24958 interval = 1000; 24959 } 24960 interval *= 1000; 24961 24962 /* 24963 * submit the request to the scsi_watch service 24964 */ 24965 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24966 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24967 if (token == NULL) { 24968 return (EAGAIN); 24969 } 24970 24971 /* 24972 * save the token for termination later on 24973 */ 24974 mutex_enter(SD_MUTEX(un)); 24975 un->un_mhd_token = token; 24976 mutex_exit(SD_MUTEX(un)); 24977 return (0); 24978 } 24979 24980 24981 /* 24982 * Function: sd_mhd_watch_cb() 24983 * 24984 * Description: This function is the callback function used by the scsi watch 24985 * facility. The scsi watch facility sends the "Test Unit Ready" 24986 * and processes the status. If applicable (i.e. a "Unit Attention" 24987 * status and automatic "Request Sense" not used) the scsi watch 24988 * facility will send a "Request Sense" and retrieve the sense data 24989 * to be passed to this callback function. In either case, whether 24990 * via the automatic "Request Sense" or the facility submitting one, 24991 * this callback is passed the status and sense data.
24992 * 24993 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24994 * among multiple watches that share this callback function 24995 * resultp - scsi watch facility result packet containing scsi 24996 * packet, status byte and sense data 24997 * 24998 * Return Code: 0 - continue the watch task 24999 * non-zero - terminate the watch task 25000 */ 25001 25002 static int 25003 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 25004 { 25005 struct sd_lun *un; 25006 struct scsi_status *statusp; 25007 struct scsi_extended_sense *sensep; 25008 struct scsi_pkt *pkt; 25009 uchar_t actual_sense_length; 25010 dev_t dev = (dev_t)arg; 25011 25012 ASSERT(resultp != NULL); 25013 statusp = resultp->statusp; 25014 sensep = resultp->sensep; 25015 pkt = resultp->pkt; 25016 actual_sense_length = resultp->actual_sense_length; 25017 25018 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25019 return (ENXIO); 25020 } 25021 25022 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25023 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 25024 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 25025 25026 /* Begin processing of the status and/or sense data */ 25027 if (pkt->pkt_reason != CMD_CMPLT) { 25028 /* Handle the incomplete packet */ 25029 sd_mhd_watch_incomplete(un, pkt); 25030 return (0); 25031 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 25032 if (*((unsigned char *)statusp) 25033 == STATUS_RESERVATION_CONFLICT) { 25034 /* 25035 * Handle a reservation conflict by panicking if 25036 * configured for failfast or by logging the conflict 25037 * and updating the reservation status 25038 */ 25039 mutex_enter(SD_MUTEX(un)); 25040 if ((un->un_resvd_status & SD_FAILFAST) && 25041 (sd_failfast_enable)) { 25042 panic("Reservation Conflict"); 25043 /*NOTREACHED*/ 25044 } 25045 SD_INFO(SD_LOG_IOCTL_MHD, un, 25046 "sd_mhd_watch_cb: Reservation Conflict\n"); 25047 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 25048 mutex_exit(SD_MUTEX(un)); 25049 } 25050 } 25051 25052 if (sensep != NULL) { 25053 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 25054 mutex_enter(SD_MUTEX(un)); 25055 if ((sensep->es_add_code == SD_SCSI_RESET_SENSE_CODE) && 25056 (un->un_resvd_status & SD_RESERVE)) { 25057 /* 25058 * The additional sense code indicates a power 25059 * on or bus device reset has occurred; update 25060 * the reservation status. 25061 */ 25062 un->un_resvd_status |= 25063 (SD_LOST_RESERVE | SD_WANT_RESERVE); 25064 SD_INFO(SD_LOG_IOCTL_MHD, un, 25065 "sd_mhd_watch_cb: Lost Reservation\n"); 25066 } 25067 } else { 25068 return (0); 25069 } 25070 } else { 25071 mutex_enter(SD_MUTEX(un)); 25072 } 25073 25074 if ((un->un_resvd_status & SD_RESERVE) && 25075 (un->un_resvd_status & SD_LOST_RESERVE)) { 25076 if (un->un_resvd_status & SD_WANT_RESERVE) { 25077 /* 25078 * A reset occurred in between the last probe and this 25079 * one so if a timeout is pending cancel it. 
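 *
 * (The drop-SD_MUTEX/untimeout()/re-enter sequence below is the usual
 * guard against deadlocking with a timeout handler that is already
 * firing and blocking on the same mutex.)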
25080 */ 25081 if (un->un_resvd_timeid) { 25082 timeout_id_t temp_id = un->un_resvd_timeid; 25083 un->un_resvd_timeid = NULL; 25084 mutex_exit(SD_MUTEX(un)); 25085 (void) untimeout(temp_id); 25086 mutex_enter(SD_MUTEX(un)); 25087 } 25088 un->un_resvd_status &= ~SD_WANT_RESERVE; 25089 } 25090 if (un->un_resvd_timeid == 0) { 25091 /* Schedule a timeout to handle the lost reservation */ 25092 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 25093 (void *)dev, 25094 drv_usectohz(sd_reinstate_resv_delay)); 25095 } 25096 } 25097 mutex_exit(SD_MUTEX(un)); 25098 return (0); 25099 } 25100 25101 25102 /* 25103 * Function: sd_mhd_watch_incomplete() 25104 * 25105 * Description: This function is used to find out why a scsi pkt sent by the 25106 * scsi watch facility was not completed. Under some scenarios this 25107 * routine will return. Otherwise it will send a bus reset to see 25108 * if the drive is still online. 25109 * 25110 * Arguments: un - driver soft state (unit) structure 25111 * pkt - incomplete scsi pkt 25112 */ 25113 25114 static void 25115 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 25116 { 25117 int be_chatty; 25118 int perr; 25119 25120 ASSERT(pkt != NULL); 25121 ASSERT(un != NULL); 25122 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 25123 perr = (pkt->pkt_statistics & STAT_PERR); 25124 25125 mutex_enter(SD_MUTEX(un)); 25126 if (un->un_state == SD_STATE_DUMPING) { 25127 mutex_exit(SD_MUTEX(un)); 25128 return; 25129 } 25130 25131 switch (pkt->pkt_reason) { 25132 case CMD_UNX_BUS_FREE: 25133 /* 25134 * If we had a parity error that caused the target to drop BSY*, 25135 * don't be chatty about it. 25136 */ 25137 if (perr && be_chatty) { 25138 be_chatty = 0; 25139 } 25140 break; 25141 case CMD_TAG_REJECT: 25142 /* 25143 * The SCSI-2 spec states that a tag reject will be sent by the 25144 * target if tagged queuing is not supported. A tag reject may 25145 * also be sent during certain initialization periods or to 25146 * control internal resources. For the latter case the target 25147 * may also return Queue Full. 25148 * 25149 * If this driver receives a tag reject from a target that is 25150 * going through an init period or controlling internal 25151 * resources, tagged queuing will be disabled. This is less 25152 * than optimal behavior, but the driver is unable to determine 25153 * the target state and assumes tagged queueing is not supported. 25154 */ 25155 pkt->pkt_flags = 0; 25156 un->un_tagflags = 0; 25157 25158 if (un->un_f_opt_queueing == TRUE) { 25159 un->un_throttle = min(un->un_throttle, 3); 25160 } else { 25161 un->un_throttle = 1; 25162 } 25163 mutex_exit(SD_MUTEX(un)); 25164 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 25165 mutex_enter(SD_MUTEX(un)); 25166 break; 25167 case CMD_INCOMPLETE: 25168 /* 25169 * The transport stopped with an abnormal state; fall through and 25170 * reset the target and/or bus, unless selection did not complete 25171 * (indicated by STATE_GOT_BUS), in which case we don't want to 25172 * go through a target/bus reset. 25173 */ 25174 if (pkt->pkt_state == STATE_GOT_BUS) { 25175 break; 25176 } 25177 /*FALLTHROUGH*/ 25178 25179 case CMD_TIMEOUT: 25180 default: 25181 /* 25182 * The lun may still be running the command, so a lun reset 25183 * should be attempted. If the lun reset fails or cannot be 25184 * issued, then try a target reset. Lastly, try a bus reset.
25185 */ 25186 if ((pkt->pkt_statistics & 25187 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 25188 int reset_retval = 0; 25189 mutex_exit(SD_MUTEX(un)); 25190 if (un->un_f_allow_bus_device_reset == TRUE) { 25191 if (un->un_f_lun_reset_enabled == TRUE) { 25192 reset_retval = 25193 scsi_reset(SD_ADDRESS(un), 25194 RESET_LUN); 25195 } 25196 if (reset_retval == 0) { 25197 reset_retval = 25198 scsi_reset(SD_ADDRESS(un), 25199 RESET_TARGET); 25200 } 25201 } 25202 if (reset_retval == 0) { 25203 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25204 } 25205 mutex_enter(SD_MUTEX(un)); 25206 } 25207 break; 25208 } 25209 25210 /* A device/bus reset has occurred; update the reservation status. */ 25211 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 25212 (STAT_BUS_RESET | STAT_DEV_RESET))) { 25213 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25214 un->un_resvd_status |= 25215 (SD_LOST_RESERVE | SD_WANT_RESERVE); 25216 SD_INFO(SD_LOG_IOCTL_MHD, un, 25217 "sd_mhd_watch_incomplete: Lost Reservation\n"); 25218 } 25219 } 25220 25221 /* 25222 * The disk has been turned off; Update the device state. 25223 * 25224 * Note: Should we be offlining the disk here? 25225 */ 25226 if (pkt->pkt_state == STATE_GOT_BUS) { 25227 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 25228 "Disk not responding to selection\n"); 25229 if (un->un_state != SD_STATE_OFFLINE) { 25230 New_state(un, SD_STATE_OFFLINE); 25231 } 25232 } else if (be_chatty) { 25233 /* 25234 * suppress messages if they are all the same pkt reason; 25235 * with TQ, many (up to 256) are returned with the same 25236 * pkt_reason 25237 */ 25238 if (pkt->pkt_reason != un->un_last_pkt_reason) { 25239 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25240 "sd_mhd_watch_incomplete: " 25241 "SCSI transport failed: reason '%s'\n", 25242 scsi_rname(pkt->pkt_reason)); 25243 } 25244 } 25245 un->un_last_pkt_reason = pkt->pkt_reason; 25246 mutex_exit(SD_MUTEX(un)); 25247 } 25248 25249 25250 /* 25251 * Function: sd_sname() 25252 * 25253 * Description: This is a simple little routine to return a string containing 25254 * a printable description of command status byte for use in 25255 * logging. 25256 * 25257 * Arguments: status - pointer to a status byte 25258 * 25259 * Return Code: char * - string containing status description. 25260 */ 25261 25262 static char * 25263 sd_sname(uchar_t status) 25264 { 25265 switch (status & STATUS_MASK) { 25266 case STATUS_GOOD: 25267 return ("good status"); 25268 case STATUS_CHECK: 25269 return ("check condition"); 25270 case STATUS_MET: 25271 return ("condition met"); 25272 case STATUS_BUSY: 25273 return ("busy"); 25274 case STATUS_INTERMEDIATE: 25275 return ("intermediate"); 25276 case STATUS_INTERMEDIATE_MET: 25277 return ("intermediate - condition met"); 25278 case STATUS_RESERVATION_CONFLICT: 25279 return ("reservation_conflict"); 25280 case STATUS_TERMINATED: 25281 return ("command terminated"); 25282 case STATUS_QFULL: 25283 return ("queue full"); 25284 default: 25285 return ("<unknown status>"); 25286 } 25287 } 25288 25289 25290 /* 25291 * Function: sd_mhd_resvd_recover() 25292 * 25293 * Description: This function adds a reservation entry to the 25294 * sd_resv_reclaim_request list and signals the reservation 25295 * reclaim thread that there is work pending. If the reservation 25296 * reclaim thread has not been previously created this function 25297 * will kick it off. 
25298 * 25299 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25300 * among multiple watches that share this callback function 25301 * 25302 * Context: This routine is called by timeout() and is run in interrupt 25303 * context. It must not sleep or call other functions which may 25304 * sleep. 25305 */ 25306 25307 static void 25308 sd_mhd_resvd_recover(void *arg) 25309 { 25310 dev_t dev = (dev_t)arg; 25311 struct sd_lun *un; 25312 struct sd_thr_request *sd_treq = NULL; 25313 struct sd_thr_request *sd_cur = NULL; 25314 struct sd_thr_request *sd_prev = NULL; 25315 int already_there = 0; 25316 25317 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25318 return; 25319 } 25320 25321 mutex_enter(SD_MUTEX(un)); 25322 un->un_resvd_timeid = NULL; 25323 if (un->un_resvd_status & SD_WANT_RESERVE) { 25324 /* 25325 * There was a reset so don't issue the reserve, allow the 25326 * sd_mhd_watch_cb callback function to notice this and 25327 * reschedule the timeout for reservation. 25328 */ 25329 mutex_exit(SD_MUTEX(un)); 25330 return; 25331 } 25332 mutex_exit(SD_MUTEX(un)); 25333 25334 /* 25335 * Add this device to the sd_resv_reclaim_request list and the 25336 * sd_resv_reclaim_thread should take care of the rest. 25337 * 25338 * Note: We can't sleep in this context so if the memory allocation 25339 * fails allow the sd_mhd_watch_cb callback function to notice this and 25340 * reschedule the timeout for reservation. (4378460) 25341 */ 25342 sd_treq = (struct sd_thr_request *) 25343 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 25344 if (sd_treq == NULL) { 25345 return; 25346 } 25347 25348 sd_treq->sd_thr_req_next = NULL; 25349 sd_treq->dev = dev; 25350 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25351 if (sd_tr.srq_thr_req_head == NULL) { 25352 sd_tr.srq_thr_req_head = sd_treq; 25353 } else { 25354 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 25355 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 25356 if (sd_cur->dev == dev) { 25357 /* 25358 * already in Queue so don't log 25359 * another request for the device 25360 */ 25361 already_there = 1; 25362 break; 25363 } 25364 sd_prev = sd_cur; 25365 } 25366 if (!already_there) { 25367 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 25368 "logging request for %lx\n", dev); 25369 sd_prev->sd_thr_req_next = sd_treq; 25370 } else { 25371 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 25372 } 25373 } 25374 25375 /* 25376 * Create a kernel thread to do the reservation reclaim and free up this 25377 * thread. 
We cannot block this thread while we go away to do the 25378 * reservation reclaim 25379 */ 25380 if (sd_tr.srq_resv_reclaim_thread == NULL) 25381 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 25382 sd_resv_reclaim_thread, NULL, 25383 0, &p0, TS_RUN, v.v_maxsyspri - 2); 25384 25385 /* Tell the reservation reclaim thread that it has work to do */ 25386 cv_signal(&sd_tr.srq_resv_reclaim_cv); 25387 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25388 } 25389 25390 /* 25391 * Function: sd_resv_reclaim_thread() 25392 * 25393 * Description: This function implements the reservation reclaim operations 25394 * 25395 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25396 * among multiple watches that share this callback function 25397 */ 25398 25399 static void 25400 sd_resv_reclaim_thread() 25401 { 25402 struct sd_lun *un; 25403 struct sd_thr_request *sd_mhreq; 25404 25405 /* Wait for work */ 25406 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25407 if (sd_tr.srq_thr_req_head == NULL) { 25408 cv_wait(&sd_tr.srq_resv_reclaim_cv, 25409 &sd_tr.srq_resv_reclaim_mutex); 25410 } 25411 25412 /* Loop while we have work */ 25413 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 25414 un = ddi_get_soft_state(sd_state, 25415 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 25416 if (un == NULL) { 25417 /* 25418 * softstate structure is NULL so just 25419 * dequeue the request and continue 25420 */ 25421 sd_tr.srq_thr_req_head = 25422 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25423 kmem_free(sd_tr.srq_thr_cur_req, 25424 sizeof (struct sd_thr_request)); 25425 continue; 25426 } 25427 25428 /* dequeue the request */ 25429 sd_mhreq = sd_tr.srq_thr_cur_req; 25430 sd_tr.srq_thr_req_head = 25431 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25432 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25433 25434 /* 25435 * Reclaim reservation only if SD_RESERVE is still set. There 25436 * may have been a call to MHIOCRELEASE before we got here. 25437 */ 25438 mutex_enter(SD_MUTEX(un)); 25439 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25440 /* 25441 * Note: The SD_LOST_RESERVE flag is cleared before 25442 * reclaiming the reservation. If this is done after the 25443 * call to sd_reserve_release a reservation loss in the 25444 * window between pkt completion of reserve cmd and 25445 * mutex_enter below may not be recognized 25446 */ 25447 un->un_resvd_status &= ~SD_LOST_RESERVE; 25448 mutex_exit(SD_MUTEX(un)); 25449 25450 if (sd_reserve_release(sd_mhreq->dev, 25451 SD_RESERVE) == 0) { 25452 mutex_enter(SD_MUTEX(un)); 25453 un->un_resvd_status |= SD_RESERVE; 25454 mutex_exit(SD_MUTEX(un)); 25455 SD_INFO(SD_LOG_IOCTL_MHD, un, 25456 "sd_resv_reclaim_thread: " 25457 "Reservation Recovered\n"); 25458 } else { 25459 mutex_enter(SD_MUTEX(un)); 25460 un->un_resvd_status |= SD_LOST_RESERVE; 25461 mutex_exit(SD_MUTEX(un)); 25462 SD_INFO(SD_LOG_IOCTL_MHD, un, 25463 "sd_resv_reclaim_thread: Failed " 25464 "Reservation Recovery\n"); 25465 } 25466 } else { 25467 mutex_exit(SD_MUTEX(un)); 25468 } 25469 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25470 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25471 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25472 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25473 /* 25474 * wakeup the destroy thread if anyone is waiting on 25475 * us to complete. 
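 * (The waiter is sd_rmv_resv_reclaim_req(), below, which cv_wait()s on
 * srq_inprocess_cv while a reclaim for its device is in flight.)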
25476 */ 25477 cv_signal(&sd_tr.srq_inprocess_cv); 25478 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25479 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25480 } 25481 25482 /* 25483 * Clean up the sd_tr structure now that this thread will no longer exist. 25484 */ 25485 ASSERT(sd_tr.srq_thr_req_head == NULL); 25486 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25487 sd_tr.srq_resv_reclaim_thread = NULL; 25488 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25489 thread_exit(); 25490 } 25491 25492 25493 /* 25494 * Function: sd_rmv_resv_reclaim_req() 25495 * 25496 * Description: This function removes any pending reservation reclaim requests 25497 * for the specified device. 25498 * 25499 * Arguments: dev - the device 'dev_t' 25500 */ 25501 25502 static void 25503 sd_rmv_resv_reclaim_req(dev_t dev) 25504 { 25505 struct sd_thr_request *sd_mhreq; 25506 struct sd_thr_request *sd_prev; 25507 25508 /* Remove a reservation reclaim request from the list */ 25509 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25510 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25511 /* 25512 * We are attempting to reinstate reservation for 25513 * this device. We wait for sd_reserve_release() 25514 * to return before we return. 25515 */ 25516 cv_wait(&sd_tr.srq_inprocess_cv, 25517 &sd_tr.srq_resv_reclaim_mutex); 25518 } else { 25519 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25520 if (sd_mhreq && sd_mhreq->dev == dev) { 25521 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25522 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25523 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25524 return; 25525 } 25526 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25527 if (sd_mhreq && sd_mhreq->dev == dev) { 25528 break; 25529 } 25530 sd_prev = sd_mhreq; 25531 } 25532 if (sd_mhreq != NULL) { 25533 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25534 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25535 } 25536 } 25537 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25538 } 25539 25540 25541 /* 25542 * Function: sd_mhd_reset_notify_cb() 25543 * 25544 * Description: This is a callback function for scsi_reset_notify. This 25545 * function updates the softstate reserved status and logs the 25546 * reset. The driver scsi watch facility callback function 25547 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25548 * will reclaim the reservation. 25549 * 25550 * Arguments: arg - driver soft state (unit) structure 25551 */ 25552 25553 static void 25554 sd_mhd_reset_notify_cb(caddr_t arg) 25555 { 25556 struct sd_lun *un = (struct sd_lun *)arg; 25557 25558 mutex_enter(SD_MUTEX(un)); 25559 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25560 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 25561 SD_INFO(SD_LOG_IOCTL_MHD, un, 25562 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 25563 } 25564 mutex_exit(SD_MUTEX(un)); 25565 } 25566 25567 25568 /* 25569 * Function: sd_take_ownership() 25570 * 25571 * Description: This routine implements an algorithm to achieve a stable 25572 * reservation on disks which don't implement priority reserve, 25573 * and makes sure that other hosts' re-reservation attempts fail. 25574 * The algorithm consists of a loop that keeps issuing the RESERVE 25575 * for some period of time (min_ownership_delay, default 6 seconds). 25576 * During that loop, it looks to see if there has been a bus device 25577 * reset or a bus reset (both of which cause an existing reservation 25578 * to be lost).
If the reservation is lost issue RESERVE until a 25579 * period of min_ownership_delay with no resets has gone by, or 25580 * until max_ownership_delay has expired. This loop ensures that 25581 * the host really did manage to reserve the device, in spite of 25582 * resets. The looping for min_ownership_delay (default six 25583 * seconds) is important to early generation clustering products, 25584 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 25585 * MHIOCENFAILFAST periodic timer of two seconds. By having 25586 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 25587 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 25588 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 25589 * have already noticed, via the MHIOCENFAILFAST polling, that it 25590 * no longer "owns" the disk and will have panicked itself. Thus, 25591 * the host issuing the MHIOCTKOWN is assured (with timing 25592 * dependencies) that by the time it actually starts to use the 25593 * disk for real work, the old owner is no longer accessing it. 25594 * 25595 * min_ownership_delay is the minimum amount of time for which the 25596 * disk must be reserved continuously devoid of resets before the 25597 * MHIOCTKOWN ioctl will return success. 25598 * 25599 * max_ownership_delay indicates the amount of time by which the 25600 * take ownership should succeed or timeout with an error. 25601 * 25602 * Arguments: dev - the device 'dev_t' 25603 * *p - struct containing timing info. 25604 * 25605 * Return Code: 0 for success or error code 25606 */ 25607 25608 static int 25609 sd_take_ownership(dev_t dev, struct mhioctkown *p) 25610 { 25611 struct sd_lun *un; 25612 int rval; 25613 int err; 25614 int reservation_count = 0; 25615 int min_ownership_delay = 6000000; /* in usec */ 25616 int max_ownership_delay = 30000000; /* in usec */ 25617 clock_t start_time; /* starting time of this algorithm */ 25618 clock_t end_time; /* time limit for giving up */ 25619 clock_t ownership_time; /* time limit for stable ownership */ 25620 clock_t current_time; 25621 clock_t previous_current_time; 25622 25623 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25624 return (ENXIO); 25625 } 25626 25627 /* 25628 * Attempt a device reservation. A priority reservation is requested. 
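	 *
	 * For illustration only (not part of the driver): callers normally
	 * reach this routine through the MHIOCTKOWN ioctl. The structure and
	 * ioctl come from sys/mhd.h; the file descriptor and delay values in
	 * this sketch are made up. Delays are in milliseconds, and the
	 * defaults described above apply when a field is left zero.
	 *
	 *	struct mhioctkown t;
	 *	bzero(&t, sizeof (struct mhioctkown));
	 *	t.min_ownership_delay = 6000;
	 *	t.max_ownership_delay = 30000;
	 *	if (ioctl(disk_fd, MHIOCTKOWN, &t) != 0)
	 *		... EACCES here means another host kept the disk ...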
25629 */ 25630 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25631 != SD_SUCCESS) { 25632 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25633 "sd_take_ownership: return(1)=%d\n", rval); 25634 return (rval); 25635 } 25636 25637 /* Update the softstate reserved status to indicate the reservation */ 25638 mutex_enter(SD_MUTEX(un)); 25639 un->un_resvd_status |= SD_RESERVE; 25640 un->un_resvd_status &= 25641 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25642 mutex_exit(SD_MUTEX(un)); 25643 25644 if (p != NULL) { 25645 if (p->min_ownership_delay != 0) { 25646 min_ownership_delay = p->min_ownership_delay * 1000; 25647 } 25648 if (p->max_ownership_delay != 0) { 25649 max_ownership_delay = p->max_ownership_delay * 1000; 25650 } 25651 } 25652 SD_INFO(SD_LOG_IOCTL_MHD, un, 25653 "sd_take_ownership: min, max delays: %d, %d\n", 25654 min_ownership_delay, max_ownership_delay); 25655 25656 start_time = ddi_get_lbolt(); 25657 current_time = start_time; 25658 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25659 end_time = start_time + drv_usectohz(max_ownership_delay); 25660 25661 while (current_time - end_time < 0) { 25662 delay(drv_usectohz(500000)); 25663 25664 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25665 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25666 mutex_enter(SD_MUTEX(un)); 25667 rval = (un->un_resvd_status & 25668 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25669 mutex_exit(SD_MUTEX(un)); 25670 break; 25671 } 25672 } 25673 previous_current_time = current_time; 25674 current_time = ddi_get_lbolt(); 25675 mutex_enter(SD_MUTEX(un)); 25676 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25677 ownership_time = ddi_get_lbolt() + 25678 drv_usectohz(min_ownership_delay); 25679 reservation_count = 0; 25680 } else { 25681 reservation_count++; 25682 } 25683 un->un_resvd_status |= SD_RESERVE; 25684 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25685 mutex_exit(SD_MUTEX(un)); 25686 25687 SD_INFO(SD_LOG_IOCTL_MHD, un, 25688 "sd_take_ownership: ticks for loop iteration=%ld, " 25689 "reservation=%s\n", (current_time - previous_current_time), 25690 reservation_count ? 
"ok" : "reclaimed"); 25691 25692 if (current_time - ownership_time >= 0 && 25693 reservation_count >= 4) { 25694 rval = 0; /* Achieved a stable ownership */ 25695 break; 25696 } 25697 if (current_time - end_time >= 0) { 25698 rval = EACCES; /* No ownership in max possible time */ 25699 break; 25700 } 25701 } 25702 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25703 "sd_take_ownership: return(2)=%d\n", rval); 25704 return (rval); 25705 } 25706 25707 25708 /* 25709 * Function: sd_reserve_release() 25710 * 25711 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25712 * PRIORITY RESERVE commands based on a user specified command type 25713 * 25714 * Arguments: dev - the device 'dev_t' 25715 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25716 * SD_RESERVE, SD_RELEASE 25717 * 25718 * Return Code: 0 or Error Code 25719 */ 25720 25721 static int 25722 sd_reserve_release(dev_t dev, int cmd) 25723 { 25724 struct uscsi_cmd *com = NULL; 25725 struct sd_lun *un = NULL; 25726 char cdb[CDB_GROUP0]; 25727 int rval; 25728 25729 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25730 (cmd == SD_PRIORITY_RESERVE)); 25731 25732 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25733 return (ENXIO); 25734 } 25735 25736 /* instantiate and initialize the command and cdb */ 25737 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25738 bzero(cdb, CDB_GROUP0); 25739 com->uscsi_flags = USCSI_SILENT; 25740 com->uscsi_timeout = un->un_reserve_release_time; 25741 com->uscsi_cdblen = CDB_GROUP0; 25742 com->uscsi_cdb = cdb; 25743 if (cmd == SD_RELEASE) { 25744 cdb[0] = SCMD_RELEASE; 25745 } else { 25746 cdb[0] = SCMD_RESERVE; 25747 } 25748 25749 /* Send the command. */ 25750 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 25751 UIO_SYSSPACE, SD_PATH_STANDARD); 25752 25753 /* 25754 * "break" a reservation that is held by another host, by issuing a 25755 * reset if priority reserve is desired, and we could not get the 25756 * device. 25757 */ 25758 if ((cmd == SD_PRIORITY_RESERVE) && 25759 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25760 /* 25761 * First try to reset the LUN. If we cannot, then try a target 25762 * reset, followed by a bus reset if the target reset fails. 25763 */ 25764 int reset_retval = 0; 25765 if (un->un_f_lun_reset_enabled == TRUE) { 25766 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25767 } 25768 if (reset_retval == 0) { 25769 /* The LUN reset either failed or was not issued */ 25770 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25771 } 25772 if ((reset_retval == 0) && 25773 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25774 rval = EIO; 25775 kmem_free(com, sizeof (*com)); 25776 return (rval); 25777 } 25778 25779 bzero(com, sizeof (struct uscsi_cmd)); 25780 com->uscsi_flags = USCSI_SILENT; 25781 com->uscsi_cdb = cdb; 25782 com->uscsi_cdblen = CDB_GROUP0; 25783 com->uscsi_timeout = 5; 25784 25785 /* 25786 * Reissue the last reserve command, this time without request 25787 * sense. Assume that it is just a regular reserve command. 25788 */ 25789 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 25790 UIO_SYSSPACE, SD_PATH_STANDARD); 25791 } 25792 25793 /* Return an error if still getting a reservation conflict. 
 */
	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}


#define	SD_NDUMP_RETRIES	12
/*
 *	System Crash Dump routine
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct dk_map	*lp;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	 /* rmw - blkno for target */
	size_t		tgt_byte_count;	 /* rmw - # of bytes to xfer */
	size_t		tgt_nblk;	 /* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
#if defined(__i386) || defined(__amd64)
	ssize_t		dma_resid;
	daddr_t		oblkno;
#endif

	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (!un->un_f_geometry_is_valid) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	/* Validate the blocks to dump against the partition size. */
	lp = &un->un_map[partition];
	if ((blkno + nblk) > lp->dkl_nblk) {
		SD_TRACE(SD_LOG_DUMP, un,
		    "sddump: dump range larger than partition: "
		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
		    blkno, nblk, lp->dkl_nblk);
		return (EINVAL);
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * Use the pm framework to power on the HBA first.
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);

		/*
		 * Dump no longer uses sdpower to power on a device; it is
		 * done in-line here so that it can be done in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail. */
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_ddi_pm_resume(un);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * good state, so no bus reset is required.
	 * Clear sense data via a Request Sense cmd.
	 * In sddump we don't care about allow_bus_device_reset anymore.
	 */

	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {

		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Attempt a bus reset for parallel scsi.
			 *
			 * Note: A bus reset is required because on some host
			 * systems (i.e. E420R) a bus device reset is
			 * insufficient to reset the state of the target.
			 *
			 * Note: Don't issue the reset for fibre-channel,
			 * because this tends to hang the bus (loop) for
			 * too long while everyone is logging out and in
			 * and the deadman timer for dumping will fire
			 * before the dump is complete.
			 */
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}

			/* Delay to give the device some recovery time. */
			drv_usecwait(10000);

			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Convert the partition-relative block number to a
	 * disk physical block number.
	 */
	blkno += un->un_offset[partition];
	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


	/*
	 * Check if the device has a non-512 block size.
	 */
	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			doing_rmw = TRUE;
			/*
			 * Calculate the block number and the number of blocks
			 * in terms of the media block size.
			 */
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			/*
			 * Invoke the routine that performs the read part
			 * of the read-modify-write.
			 * Note that this routine returns a pointer to
			 * a valid bp in wr_bp.
			 */
			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
			    &wr_bp);
			if (err) {
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
			/*
			 * The offset is calculated as:
			 * (original block # * system block size) -
			 * (new block # * target block size)
			 */
			io_start_offset =
			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));

			ASSERT((io_start_offset >= 0) &&
			    (io_start_offset < un->un_tgt_blocksize));
			/*
			 * Do the modify portion of the read-modify-write.
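			 *
			 * Worked example with illustrative numbers: for a
			 * 512-byte system block size, a 2048-byte media block
			 * size, nblk = 1 and disk-absolute blkno = 3:
			 *
			 *	tgt_byte_offset = 3 * 512 = 1536
			 *	tgt_byte_count  = 1 * 512 = 512
			 *	tgt_blkno = 1536 / 2048 = 0
			 *	tgt_nblk  = ((1536 + 512 + 2047) / 2048) - 0
			 *		  = 1
			 *	io_start_offset = 3 * 512 - 0 * 2048 = 1536
			 *
			 * so the 512 bytes of dump data land 1536 bytes into
			 * the single 2048-byte media block read back by
			 * sddump_do_read_of_rmw().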
26004 */ 26005 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 26006 (size_t)nblk * un->un_sys_blocksize); 26007 } else { 26008 doing_rmw = FALSE; 26009 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 26010 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 26011 } 26012 26013 /* Convert blkno and nblk to target blocks */ 26014 blkno = tgt_blkno; 26015 nblk = tgt_nblk; 26016 } else { 26017 wr_bp = &wr_buf; 26018 bzero(wr_bp, sizeof (struct buf)); 26019 wr_bp->b_flags = B_BUSY; 26020 wr_bp->b_un.b_addr = addr; 26021 wr_bp->b_bcount = nblk << DEV_BSHIFT; 26022 wr_bp->b_resid = 0; 26023 } 26024 26025 mutex_exit(SD_MUTEX(un)); 26026 26027 /* 26028 * Obtain a SCSI packet for the write command. 26029 * It should be safe to call the allocator here without 26030 * worrying about being locked for DVMA mapping because 26031 * the address we're passed is already a DVMA mapping 26032 * 26033 * We are also not going to worry about semaphore ownership 26034 * in the dump buffer. Dumping is single threaded at present. 26035 */ 26036 26037 wr_pktp = NULL; 26038 26039 #if defined(__i386) || defined(__amd64) 26040 dma_resid = wr_bp->b_bcount; 26041 oblkno = blkno; 26042 while (dma_resid != 0) { 26043 #endif 26044 26045 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26046 wr_bp->b_flags &= ~B_ERROR; 26047 26048 #if defined(__i386) || defined(__amd64) 26049 blkno = oblkno + 26050 ((wr_bp->b_bcount - dma_resid) / 26051 un->un_tgt_blocksize); 26052 nblk = dma_resid / un->un_tgt_blocksize; 26053 26054 if (wr_pktp) { 26055 /* Partial DMA transfers after initial transfer */ 26056 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 26057 blkno, nblk); 26058 } else { 26059 /* Initial transfer */ 26060 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26061 un->un_pkt_flags, NULL_FUNC, NULL, 26062 blkno, nblk); 26063 } 26064 #else 26065 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26066 0, NULL_FUNC, NULL, blkno, nblk); 26067 #endif 26068 26069 if (rval == 0) { 26070 /* We were given a SCSI packet, continue. 
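			 *
			 * On x86, a worked example of the partial-DMA
			 * bookkeeping in this loop, with illustrative
			 * numbers: un_tgt_blocksize = 512 and
			 * b_bcount = 65536, where the HBA binds only 49152
			 * bytes and so leaves pkt_resid = 16384 after the
			 * first transfer:
			 *
			 *	nblk for the first write:
			 *	    128 - (16384 / 512) = 96
			 *	next blkno:
			 *	    oblkno + (65536 - 16384) / 512
			 *	next nblk:
			 *	    16384 / 512 = 32
			 *
			 * The outer loop then re-enters
			 * sd_setup_next_rw_pkt() until dma_resid reaches
			 * zero.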
*/ 26071 break; 26072 } 26073 26074 if (i == 0) { 26075 if (wr_bp->b_flags & B_ERROR) { 26076 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26077 "no resources for dumping; " 26078 "error code: 0x%x, retrying", 26079 geterror(wr_bp)); 26080 } else { 26081 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26082 "no resources for dumping; retrying"); 26083 } 26084 } else if (i != (SD_NDUMP_RETRIES - 1)) { 26085 if (wr_bp->b_flags & B_ERROR) { 26086 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26087 "no resources for dumping; error code: " 26088 "0x%x, retrying\n", geterror(wr_bp)); 26089 } 26090 } else { 26091 if (wr_bp->b_flags & B_ERROR) { 26092 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26093 "no resources for dumping; " 26094 "error code: 0x%x, retries failed, " 26095 "giving up.\n", geterror(wr_bp)); 26096 } else { 26097 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26098 "no resources for dumping; " 26099 "retries failed, giving up.\n"); 26100 } 26101 mutex_enter(SD_MUTEX(un)); 26102 Restore_state(un); 26103 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 26104 mutex_exit(SD_MUTEX(un)); 26105 scsi_free_consistent_buf(wr_bp); 26106 } else { 26107 mutex_exit(SD_MUTEX(un)); 26108 } 26109 return (EIO); 26110 } 26111 drv_usecwait(10000); 26112 } 26113 26114 #if defined(__i386) || defined(__amd64) 26115 /* 26116 * save the resid from PARTIAL_DMA 26117 */ 26118 dma_resid = wr_pktp->pkt_resid; 26119 if (dma_resid != 0) 26120 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 26121 wr_pktp->pkt_resid = 0; 26122 #endif 26123 26124 /* SunBug 1222170 */ 26125 wr_pktp->pkt_flags = FLAG_NOINTR; 26126 26127 err = EIO; 26128 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26129 26130 /* 26131 * Scsi_poll returns 0 (success) if the command completes and 26132 * the status block is STATUS_GOOD. We should only check 26133 * errors if this condition is not true. Even then we should 26134 * send our own request sense packet only if we have a check 26135 * condition and auto request sense has not been performed by 26136 * the hba. 26137 */ 26138 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 26139 26140 if ((sd_scsi_poll(un, wr_pktp) == 0) && 26141 (wr_pktp->pkt_resid == 0)) { 26142 err = SD_SUCCESS; 26143 break; 26144 } 26145 26146 /* 26147 * Check CMD_DEV_GONE 1st, give up if device is gone. 26148 */ 26149 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 26150 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26151 "Device is gone\n"); 26152 break; 26153 } 26154 26155 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 26156 SD_INFO(SD_LOG_DUMP, un, 26157 "sddump: write failed with CHECK, try # %d\n", i); 26158 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 26159 (void) sd_send_polled_RQS(un); 26160 } 26161 26162 continue; 26163 } 26164 26165 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 26166 int reset_retval = 0; 26167 26168 SD_INFO(SD_LOG_DUMP, un, 26169 "sddump: write failed with BUSY, try # %d\n", i); 26170 26171 if (un->un_f_lun_reset_enabled == TRUE) { 26172 reset_retval = scsi_reset(SD_ADDRESS(un), 26173 RESET_LUN); 26174 } 26175 if (reset_retval == 0) { 26176 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26177 } 26178 (void) sd_send_polled_RQS(un); 26179 26180 } else { 26181 SD_INFO(SD_LOG_DUMP, un, 26182 "sddump: write failed with 0x%x, try # %d\n", 26183 SD_GET_PKT_STATUS(wr_pktp), i); 26184 mutex_enter(SD_MUTEX(un)); 26185 sd_reset_target(un, wr_pktp); 26186 mutex_exit(SD_MUTEX(un)); 26187 } 26188 26189 /* 26190 * If we are not getting anywhere with lun/target resets, 26191 * let's reset the bus. 
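		 * (With SD_NDUMP_RETRIES at 12 this kicks in on the seventh
		 * attempt, i == 6, once the per-try LUN/target resets above
		 * have had a fair chance.)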
26192 */ 26193 if (i == SD_NDUMP_RETRIES/2) { 26194 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26195 (void) sd_send_polled_RQS(un); 26196 } 26197 26198 } 26199 #if defined(__i386) || defined(__amd64) 26200 } /* dma_resid */ 26201 #endif 26202 26203 scsi_destroy_pkt(wr_pktp); 26204 mutex_enter(SD_MUTEX(un)); 26205 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 26206 mutex_exit(SD_MUTEX(un)); 26207 scsi_free_consistent_buf(wr_bp); 26208 } else { 26209 mutex_exit(SD_MUTEX(un)); 26210 } 26211 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 26212 return (err); 26213 } 26214 26215 /* 26216 * Function: sd_scsi_poll() 26217 * 26218 * Description: This is a wrapper for the scsi_poll call. 26219 * 26220 * Arguments: sd_lun - The unit structure 26221 * scsi_pkt - The scsi packet being sent to the device. 26222 * 26223 * Return Code: 0 - Command completed successfully with good status 26224 * -1 - Command failed. This could indicate a check condition 26225 * or other status value requiring recovery action. 26226 * 26227 */ 26228 26229 static int 26230 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 26231 { 26232 int status; 26233 26234 ASSERT(un != NULL); 26235 ASSERT(!mutex_owned(SD_MUTEX(un))); 26236 ASSERT(pktp != NULL); 26237 26238 status = SD_SUCCESS; 26239 26240 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 26241 pktp->pkt_flags |= un->un_tagflags; 26242 pktp->pkt_flags &= ~FLAG_NODISCON; 26243 } 26244 26245 status = sd_ddi_scsi_poll(pktp); 26246 /* 26247 * Scsi_poll returns 0 (success) if the command completes and the 26248 * status block is STATUS_GOOD. We should only check errors if this 26249 * condition is not true. Even then we should send our own request 26250 * sense packet only if we have a check condition and auto 26251 * request sense has not been performed by the hba. 26252 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 26253 */ 26254 if ((status != SD_SUCCESS) && 26255 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 26256 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 26257 (pktp->pkt_reason != CMD_DEV_GONE)) 26258 (void) sd_send_polled_RQS(un); 26259 26260 return (status); 26261 } 26262 26263 /* 26264 * Function: sd_send_polled_RQS() 26265 * 26266 * Description: This sends the request sense command to a device. 26267 * 26268 * Arguments: sd_lun - The unit structure 26269 * 26270 * Return Code: 0 - Command completed successfully with good status 26271 * -1 - Command failed. 
26272 * 26273 */ 26274 26275 static int 26276 sd_send_polled_RQS(struct sd_lun *un) 26277 { 26278 int ret_val; 26279 struct scsi_pkt *rqs_pktp; 26280 struct buf *rqs_bp; 26281 26282 ASSERT(un != NULL); 26283 ASSERT(!mutex_owned(SD_MUTEX(un))); 26284 26285 ret_val = SD_SUCCESS; 26286 26287 rqs_pktp = un->un_rqs_pktp; 26288 rqs_bp = un->un_rqs_bp; 26289 26290 mutex_enter(SD_MUTEX(un)); 26291 26292 if (un->un_sense_isbusy) { 26293 ret_val = SD_FAILURE; 26294 mutex_exit(SD_MUTEX(un)); 26295 return (ret_val); 26296 } 26297 26298 /* 26299 * If the request sense buffer (and packet) is not in use, 26300 * let's set the un_sense_isbusy and send our packet 26301 */ 26302 un->un_sense_isbusy = 1; 26303 rqs_pktp->pkt_resid = 0; 26304 rqs_pktp->pkt_reason = 0; 26305 rqs_pktp->pkt_flags |= FLAG_NOINTR; 26306 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 26307 26308 mutex_exit(SD_MUTEX(un)); 26309 26310 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 26311 " 0x%p\n", rqs_bp->b_un.b_addr); 26312 26313 /* 26314 * Can't send this to sd_scsi_poll, we wrap ourselves around the 26315 * axle - it has a call into us! 26316 */ 26317 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 26318 SD_INFO(SD_LOG_COMMON, un, 26319 "sd_send_polled_RQS: RQS failed\n"); 26320 } 26321 26322 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 26323 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 26324 26325 mutex_enter(SD_MUTEX(un)); 26326 un->un_sense_isbusy = 0; 26327 mutex_exit(SD_MUTEX(un)); 26328 26329 return (ret_val); 26330 } 26331 26332 /* 26333 * Defines needed for localized version of the scsi_poll routine. 26334 */ 26335 #define SD_CSEC 10000 /* usecs */ 26336 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 26337 26338 26339 /* 26340 * Function: sd_ddi_scsi_poll() 26341 * 26342 * Description: Localized version of the scsi_poll routine. The purpose is to 26343 * send a scsi_pkt to a device as a polled command. This version 26344 * is to ensure more robust handling of transport errors. 26345 * Specifically this routine cures not ready, coming ready 26346 * transition for power up and reset of sonoma's. This can take 26347 * up to 45 seconds for power-on and 20 seconds for reset of a 26348 * sonoma lun. 26349 * 26350 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 26351 * 26352 * Return Code: 0 - Command completed successfully with good status 26353 * -1 - Command failed. 26354 * 26355 */ 26356 26357 static int 26358 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 26359 { 26360 int busy_count; 26361 int timeout; 26362 int rval = SD_FAILURE; 26363 int savef; 26364 struct scsi_extended_sense *sensep; 26365 long savet; 26366 void (*savec)(); 26367 /* 26368 * The following is defined in machdep.c and is used in determining if 26369 * the scsi transport system will do polled I/O instead of interrupt 26370 * I/O when called from xx_dump(). 26371 */ 26372 extern int do_polled_io; 26373 26374 /* 26375 * save old flags in pkt, to restore at end 26376 */ 26377 savef = pkt->pkt_flags; 26378 savec = pkt->pkt_comp; 26379 savet = pkt->pkt_time; 26380 26381 pkt->pkt_flags |= FLAG_NOINTR; 26382 26383 /* 26384 * XXX there is nothing in the SCSA spec that states that we should not 26385 * do a callback for polled cmds; however, removing this will break sd 26386 * and probably other target drivers 26387 */ 26388 pkt->pkt_comp = NULL; 26389 26390 /* 26391 * we don't like a polled command without timeout. 26392 * 60 seconds seems long enough. 
26393 */ 26394 if (pkt->pkt_time == 0) { 26395 pkt->pkt_time = SCSI_POLL_TIMEOUT; 26396 } 26397 26398 /* 26399 * Send polled cmd. 26400 * 26401 * We do some error recovery for various errors. Tran_busy, 26402 * queue full, and non-dispatched commands are retried every 10 msec. 26403 * as they are typically transient failures. Busy status and Not 26404 * Ready are retried every second as this status takes a while to 26405 * change. Unit attention is retried for pkt_time (60) times 26406 * with no delay. 26407 */ 26408 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 26409 26410 for (busy_count = 0; busy_count < timeout; busy_count++) { 26411 int rc; 26412 int poll_delay; 26413 26414 /* 26415 * Initialize pkt status variables. 26416 */ 26417 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 26418 26419 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 26420 if (rc != TRAN_BUSY) { 26421 /* Transport failed - give up. */ 26422 break; 26423 } else { 26424 /* Transport busy - try again. */ 26425 poll_delay = 1 * SD_CSEC; /* 10 msec */ 26426 } 26427 } else { 26428 /* 26429 * Transport accepted - check pkt status. 26430 */ 26431 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26432 if (pkt->pkt_reason == CMD_CMPLT && 26433 rc == STATUS_CHECK && 26434 pkt->pkt_state & STATE_ARQ_DONE) { 26435 struct scsi_arq_status *arqstat = 26436 (struct scsi_arq_status *)(pkt->pkt_scbp); 26437 26438 sensep = &arqstat->sts_sensedata; 26439 } else { 26440 sensep = NULL; 26441 } 26442 26443 if ((pkt->pkt_reason == CMD_CMPLT) && 26444 (rc == STATUS_GOOD)) { 26445 /* No error - we're done */ 26446 rval = SD_SUCCESS; 26447 break; 26448 26449 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26450 /* Lost connection - give up */ 26451 break; 26452 26453 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26454 (pkt->pkt_state == 0)) { 26455 /* Pkt not dispatched - try again. */ 26456 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 26457 26458 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26459 (rc == STATUS_QFULL)) { 26460 /* Queue full - try again. */ 26461 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 26462 26463 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26464 (rc == STATUS_BUSY)) { 26465 /* Busy - try again. */ 26466 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 26467 busy_count += (SD_SEC_TO_CSEC - 1); 26468 26469 } else if ((sensep != NULL) && 26470 (sensep->es_key == KEY_UNIT_ATTENTION)) { 26471 /* Unit Attention - try again */ 26472 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 26473 continue; 26474 26475 } else if ((sensep != NULL) && 26476 (sensep->es_key == KEY_NOT_READY) && 26477 (sensep->es_add_code == 0x04) && 26478 (sensep->es_qual_code == 0x01)) { 26479 /* Not ready -> ready - try again. */ 26480 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 26481 busy_count += (SD_SEC_TO_CSEC - 1); 26482 26483 } else { 26484 /* BAD status - give up. */ 26485 break; 26486 } 26487 } 26488 26489 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 26490 !do_polled_io) { 26491 delay(drv_usectohz(poll_delay)); 26492 } else { 26493 /* we busy wait during cpr_dump or interrupt threads */ 26494 drv_usecwait(poll_delay); 26495 } 26496 } 26497 26498 pkt->pkt_flags = savef; 26499 pkt->pkt_comp = savec; 26500 pkt->pkt_time = savet; 26501 return (rval); 26502 } 26503 26504 26505 /* 26506 * Function: sd_persistent_reservation_in_read_keys 26507 * 26508 * Description: This routine is the driver entry point for handling CD-ROM 26509 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26510 * by sending the SCSI-3 PRIN commands to the device. 
26511 * Processes the read keys command response by copying the 26512 * reservation key information into the user provided buffer. 26513 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26514 * 26515 * Arguments: un - Pointer to soft state struct for the target. 26516 * usrp - user provided pointer to multihost Persistent In Read 26517 * Keys structure (mhioc_inkeys_t) 26518 * flag - this argument is a pass through to ddi_copyxxx() 26519 * directly from the mode argument of ioctl(). 26520 * 26521 * Return Code: 0 - Success 26522 * EACCES 26523 * ENOTSUP 26524 * errno return code from sd_send_scsi_cmd() 26525 * 26526 * Context: Can sleep. Does not return until command is completed. 26527 */ 26528 26529 static int 26530 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26531 mhioc_inkeys_t *usrp, int flag) 26532 { 26533 #ifdef _MULTI_DATAMODEL 26534 struct mhioc_key_list32 li32; 26535 #endif 26536 sd_prin_readkeys_t *in; 26537 mhioc_inkeys_t *ptr; 26538 mhioc_key_list_t li; 26539 uchar_t *data_bufp; 26540 int data_len; 26541 int rval; 26542 size_t copysz; 26543 26544 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26545 return (EINVAL); 26546 } 26547 bzero(&li, sizeof (mhioc_key_list_t)); 26548 26549 /* 26550 * Get the listsize from user 26551 */ 26552 #ifdef _MULTI_DATAMODEL 26553 26554 switch (ddi_model_convert_from(flag & FMODELS)) { 26555 case DDI_MODEL_ILP32: 26556 copysz = sizeof (struct mhioc_key_list32); 26557 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26558 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26559 "sd_persistent_reservation_in_read_keys: " 26560 "failed ddi_copyin: mhioc_key_list32_t\n"); 26561 rval = EFAULT; 26562 goto done; 26563 } 26564 li.listsize = li32.listsize; 26565 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26566 break; 26567 26568 case DDI_MODEL_NONE: 26569 copysz = sizeof (mhioc_key_list_t); 26570 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26571 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26572 "sd_persistent_reservation_in_read_keys: " 26573 "failed ddi_copyin: mhioc_key_list_t\n"); 26574 rval = EFAULT; 26575 goto done; 26576 } 26577 break; 26578 } 26579 26580 #else /* ! 
_MULTI_DATAMODEL */ 26581 copysz = sizeof (mhioc_key_list_t); 26582 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26583 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26584 "sd_persistent_reservation_in_read_keys: " 26585 "failed ddi_copyin: mhioc_key_list_t\n"); 26586 rval = EFAULT; 26587 goto done; 26588 } 26589 #endif 26590 26591 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26592 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26593 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26594 26595 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 26596 data_len, data_bufp)) != 0) { 26597 goto done; 26598 } 26599 in = (sd_prin_readkeys_t *)data_bufp; 26600 ptr->generation = BE_32(in->generation); 26601 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26602 26603 /* 26604 * Return the min(listsize, listlen) keys 26605 */ 26606 #ifdef _MULTI_DATAMODEL 26607 26608 switch (ddi_model_convert_from(flag & FMODELS)) { 26609 case DDI_MODEL_ILP32: 26610 li32.listlen = li.listlen; 26611 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26612 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26613 "sd_persistent_reservation_in_read_keys: " 26614 "failed ddi_copyout: mhioc_key_list32_t\n"); 26615 rval = EFAULT; 26616 goto done; 26617 } 26618 break; 26619 26620 case DDI_MODEL_NONE: 26621 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26622 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26623 "sd_persistent_reservation_in_read_keys: " 26624 "failed ddi_copyout: mhioc_key_list_t\n"); 26625 rval = EFAULT; 26626 goto done; 26627 } 26628 break; 26629 } 26630 26631 #else /* ! _MULTI_DATAMODEL */ 26632 26633 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26634 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26635 "sd_persistent_reservation_in_read_keys: " 26636 "failed ddi_copyout: mhioc_key_list_t\n"); 26637 rval = EFAULT; 26638 goto done; 26639 } 26640 26641 #endif /* _MULTI_DATAMODEL */ 26642 26643 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26644 li.listsize * MHIOC_RESV_KEY_SIZE); 26645 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26646 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26647 "sd_persistent_reservation_in_read_keys: " 26648 "failed ddi_copyout: keylist\n"); 26649 rval = EFAULT; 26650 } 26651 done: 26652 kmem_free(data_bufp, data_len); 26653 return (rval); 26654 } 26655 26656 26657 /* 26658 * Function: sd_persistent_reservation_in_read_resv 26659 * 26660 * Description: This routine is the driver entry point for handling CD-ROM 26661 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26662 * by sending the SCSI-3 PRIN commands to the device. 26663 * Process the read persistent reservations command response by 26664 * copying the reservation information into the user provided 26665 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26666 * 26667 * Arguments: un - Pointer to soft state struct for the target. 26668 * usrp - user provided pointer to multihost Persistent In Read 26669 * Keys structure (mhioc_inkeys_t) 26670 * flag - this argument is a pass through to ddi_copyxxx() 26671 * directly from the mode argument of ioctl(). 26672 * 26673 * Return Code: 0 - Success 26674 * EACCES 26675 * ENOTSUP 26676 * errno return code from sd_send_scsi_cmd() 26677 * 26678 * Context: Can sleep. Does not return until command is completed. 
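 *
 * For illustration only (not part of the driver), a user-space caller
 * might look like the sketch below; the descriptor count and file
 * descriptor are made-up values:
 *
 *	mhioc_resv_desc_t	desc[8];
 *	mhioc_resv_desc_list_t	dl;
 *	mhioc_inresvs_t		arg;
 *
 *	dl.listsize = 8;
 *	dl.list = desc;
 *	arg.li = &dl;
 *	if (ioctl(disk_fd, MHIOCGRP_INRESV, &arg) == 0 &&
 *	    dl.listlen > dl.listsize)
 *		... retry with a larger descriptor array ...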
26679 */ 26680 26681 static int 26682 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26683 mhioc_inresvs_t *usrp, int flag) 26684 { 26685 #ifdef _MULTI_DATAMODEL 26686 struct mhioc_resv_desc_list32 resvlist32; 26687 #endif 26688 sd_prin_readresv_t *in; 26689 mhioc_inresvs_t *ptr; 26690 sd_readresv_desc_t *readresv_ptr; 26691 mhioc_resv_desc_list_t resvlist; 26692 mhioc_resv_desc_t resvdesc; 26693 uchar_t *data_bufp; 26694 int data_len; 26695 int rval; 26696 int i; 26697 size_t copysz; 26698 mhioc_resv_desc_t *bufp; 26699 26700 if ((ptr = usrp) == NULL) { 26701 return (EINVAL); 26702 } 26703 26704 /* 26705 * Get the listsize from user 26706 */ 26707 #ifdef _MULTI_DATAMODEL 26708 switch (ddi_model_convert_from(flag & FMODELS)) { 26709 case DDI_MODEL_ILP32: 26710 copysz = sizeof (struct mhioc_resv_desc_list32); 26711 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26712 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26713 "sd_persistent_reservation_in_read_resv: " 26714 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26715 rval = EFAULT; 26716 goto done; 26717 } 26718 resvlist.listsize = resvlist32.listsize; 26719 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26720 break; 26721 26722 case DDI_MODEL_NONE: 26723 copysz = sizeof (mhioc_resv_desc_list_t); 26724 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26725 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26726 "sd_persistent_reservation_in_read_resv: " 26727 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26728 rval = EFAULT; 26729 goto done; 26730 } 26731 break; 26732 } 26733 #else /* ! _MULTI_DATAMODEL */ 26734 copysz = sizeof (mhioc_resv_desc_list_t); 26735 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26736 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26737 "sd_persistent_reservation_in_read_resv: " 26738 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26739 rval = EFAULT; 26740 goto done; 26741 } 26742 #endif /* ! _MULTI_DATAMODEL */ 26743 26744 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26745 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26746 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26747 26748 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 26749 data_len, data_bufp)) != 0) { 26750 goto done; 26751 } 26752 in = (sd_prin_readresv_t *)data_bufp; 26753 ptr->generation = BE_32(in->generation); 26754 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26755 26756 /* 26757 * Return the min(listsize, listlen( keys 26758 */ 26759 #ifdef _MULTI_DATAMODEL 26760 26761 switch (ddi_model_convert_from(flag & FMODELS)) { 26762 case DDI_MODEL_ILP32: 26763 resvlist32.listlen = resvlist.listlen; 26764 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26765 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26766 "sd_persistent_reservation_in_read_resv: " 26767 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26768 rval = EFAULT; 26769 goto done; 26770 } 26771 break; 26772 26773 case DDI_MODEL_NONE: 26774 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26775 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26776 "sd_persistent_reservation_in_read_resv: " 26777 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26778 rval = EFAULT; 26779 goto done; 26780 } 26781 break; 26782 } 26783 26784 #else /* ! _MULTI_DATAMODEL */ 26785 26786 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26787 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26788 "sd_persistent_reservation_in_read_resv: " 26789 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26790 rval = EFAULT; 26791 goto done; 26792 } 26793 26794 #endif /* ! 
_MULTI_DATAMODEL */ 26795 26796 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26797 bufp = resvlist.list; 26798 copysz = sizeof (mhioc_resv_desc_t); 26799 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26800 i++, readresv_ptr++, bufp++) { 26801 26802 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26803 MHIOC_RESV_KEY_SIZE); 26804 resvdesc.type = readresv_ptr->type; 26805 resvdesc.scope = readresv_ptr->scope; 26806 resvdesc.scope_specific_addr = 26807 BE_32(readresv_ptr->scope_specific_addr); 26808 26809 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26810 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26811 "sd_persistent_reservation_in_read_resv: " 26812 "failed ddi_copyout: resvlist\n"); 26813 rval = EFAULT; 26814 goto done; 26815 } 26816 } 26817 done: 26818 kmem_free(data_bufp, data_len); 26819 return (rval); 26820 } 26821 26822 26823 /* 26824 * Function: sr_change_blkmode() 26825 * 26826 * Description: This routine is the driver entry point for handling CD-ROM 26827 * block mode ioctl requests. Support for returning and changing 26828 * the current block size in use by the device is implemented. The 26829 * LBA size is changed via a MODE SELECT Block Descriptor. 26830 * 26831 * This routine issues a mode sense with an allocation length of 26832 * 12 bytes for the mode page header and a single block descriptor. 26833 * 26834 * Arguments: dev - the device 'dev_t' 26835 * cmd - the request type; one of CDROMGBLKMODE (get) or 26836 * CDROMSBLKMODE (set) 26837 * data - current block size or requested block size 26838 * flag - this argument is a pass through to ddi_copyxxx() directly 26839 * from the mode argument of ioctl(). 26840 * 26841 * Return Code: the code returned by sd_send_scsi_cmd() 26842 * EINVAL if invalid arguments are provided 26843 * EFAULT if ddi_copyxxx() fails 26844 * ENXIO if fail ddi_get_soft_state 26845 * EIO if invalid mode sense block descriptor length 26846 * 26847 */ 26848 26849 static int 26850 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26851 { 26852 struct sd_lun *un = NULL; 26853 struct mode_header *sense_mhp, *select_mhp; 26854 struct block_descriptor *sense_desc, *select_desc; 26855 int current_bsize; 26856 int rval = EINVAL; 26857 uchar_t *sense = NULL; 26858 uchar_t *select = NULL; 26859 26860 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26861 26862 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26863 return (ENXIO); 26864 } 26865 26866 /* 26867 * The block length is changed via the Mode Select block descriptor, the 26868 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26869 * required as part of this routine. Therefore the mode sense allocation 26870 * length is specified to be the length of a mode page header and a 26871 * block descriptor. 
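	 *
	 * That is, assuming the usual SCSI-2 sizes used with these
	 * structures (a 4-byte mode header plus an 8-byte block descriptor):
	 *
	 *	BUFLEN_CHG_BLK_MODE = MODE_HEADER_LENGTH +
	 *	    MODE_BLK_DESC_LENGTH = 4 + 8 = 12 bytes
	 *
	 * which matches the 12-byte allocation length described in the
	 * function header above.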
26872 */ 26873 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26874 26875 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26876 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 26877 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26878 "sr_change_blkmode: Mode Sense Failed\n"); 26879 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26880 return (rval); 26881 } 26882 26883 /* Check the block descriptor len to handle only 1 block descriptor */ 26884 sense_mhp = (struct mode_header *)sense; 26885 if ((sense_mhp->bdesc_length == 0) || 26886 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 26887 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26888 "sr_change_blkmode: Mode Sense returned invalid block" 26889 " descriptor length\n"); 26890 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26891 return (EIO); 26892 } 26893 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 26894 current_bsize = ((sense_desc->blksize_hi << 16) | 26895 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 26896 26897 /* Process command */ 26898 switch (cmd) { 26899 case CDROMGBLKMODE: 26900 /* Return the block size obtained during the mode sense */ 26901 if (ddi_copyout(¤t_bsize, (void *)data, 26902 sizeof (int), flag) != 0) 26903 rval = EFAULT; 26904 break; 26905 case CDROMSBLKMODE: 26906 /* Validate the requested block size */ 26907 switch (data) { 26908 case CDROM_BLK_512: 26909 case CDROM_BLK_1024: 26910 case CDROM_BLK_2048: 26911 case CDROM_BLK_2056: 26912 case CDROM_BLK_2336: 26913 case CDROM_BLK_2340: 26914 case CDROM_BLK_2352: 26915 case CDROM_BLK_2368: 26916 case CDROM_BLK_2448: 26917 case CDROM_BLK_2646: 26918 case CDROM_BLK_2647: 26919 break; 26920 default: 26921 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26922 "sr_change_blkmode: " 26923 "Block Size '%ld' Not Supported\n", data); 26924 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26925 return (EINVAL); 26926 } 26927 26928 /* 26929 * The current block size matches the requested block size so 26930 * there is no need to send the mode select to change the size 26931 */ 26932 if (current_bsize == data) { 26933 break; 26934 } 26935 26936 /* Build the select data for the requested block size */ 26937 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26938 select_mhp = (struct mode_header *)select; 26939 select_desc = 26940 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 26941 /* 26942 * The LBA size is changed via the block descriptor, so the 26943 * descriptor is built according to the user data 26944 */ 26945 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 26946 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 26947 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 26948 select_desc->blksize_lo = (char)((data) & 0x000000ff); 26949 26950 /* Send the mode select for the requested block size */ 26951 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 26952 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26953 SD_PATH_STANDARD)) != 0) { 26954 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26955 "sr_change_blkmode: Mode Select Failed\n"); 26956 /* 26957 * The mode select failed for the requested block size, 26958 * so reset the data for the original block size and 26959 * send it to the target. The error is indicated by the 26960 * return value for the failed mode select. 
26961 */ 26962 select_desc->blksize_hi = sense_desc->blksize_hi; 26963 select_desc->blksize_mid = sense_desc->blksize_mid; 26964 select_desc->blksize_lo = sense_desc->blksize_lo; 26965 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 26966 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26967 SD_PATH_STANDARD); 26968 } else { 26969 ASSERT(!mutex_owned(SD_MUTEX(un))); 26970 mutex_enter(SD_MUTEX(un)); 26971 sd_update_block_info(un, (uint32_t)data, 0); 26972 26973 mutex_exit(SD_MUTEX(un)); 26974 } 26975 break; 26976 default: 26977 /* should not reach here, but check anyway */ 26978 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26979 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 26980 rval = EINVAL; 26981 break; 26982 } 26983 26984 if (select) { 26985 kmem_free(select, BUFLEN_CHG_BLK_MODE); 26986 } 26987 if (sense) { 26988 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26989 } 26990 return (rval); 26991 } 26992 26993 26994 /* 26995 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 26996 * implement driver support for getting and setting the CD speed. The command 26997 * set used will be based on the device type. If the device has not been 26998 * identified as MMC the Toshiba vendor specific mode page will be used. If 26999 * the device is MMC but does not support the Real Time Streaming feature 27000 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 27001 * be used to read the speed. 27002 */ 27003 27004 /* 27005 * Function: sr_change_speed() 27006 * 27007 * Description: This routine is the driver entry point for handling CD-ROM 27008 * drive speed ioctl requests for devices supporting the Toshiba 27009 * vendor specific drive speed mode page. Support for returning 27010 * and changing the current drive speed in use by the device is 27011 * implemented. 27012 * 27013 * Arguments: dev - the device 'dev_t' 27014 * cmd - the request type; one of CDROMGDRVSPEED (get) or 27015 * CDROMSDRVSPEED (set) 27016 * data - current drive speed or requested drive speed 27017 * flag - this argument is a pass through to ddi_copyxxx() directly 27018 * from the mode argument of ioctl(). 27019 * 27020 * Return Code: the code returned by sd_send_scsi_cmd() 27021 * EINVAL if invalid arguments are provided 27022 * EFAULT if ddi_copyxxx() fails 27023 * ENXIO if fail ddi_get_soft_state 27024 * EIO if invalid mode sense block descriptor length 27025 */ 27026 27027 static int 27028 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 27029 { 27030 struct sd_lun *un = NULL; 27031 struct mode_header *sense_mhp, *select_mhp; 27032 struct mode_speed *sense_page, *select_page; 27033 int current_speed; 27034 int rval = EINVAL; 27035 int bd_len; 27036 uchar_t *sense = NULL; 27037 uchar_t *select = NULL; 27038 27039 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 27040 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27041 return (ENXIO); 27042 } 27043 27044 /* 27045 * Note: The drive speed is being modified here according to a Toshiba 27046 * vendor specific mode page (0x31). 
27047 */ 27048 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 27049 27050 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 27051 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 27052 SD_PATH_STANDARD)) != 0) { 27053 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27054 "sr_change_speed: Mode Sense Failed\n"); 27055 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27056 return (rval); 27057 } 27058 sense_mhp = (struct mode_header *)sense; 27059 27060 /* Check the block descriptor len to handle only 1 block descriptor */ 27061 bd_len = sense_mhp->bdesc_length; 27062 if (bd_len > MODE_BLK_DESC_LENGTH) { 27063 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27064 "sr_change_speed: Mode Sense returned invalid block " 27065 "descriptor length\n"); 27066 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27067 return (EIO); 27068 } 27069 27070 sense_page = (struct mode_speed *) 27071 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 27072 current_speed = sense_page->speed; 27073 27074 /* Process command */ 27075 switch (cmd) { 27076 case CDROMGDRVSPEED: 27077 /* Return the drive speed obtained during the mode sense */ 27078 if (current_speed == 0x2) { 27079 current_speed = CDROM_TWELVE_SPEED; 27080 } 27081 if (ddi_copyout(¤t_speed, (void *)data, 27082 sizeof (int), flag) != 0) { 27083 rval = EFAULT; 27084 } 27085 break; 27086 case CDROMSDRVSPEED: 27087 /* Validate the requested drive speed */ 27088 switch ((uchar_t)data) { 27089 case CDROM_TWELVE_SPEED: 27090 data = 0x2; 27091 /*FALLTHROUGH*/ 27092 case CDROM_NORMAL_SPEED: 27093 case CDROM_DOUBLE_SPEED: 27094 case CDROM_QUAD_SPEED: 27095 case CDROM_MAXIMUM_SPEED: 27096 break; 27097 default: 27098 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27099 "sr_change_speed: " 27100 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 27101 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27102 return (EINVAL); 27103 } 27104 27105 /* 27106 * The current drive speed matches the requested drive speed so 27107 * there is no need to send the mode select to change the speed 27108 */ 27109 if (current_speed == data) { 27110 break; 27111 } 27112 27113 /* Build the select data for the requested drive speed */ 27114 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 27115 select_mhp = (struct mode_header *)select; 27116 select_mhp->bdesc_length = 0; 27117 select_page = 27118 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 27119 select_page = 27120 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 27121 select_page->mode_page.code = CDROM_MODE_SPEED; 27122 select_page->mode_page.length = 2; 27123 select_page->speed = (uchar_t)data; 27124 27125 /* Send the mode select for the requested block size */ 27126 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 27127 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 27128 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 27129 /* 27130 * The mode select failed for the requested drive speed, 27131 * so reset the data for the original drive speed and 27132 * send it to the target. The error is indicated by the 27133 * return value for the failed mode select. 
27134 */ 27135 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27136 "sr_drive_speed: Mode Select Failed\n"); 27137 select_page->speed = sense_page->speed; 27138 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 27139 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 27140 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27141 } 27142 break; 27143 default: 27144 /* should not reach here, but check anyway */ 27145 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27146 "sr_change_speed: Command '%x' Not Supported\n", cmd); 27147 rval = EINVAL; 27148 break; 27149 } 27150 27151 if (select) { 27152 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 27153 } 27154 if (sense) { 27155 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27156 } 27157 27158 return (rval); 27159 } 27160 27161 27162 /* 27163 * Function: sr_atapi_change_speed() 27164 * 27165 * Description: This routine is the driver entry point for handling CD-ROM 27166 * drive speed ioctl requests for MMC devices that do not support 27167 * the Real Time Streaming feature (0x107). 27168 * 27169 * Note: This routine will use the SET SPEED command which may not 27170 * be supported by all devices. 27171 * 27172 * Arguments: dev- the device 'dev_t' 27173 * cmd- the request type; one of CDROMGDRVSPEED (get) or 27174 * CDROMSDRVSPEED (set) 27175 * data- current drive speed or requested drive speed 27176 * flag- this argument is a pass through to ddi_copyxxx() directly 27177 * from the mode argument of ioctl(). 27178 * 27179 * Return Code: the code returned by sd_send_scsi_cmd() 27180 * EINVAL if invalid arguments are provided 27181 * EFAULT if ddi_copyxxx() fails 27182 * ENXIO if fail ddi_get_soft_state 27183 * EIO if invalid mode sense block descriptor length 27184 */ 27185 27186 static int 27187 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 27188 { 27189 struct sd_lun *un; 27190 struct uscsi_cmd *com = NULL; 27191 struct mode_header_grp2 *sense_mhp; 27192 uchar_t *sense_page; 27193 uchar_t *sense = NULL; 27194 char cdb[CDB_GROUP5]; 27195 int bd_len; 27196 int current_speed = 0; 27197 int max_speed = 0; 27198 int rval; 27199 27200 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 27201 27202 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27203 return (ENXIO); 27204 } 27205 27206 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 27207 27208 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 27209 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 27210 SD_PATH_STANDARD)) != 0) { 27211 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27212 "sr_atapi_change_speed: Mode Sense Failed\n"); 27213 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27214 return (rval); 27215 } 27216 27217 /* Check the block descriptor len to handle only 1 block descriptor */ 27218 sense_mhp = (struct mode_header_grp2 *)sense; 27219 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 27220 if (bd_len > MODE_BLK_DESC_LENGTH) { 27221 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27222 "sr_atapi_change_speed: Mode Sense returned invalid " 27223 "block descriptor length\n"); 27224 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27225 return (EIO); 27226 } 27227 27228 /* Calculate the current and maximum drive speeds */ 27229 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 27230 current_speed = (sense_page[14] << 8) | sense_page[15]; 27231 max_speed = (sense_page[8] << 8) | sense_page[9]; 27232 27233 /* Process the command */ 27234 switch (cmd) { 27235 case CDROMGDRVSPEED: 27236 current_speed /= SD_SPEED_1X; 27237 if 
(ddi_copyout(¤t_speed, (void *)data, 27238 sizeof (int), flag) != 0) 27239 rval = EFAULT; 27240 break; 27241 case CDROMSDRVSPEED: 27242 /* Convert the speed code to KB/sec */ 27243 switch ((uchar_t)data) { 27244 case CDROM_NORMAL_SPEED: 27245 current_speed = SD_SPEED_1X; 27246 break; 27247 case CDROM_DOUBLE_SPEED: 27248 current_speed = 2 * SD_SPEED_1X; 27249 break; 27250 case CDROM_QUAD_SPEED: 27251 current_speed = 4 * SD_SPEED_1X; 27252 break; 27253 case CDROM_TWELVE_SPEED: 27254 current_speed = 12 * SD_SPEED_1X; 27255 break; 27256 case CDROM_MAXIMUM_SPEED: 27257 current_speed = 0xffff; 27258 break; 27259 default: 27260 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27261 "sr_atapi_change_speed: invalid drive speed %d\n", 27262 (uchar_t)data); 27263 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27264 return (EINVAL); 27265 } 27266 27267 /* Check the request against the drive's max speed. */ 27268 if (current_speed != 0xffff) { 27269 if (current_speed > max_speed) { 27270 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27271 return (EINVAL); 27272 } 27273 } 27274 27275 /* 27276 * Build and send the SET SPEED command 27277 * 27278 * Note: The SET SPEED (0xBB) command used in this routine is 27279 * obsolete per the SCSI MMC spec but still supported in the 27280 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 27281 * therefore the command is still implemented in this routine. 27282 */ 27283 bzero(cdb, sizeof (cdb)); 27284 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 27285 cdb[2] = (uchar_t)(current_speed >> 8); 27286 cdb[3] = (uchar_t)current_speed; 27287 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27288 com->uscsi_cdb = (caddr_t)cdb; 27289 com->uscsi_cdblen = CDB_GROUP5; 27290 com->uscsi_bufaddr = NULL; 27291 com->uscsi_buflen = 0; 27292 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27293 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, 0, 27294 UIO_SYSSPACE, SD_PATH_STANDARD); 27295 break; 27296 default: 27297 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27298 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 27299 rval = EINVAL; 27300 } 27301 27302 if (sense) { 27303 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27304 } 27305 if (com) { 27306 kmem_free(com, sizeof (*com)); 27307 } 27308 return (rval); 27309 } 27310 27311 27312 /* 27313 * Function: sr_pause_resume() 27314 * 27315 * Description: This routine is the driver entry point for handling CD-ROM 27316 * pause/resume ioctl requests. This only affects the audio play 27317 * operation. 27318 * 27319 * Arguments: dev - the device 'dev_t' 27320 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 27321 * for setting the resume bit of the cdb. 
27322 * 27323 * Return Code: the code returned by sd_send_scsi_cmd() 27324 * EINVAL if invalid mode specified 27325 * 27326 */ 27327 27328 static int 27329 sr_pause_resume(dev_t dev, int cmd) 27330 { 27331 struct sd_lun *un; 27332 struct uscsi_cmd *com; 27333 char cdb[CDB_GROUP1]; 27334 int rval; 27335 27336 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27337 return (ENXIO); 27338 } 27339 27340 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27341 bzero(cdb, CDB_GROUP1); 27342 cdb[0] = SCMD_PAUSE_RESUME; 27343 switch (cmd) { 27344 case CDROMRESUME: 27345 cdb[8] = 1; 27346 break; 27347 case CDROMPAUSE: 27348 cdb[8] = 0; 27349 break; 27350 default: 27351 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 27352 " Command '%x' Not Supported\n", cmd); 27353 rval = EINVAL; 27354 goto done; 27355 } 27356 27357 com->uscsi_cdb = cdb; 27358 com->uscsi_cdblen = CDB_GROUP1; 27359 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27360 27361 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27362 UIO_SYSSPACE, SD_PATH_STANDARD); 27363 27364 done: 27365 kmem_free(com, sizeof (*com)); 27366 return (rval); 27367 } 27368 27369 27370 /* 27371 * Function: sr_play_msf() 27372 * 27373 * Description: This routine is the driver entry point for handling CD-ROM 27374 * ioctl requests to output the audio signals at the specified 27375 * starting address and continue the audio play until the specified 27376 * ending address (CDROMPLAYMSF) The address is in Minute Second 27377 * Frame (MSF) format. 27378 * 27379 * Arguments: dev - the device 'dev_t' 27380 * data - pointer to user provided audio msf structure, 27381 * specifying start/end addresses. 27382 * flag - this argument is a pass through to ddi_copyxxx() 27383 * directly from the mode argument of ioctl(). 
27384 * 27385 * Return Code: the code returned by sd_send_scsi_cmd() 27386 * EFAULT if ddi_copyxxx() fails 27387 * ENXIO if fail ddi_get_soft_state 27388 * EINVAL if data pointer is NULL 27389 */ 27390 27391 static int 27392 sr_play_msf(dev_t dev, caddr_t data, int flag) 27393 { 27394 struct sd_lun *un; 27395 struct uscsi_cmd *com; 27396 struct cdrom_msf msf_struct; 27397 struct cdrom_msf *msf = &msf_struct; 27398 char cdb[CDB_GROUP1]; 27399 int rval; 27400 27401 if (data == NULL) { 27402 return (EINVAL); 27403 } 27404 27405 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27406 return (ENXIO); 27407 } 27408 27409 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27410 return (EFAULT); 27411 } 27412 27413 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27414 bzero(cdb, CDB_GROUP1); 27415 cdb[0] = SCMD_PLAYAUDIO_MSF; 27416 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27417 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27418 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27419 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27420 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27421 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27422 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27423 } else { 27424 cdb[3] = msf->cdmsf_min0; 27425 cdb[4] = msf->cdmsf_sec0; 27426 cdb[5] = msf->cdmsf_frame0; 27427 cdb[6] = msf->cdmsf_min1; 27428 cdb[7] = msf->cdmsf_sec1; 27429 cdb[8] = msf->cdmsf_frame1; 27430 } 27431 com->uscsi_cdb = cdb; 27432 com->uscsi_cdblen = CDB_GROUP1; 27433 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27434 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27435 UIO_SYSSPACE, SD_PATH_STANDARD); 27436 kmem_free(com, sizeof (*com)); 27437 return (rval); 27438 } 27439 27440 27441 /* 27442 * Function: sr_play_trkind() 27443 * 27444 * Description: This routine is the driver entry point for handling CD-ROM 27445 * ioctl requests to output the audio signals at the specified 27446 * starting address and continue the audio play until the specified 27447 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27448 * format. 27449 * 27450 * Arguments: dev - the device 'dev_t' 27451 * data - pointer to user provided audio track/index structure, 27452 * specifying start/end addresses. 27453 * flag - this argument is a pass through to ddi_copyxxx() 27454 * directly from the mode argument of ioctl(). 
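 *
 * Example: a sketch of the corresponding user-level call (illustrative
 * only; assumes an open descriptor fd as in the sr_pause_resume()
 * example; the track/index values are hypothetical):
 *
 *	struct cdrom_ti ti;
 *
 *	ti.cdti_trk0 = 1;
 *	ti.cdti_ind0 = 1;
 *	ti.cdti_trk1 = 3;
 *	ti.cdti_ind1 = 1;
 *	if (ioctl(fd, CDROMPLAYTRKIND, &ti) != 0)
 *		perror("CDROMPLAYTRKIND");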
27455 * 27456 * Return Code: the code returned by sd_send_scsi_cmd() 27457 * EFAULT if ddi_copyxxx() fails 27458 * ENXIO if fail ddi_get_soft_state 27459 * EINVAL if data pointer is NULL 27460 */ 27461 27462 static int 27463 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27464 { 27465 struct cdrom_ti ti_struct; 27466 struct cdrom_ti *ti = &ti_struct; 27467 struct uscsi_cmd *com = NULL; 27468 char cdb[CDB_GROUP1]; 27469 int rval; 27470 27471 if (data == NULL) { 27472 return (EINVAL); 27473 } 27474 27475 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27476 return (EFAULT); 27477 } 27478 27479 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27480 bzero(cdb, CDB_GROUP1); 27481 cdb[0] = SCMD_PLAYAUDIO_TI; 27482 cdb[4] = ti->cdti_trk0; 27483 cdb[5] = ti->cdti_ind0; 27484 cdb[7] = ti->cdti_trk1; 27485 cdb[8] = ti->cdti_ind1; 27486 com->uscsi_cdb = cdb; 27487 com->uscsi_cdblen = CDB_GROUP1; 27488 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27489 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27490 UIO_SYSSPACE, SD_PATH_STANDARD); 27491 kmem_free(com, sizeof (*com)); 27492 return (rval); 27493 } 27494 27495 27496 /* 27497 * Function: sr_read_all_subcodes() 27498 * 27499 * Description: This routine is the driver entry point for handling CD-ROM 27500 * ioctl requests to return raw subcode data while the target is 27501 * playing audio (CDROMSUBCODE). 27502 * 27503 * Arguments: dev - the device 'dev_t' 27504 * data - pointer to user provided cdrom subcode structure, 27505 * specifying the transfer length and address. 27506 * flag - this argument is a pass through to ddi_copyxxx() 27507 * directly from the mode argument of ioctl(). 27508 * 27509 * Return Code: the code returned by sd_send_scsi_cmd() 27510 * EFAULT if ddi_copyxxx() fails 27511 * ENXIO if fail ddi_get_soft_state 27512 * EINVAL if data pointer is NULL 27513 */ 27514 27515 static int 27516 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27517 { 27518 struct sd_lun *un = NULL; 27519 struct uscsi_cmd *com = NULL; 27520 struct cdrom_subcode *subcode = NULL; 27521 int rval; 27522 size_t buflen; 27523 char cdb[CDB_GROUP5]; 27524 27525 #ifdef _MULTI_DATAMODEL 27526 /* To support ILP32 applications in an LP64 world */ 27527 struct cdrom_subcode32 cdrom_subcode32; 27528 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27529 #endif 27530 if (data == NULL) { 27531 return (EINVAL); 27532 } 27533 27534 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27535 return (ENXIO); 27536 } 27537 27538 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27539 27540 #ifdef _MULTI_DATAMODEL 27541 switch (ddi_model_convert_from(flag & FMODELS)) { 27542 case DDI_MODEL_ILP32: 27543 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27544 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27545 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27546 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27547 return (EFAULT); 27548 } 27549 /* Convert the ILP32 uscsi data from the application to LP64 */ 27550 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27551 break; 27552 case DDI_MODEL_NONE: 27553 if (ddi_copyin(data, subcode, 27554 sizeof (struct cdrom_subcode), flag)) { 27555 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27556 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27557 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27558 return (EFAULT); 27559 } 27560 break; 27561 } 27562 #else /* ! 
_MULTI_DATAMODEL */ 27563 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27564 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27565 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27566 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27567 return (EFAULT); 27568 } 27569 #endif /* _MULTI_DATAMODEL */ 27570 27571 /* 27572 * Since MMC-2 expects max 3 bytes for length, check if the 27573 * length input is greater than 3 bytes. 27574 */ 27575 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27576 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27577 "sr_read_all_subcodes: " 27578 "cdrom transfer length too large: %d (limit %d)\n", 27579 subcode->cdsc_length, 0xFFFFFF); 27580 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27581 return (EINVAL); 27582 } 27583 27584 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27585 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27586 bzero(cdb, CDB_GROUP5); 27587 27588 if (un->un_f_mmc_cap == TRUE) { 27589 cdb[0] = (char)SCMD_READ_CD; 27590 cdb[2] = (char)0xff; 27591 cdb[3] = (char)0xff; 27592 cdb[4] = (char)0xff; 27593 cdb[5] = (char)0xff; 27594 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27595 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27596 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27597 cdb[10] = 1; 27598 } else { 27599 /* 27600 * Note: A vendor specific command (0xDF) is being used here to 27601 * request a read of all subcodes. 27602 */ 27603 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27604 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27605 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27606 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27607 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27608 } 27609 com->uscsi_cdb = cdb; 27610 com->uscsi_cdblen = CDB_GROUP5; 27611 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27612 com->uscsi_buflen = buflen; 27613 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27614 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27615 UIO_SYSSPACE, SD_PATH_STANDARD); 27616 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27617 kmem_free(com, sizeof (*com)); 27618 return (rval); 27619 } 27620 27621 27622 /* 27623 * Function: sr_read_subchannel() 27624 * 27625 * Description: This routine is the driver entry point for handling CD-ROM 27626 * ioctl requests to return the Q sub-channel data of the CD 27627 * current position block. (CDROMSUBCHNL) The data includes the 27628 * track number, index number, absolute CD-ROM address (LBA or MSF 27629 * format per the user), track relative CD-ROM address (LBA or MSF 27630 * format per the user), control data and audio status. 27631 * 27632 * Arguments: dev - the device 'dev_t' 27633 * data - pointer to user provided cdrom sub-channel structure 27634 * flag - this argument is a pass through to ddi_copyxxx() 27635 * directly from the mode argument of ioctl().
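 *
 * Example: a sketch of the corresponding user-level call (illustrative
 * only; assumes an open descriptor fd as in the sr_pause_resume()
 * example):
 *
 *	struct cdrom_subchnl sc;
 *
 *	sc.cdsc_format = CDROM_MSF;
 *	if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *		(void) printf("track %d index %d at %02d:%02d\n",
 *		    sc.cdsc_trk, sc.cdsc_ind,
 *		    sc.cdsc_absaddr.msf.minute,
 *		    sc.cdsc_absaddr.msf.second);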
27636 * 27637 * Return Code: the code returned by sd_send_scsi_cmd() 27638 * EFAULT if ddi_copyxxx() fails 27639 * ENXIO if fail ddi_get_soft_state 27640 * EINVAL if data pointer is NULL 27641 */ 27642 27643 static int 27644 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27645 { 27646 struct sd_lun *un; 27647 struct uscsi_cmd *com; 27648 struct cdrom_subchnl subchanel; 27649 struct cdrom_subchnl *subchnl = &subchanel; 27650 char cdb[CDB_GROUP1]; 27651 caddr_t buffer; 27652 int rval; 27653 27654 if (data == NULL) { 27655 return (EINVAL); 27656 } 27657 27658 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27659 (un->un_state == SD_STATE_OFFLINE)) { 27660 return (ENXIO); 27661 } 27662 27663 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27664 return (EFAULT); 27665 } 27666 27667 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27668 bzero(cdb, CDB_GROUP1); 27669 cdb[0] = SCMD_READ_SUBCHANNEL; 27670 /* Set the MSF bit based on the user requested address format */ 27671 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27672 /* 27673 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27674 * returned 27675 */ 27676 cdb[2] = 0x40; 27677 /* 27678 * Set byte 3 to specify the return data format. A value of 0x01 27679 * indicates that the CD-ROM current position should be returned. 27680 */ 27681 cdb[3] = 0x01; 27682 cdb[8] = 0x10; 27683 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27684 com->uscsi_cdb = cdb; 27685 com->uscsi_cdblen = CDB_GROUP1; 27686 com->uscsi_bufaddr = buffer; 27687 com->uscsi_buflen = 16; 27688 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27689 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27690 UIO_SYSSPACE, SD_PATH_STANDARD); 27691 if (rval != 0) { 27692 kmem_free(buffer, 16); 27693 kmem_free(com, sizeof (*com)); 27694 return (rval); 27695 } 27696 27697 /* Process the returned Q sub-channel data */ 27698 subchnl->cdsc_audiostatus = buffer[1]; 27699 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27700 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27701 subchnl->cdsc_trk = buffer[6]; 27702 subchnl->cdsc_ind = buffer[7]; 27703 if (subchnl->cdsc_format & CDROM_LBA) { 27704 subchnl->cdsc_absaddr.lba = 27705 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27706 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27707 subchnl->cdsc_reladdr.lba = 27708 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27709 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27710 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27711 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27712 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27713 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27714 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27715 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27716 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27717 } else { 27718 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27719 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27720 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27721 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27722 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27723 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27724 } 27725 kmem_free(buffer, 16); 27726 kmem_free(com, sizeof (*com)); 27727 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27728 != 0) { 27729 return (EFAULT); 27730 } 27731 return (rval); 27732 } 27733 27734 27735 /* 27736 * Function: 
sr_read_tocentry() 27737 * 27738 * Description: This routine is the driver entry point for handling CD-ROM 27739 * ioctl requests to read from the Table of Contents (TOC) 27740 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27741 * fields, the starting address (LBA or MSF format per the user) 27742 * and the data mode if the user specified track is a data track. 27743 * 27744 * Note: The READ HEADER (0x44) command used in this routine is 27745 * obsolete per the SCSI MMC spec but still supported in the 27746 * MT FUJI vendor spec. Most equipment adheres to MT FUJI; 27747 * therefore the command is still implemented in this routine. 27748 * 27749 * Arguments: dev - the device 'dev_t' 27750 * data - pointer to user provided toc entry structure, 27751 * specifying the track # and the address format 27752 * (LBA or MSF). 27753 * flag - this argument is a pass through to ddi_copyxxx() 27754 * directly from the mode argument of ioctl(). 27755 * 27756 * Return Code: the code returned by sd_send_scsi_cmd() 27757 * EFAULT if ddi_copyxxx() fails 27758 * ENXIO if fail ddi_get_soft_state 27759 * EINVAL if data pointer is NULL 27760 */ 27761 27762 static int 27763 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27764 { 27765 struct sd_lun *un = NULL; 27766 struct uscsi_cmd *com; 27767 struct cdrom_tocentry toc_entry; 27768 struct cdrom_tocentry *entry = &toc_entry; 27769 caddr_t buffer; 27770 int rval; 27771 char cdb[CDB_GROUP1]; 27772 27773 if (data == NULL) { 27774 return (EINVAL); 27775 } 27776 27777 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27778 (un->un_state == SD_STATE_OFFLINE)) { 27779 return (ENXIO); 27780 } 27781 27782 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27783 return (EFAULT); 27784 } 27785 27786 /* Validate the requested track and address format */ 27787 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27788 return (EINVAL); 27789 } 27790 27791 if (entry->cdte_track == 0) { 27792 return (EINVAL); 27793 } 27794 27795 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27796 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27797 bzero(cdb, CDB_GROUP1); 27798 27799 cdb[0] = SCMD_READ_TOC; 27800 /* Set the MSF bit based on the user requested address format */ 27801 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27802 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27803 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27804 } else { 27805 cdb[6] = entry->cdte_track; 27806 } 27807 27808 /* 27809 * Bytes 7 & 8 hold the allocation length: 12 bytes for a single entry.
27810 * (4 byte TOC response header + 8 byte track descriptor) 27811 */ 27812 cdb[8] = 12; 27813 com->uscsi_cdb = cdb; 27814 com->uscsi_cdblen = CDB_GROUP1; 27815 com->uscsi_bufaddr = buffer; 27816 com->uscsi_buflen = 0x0C; 27817 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27818 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27819 UIO_SYSSPACE, SD_PATH_STANDARD); 27820 if (rval != 0) { 27821 kmem_free(buffer, 12); 27822 kmem_free(com, sizeof (*com)); 27823 return (rval); 27824 } 27825 27826 /* Process the toc entry */ 27827 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27828 entry->cdte_ctrl = (buffer[5] & 0x0F); 27829 if (entry->cdte_format & CDROM_LBA) { 27830 entry->cdte_addr.lba = 27831 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27832 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27833 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27834 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27835 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27836 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27837 /* 27838 * Send a READ TOC command using the LBA address format to get 27839 * the LBA for the track requested so it can be used in the 27840 * READ HEADER request. 27841 * 27842 * Note: The MSF bit of the READ HEADER command specifies the 27843 * output format. The block address specified in that command 27844 * must be in LBA format. 27845 */ 27846 cdb[1] = 0; 27847 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27848 UIO_SYSSPACE, SD_PATH_STANDARD); 27849 if (rval != 0) { 27850 kmem_free(buffer, 12); 27851 kmem_free(com, sizeof (*com)); 27852 return (rval); 27853 } 27854 } else { 27855 entry->cdte_addr.msf.minute = buffer[9]; 27856 entry->cdte_addr.msf.second = buffer[10]; 27857 entry->cdte_addr.msf.frame = buffer[11]; 27858 /* 27859 * Send a READ TOC command using the LBA address format to get 27860 * the LBA for the track requested so it can be used in the 27861 * READ HEADER request. 27862 * 27863 * Note: The MSF bit of the READ HEADER command specifies the 27864 * output format. The block address specified in that command 27865 * must be in LBA format. 27866 */ 27867 cdb[1] = 0; 27868 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27869 UIO_SYSSPACE, SD_PATH_STANDARD); 27870 if (rval != 0) { 27871 kmem_free(buffer, 12); 27872 kmem_free(com, sizeof (*com)); 27873 return (rval); 27874 } 27875 } 27876 27877 /* 27878 * Build and send the READ HEADER command to determine the data mode of 27879 * the user specified track. 27880 */ 27881 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27882 (entry->cdte_track != CDROM_LEADOUT)) { 27883 bzero(cdb, CDB_GROUP1); 27884 cdb[0] = SCMD_READ_HEADER; 27885 cdb[2] = buffer[8]; 27886 cdb[3] = buffer[9]; 27887 cdb[4] = buffer[10]; 27888 cdb[5] = buffer[11]; 27889 cdb[8] = 0x08; 27890 com->uscsi_buflen = 0x08; 27891 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27892 UIO_SYSSPACE, SD_PATH_STANDARD); 27893 if (rval == 0) { 27894 entry->cdte_datamode = buffer[0]; 27895 } else { 27896 /* 27897 * The READ HEADER command failed; since it is 27898 * obsolete in one spec, it's better to return 27899 * -1 for an invalid track so that we can still 27900 * receive the rest of the TOC data.
27901 */ 27902 entry->cdte_datamode = (uchar_t)-1; 27903 } 27904 } else { 27905 entry->cdte_datamode = (uchar_t)-1; 27906 } 27907 27908 kmem_free(buffer, 12); 27909 kmem_free(com, sizeof (*com)); 27910 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 27911 return (EFAULT); 27912 27913 return (rval); 27914 } 27915 27916 27917 /* 27918 * Function: sr_read_tochdr() 27919 * 27920 * Description: This routine is the driver entry point for handling CD-ROM 27921 * ioctl requests to read the Table of Contents (TOC) header 27922 * (CDROMREADTOHDR). The TOC header consists of the disk starting 27923 * and ending track numbers 27924 * 27925 * Arguments: dev - the device 'dev_t' 27926 * data - pointer to user provided toc header structure, 27927 * specifying the starting and ending track numbers. 27928 * flag - this argument is a pass through to ddi_copyxxx() 27929 * directly from the mode argument of ioctl(). 27930 * 27931 * Return Code: the code returned by sd_send_scsi_cmd() 27932 * EFAULT if ddi_copyxxx() fails 27933 * ENXIO if fail ddi_get_soft_state 27934 * EINVAL if data pointer is NULL 27935 */ 27936 27937 static int 27938 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 27939 { 27940 struct sd_lun *un; 27941 struct uscsi_cmd *com; 27942 struct cdrom_tochdr toc_header; 27943 struct cdrom_tochdr *hdr = &toc_header; 27944 char cdb[CDB_GROUP1]; 27945 int rval; 27946 caddr_t buffer; 27947 27948 if (data == NULL) { 27949 return (EINVAL); 27950 } 27951 27952 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27953 (un->un_state == SD_STATE_OFFLINE)) { 27954 return (ENXIO); 27955 } 27956 27957 buffer = kmem_zalloc(4, KM_SLEEP); 27958 bzero(cdb, CDB_GROUP1); 27959 cdb[0] = SCMD_READ_TOC; 27960 /* 27961 * Specifying a track number of 0x00 in the READ TOC command indicates 27962 * that the TOC header should be returned 27963 */ 27964 cdb[6] = 0x00; 27965 /* 27966 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 27967 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 27968 */ 27969 cdb[8] = 0x04; 27970 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27971 com->uscsi_cdb = cdb; 27972 com->uscsi_cdblen = CDB_GROUP1; 27973 com->uscsi_bufaddr = buffer; 27974 com->uscsi_buflen = 0x04; 27975 com->uscsi_timeout = 300; 27976 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27977 27978 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27979 UIO_SYSSPACE, SD_PATH_STANDARD); 27980 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27981 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 27982 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 27983 } else { 27984 hdr->cdth_trk0 = buffer[2]; 27985 hdr->cdth_trk1 = buffer[3]; 27986 } 27987 kmem_free(buffer, 4); 27988 kmem_free(com, sizeof (*com)); 27989 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 27990 return (EFAULT); 27991 } 27992 return (rval); 27993 } 27994 27995 27996 /* 27997 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 27998 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 27999 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 28000 * digital audio and extended architecture digital audio. These modes are 28001 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 28002 * MMC specs. 
28003 * 28004 * In addition to support for the various data formats these routines also 28005 * include support for devices that implement only the direct access READ 28006 * commands (0x08, 0x28), devices that implement the READ_CD commands 28007 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 28008 * READ CDXA commands (0xD8, 0xDB) 28009 */ 28010 28011 /* 28012 * Function: sr_read_mode1() 28013 * 28014 * Description: This routine is the driver entry point for handling CD-ROM 28015 * ioctl read mode1 requests (CDROMREADMODE1). 28016 * 28017 * Arguments: dev - the device 'dev_t' 28018 * data - pointer to user provided cd read structure specifying 28019 * the lba buffer address and length. 28020 * flag - this argument is a pass through to ddi_copyxxx() 28021 * directly from the mode argument of ioctl(). 28022 * 28023 * Return Code: the code returned by sd_send_scsi_cmd() 28024 * EFAULT if ddi_copyxxx() fails 28025 * ENXIO if fail ddi_get_soft_state 28026 * EINVAL if data pointer is NULL 28027 */ 28028 28029 static int 28030 sr_read_mode1(dev_t dev, caddr_t data, int flag) 28031 { 28032 struct sd_lun *un; 28033 struct cdrom_read mode1_struct; 28034 struct cdrom_read *mode1 = &mode1_struct; 28035 int rval; 28036 #ifdef _MULTI_DATAMODEL 28037 /* To support ILP32 applications in an LP64 world */ 28038 struct cdrom_read32 cdrom_read32; 28039 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28040 #endif /* _MULTI_DATAMODEL */ 28041 28042 if (data == NULL) { 28043 return (EINVAL); 28044 } 28045 28046 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28047 (un->un_state == SD_STATE_OFFLINE)) { 28048 return (ENXIO); 28049 } 28050 28051 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28052 "sd_read_mode1: entry: un:0x%p\n", un); 28053 28054 #ifdef _MULTI_DATAMODEL 28055 switch (ddi_model_convert_from(flag & FMODELS)) { 28056 case DDI_MODEL_ILP32: 28057 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28058 return (EFAULT); 28059 } 28060 /* Convert the ILP32 uscsi data from the application to LP64 */ 28061 cdrom_read32tocdrom_read(cdrd32, mode1); 28062 break; 28063 case DDI_MODEL_NONE: 28064 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28065 return (EFAULT); 28066 } 28067 } 28068 #else /* ! _MULTI_DATAMODEL */ 28069 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28070 return (EFAULT); 28071 } 28072 #endif /* _MULTI_DATAMODEL */ 28073 28074 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 28075 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 28076 28077 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28078 "sd_read_mode1: exit: un:0x%p\n", un); 28079 28080 return (rval); 28081 } 28082 28083 28084 /* 28085 * Function: sr_read_cd_mode2() 28086 * 28087 * Description: This routine is the driver entry point for handling CD-ROM 28088 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 28089 * support the READ CD (0xBE) command or the 1st generation 28090 * READ CD (0xD4) command. 28091 * 28092 * Arguments: dev - the device 'dev_t' 28093 * data - pointer to user provided cd read structure specifying 28094 * the lba buffer address and length. 28095 * flag - this argument is a pass through to ddi_copyxxx() 28096 * directly from the mode argument of ioctl(). 
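 *
 * Example: a sketch of the corresponding user-level call (illustrative
 * only; assumes an open descriptor fd as in the sr_pause_resume()
 * example, and a hypothetical starting lba):
 *
 *	struct cdrom_read cr;
 *	char buf[2336];
 *
 *	cr.cdread_lba = 16;
 *	cr.cdread_bufaddr = buf;
 *	cr.cdread_buflen = sizeof (buf);
 *	if (ioctl(fd, CDROMREADMODE2, &cr) != 0)
 *		perror("CDROMREADMODE2");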
28097 * 28098 * Return Code: the code returned by sd_send_scsi_cmd() 28099 * EFAULT if ddi_copyxxx() fails 28100 * ENXIO if fail ddi_get_soft_state 28101 * EINVAL if data pointer is NULL 28102 */ 28103 28104 static int 28105 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 28106 { 28107 struct sd_lun *un; 28108 struct uscsi_cmd *com; 28109 struct cdrom_read mode2_struct; 28110 struct cdrom_read *mode2 = &mode2_struct; 28111 uchar_t cdb[CDB_GROUP5]; 28112 int nblocks; 28113 int rval; 28114 #ifdef _MULTI_DATAMODEL 28115 /* To support ILP32 applications in an LP64 world */ 28116 struct cdrom_read32 cdrom_read32; 28117 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28118 #endif /* _MULTI_DATAMODEL */ 28119 28120 if (data == NULL) { 28121 return (EINVAL); 28122 } 28123 28124 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28125 (un->un_state == SD_STATE_OFFLINE)) { 28126 return (ENXIO); 28127 } 28128 28129 #ifdef _MULTI_DATAMODEL 28130 switch (ddi_model_convert_from(flag & FMODELS)) { 28131 case DDI_MODEL_ILP32: 28132 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28133 return (EFAULT); 28134 } 28135 /* Convert the ILP32 uscsi data from the application to LP64 */ 28136 cdrom_read32tocdrom_read(cdrd32, mode2); 28137 break; 28138 case DDI_MODEL_NONE: 28139 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28140 return (EFAULT); 28141 } 28142 break; 28143 } 28144 28145 #else /* ! _MULTI_DATAMODEL */ 28146 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28147 return (EFAULT); 28148 } 28149 #endif /* _MULTI_DATAMODEL */ 28150 28151 bzero(cdb, sizeof (cdb)); 28152 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 28153 /* Read command supported by 1st generation atapi drives */ 28154 cdb[0] = SCMD_READ_CDD4; 28155 } else { 28156 /* Universal CD Access Command */ 28157 cdb[0] = SCMD_READ_CD; 28158 } 28159 28160 /* 28161 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book 28162 */ 28163 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 28164 28165 /* set the start address */ 28166 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF); 28167 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF); 28168 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28169 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 28170 28171 /* set the transfer length */ 28172 nblocks = mode2->cdread_buflen / 2336; 28173 cdb[6] = (uchar_t)(nblocks >> 16); 28174 cdb[7] = (uchar_t)(nblocks >> 8); 28175 cdb[8] = (uchar_t)nblocks; 28176 28177 /* set the filter bits */ 28178 cdb[9] = CDROM_READ_CD_USERDATA; 28179 28180 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28181 com->uscsi_cdb = (caddr_t)cdb; 28182 com->uscsi_cdblen = sizeof (cdb); 28183 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28184 com->uscsi_buflen = mode2->cdread_buflen; 28185 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28186 28187 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28188 UIO_SYSSPACE, SD_PATH_STANDARD); 28189 kmem_free(com, sizeof (*com)); 28190 return (rval); 28191 } 28192 28193 28194 /* 28195 * Function: sr_read_mode2() 28196 * 28197 * Description: This routine is the driver entry point for handling CD-ROM 28198 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 28199 * do not support the READ CD (0xBE) command. 28200 * 28201 * Arguments: dev - the device 'dev_t' 28202 * data - pointer to user provided cd read structure specifying 28203 * the lba buffer address and length.
28204 * flag - this argument is a pass through to ddi_copyxxx() 28205 * directly from the mode argument of ioctl(). 28206 * 28207 * Return Code: the code returned by sd_send_scsi_cmd() 28208 * EFAULT if ddi_copyxxx() fails 28209 * ENXIO if fail ddi_get_soft_state 28210 * EINVAL if data pointer is NULL 28211 * EIO if fail to reset block size 28212 * EAGAIN if commands are in progress in the driver 28213 */ 28214 28215 static int 28216 sr_read_mode2(dev_t dev, caddr_t data, int flag) 28217 { 28218 struct sd_lun *un; 28219 struct cdrom_read mode2_struct; 28220 struct cdrom_read *mode2 = &mode2_struct; 28221 int rval; 28222 uint32_t restore_blksize; 28223 struct uscsi_cmd *com; 28224 uchar_t cdb[CDB_GROUP0]; 28225 int nblocks; 28226 28227 #ifdef _MULTI_DATAMODEL 28228 /* To support ILP32 applications in an LP64 world */ 28229 struct cdrom_read32 cdrom_read32; 28230 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28231 #endif /* _MULTI_DATAMODEL */ 28232 28233 if (data == NULL) { 28234 return (EINVAL); 28235 } 28236 28237 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28238 (un->un_state == SD_STATE_OFFLINE)) { 28239 return (ENXIO); 28240 } 28241 28242 /* 28243 * Because this routine will update the device and driver block size 28244 * being used we want to make sure there are no commands in progress. 28245 * If commands are in progress the user will have to try again. 28246 * 28247 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 28248 * in sdioctl to protect commands from sdioctl through to the top of 28249 * sd_uscsi_strategy. See sdioctl for details. 28250 */ 28251 mutex_enter(SD_MUTEX(un)); 28252 if (un->un_ncmds_in_driver != 1) { 28253 mutex_exit(SD_MUTEX(un)); 28254 return (EAGAIN); 28255 } 28256 mutex_exit(SD_MUTEX(un)); 28257 28258 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28259 "sd_read_mode2: entry: un:0x%p\n", un); 28260 28261 #ifdef _MULTI_DATAMODEL 28262 switch (ddi_model_convert_from(flag & FMODELS)) { 28263 case DDI_MODEL_ILP32: 28264 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28265 return (EFAULT); 28266 } 28267 /* Convert the ILP32 uscsi data from the application to LP64 */ 28268 cdrom_read32tocdrom_read(cdrd32, mode2); 28269 break; 28270 case DDI_MODEL_NONE: 28271 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28272 return (EFAULT); 28273 } 28274 break; 28275 } 28276 #else /* ! 
_MULTI_DATAMODEL */ 28277 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 28278 return (EFAULT); 28279 } 28280 #endif /* _MULTI_DATAMODEL */ 28281 28282 /* Store the current target block size for restoration later */ 28283 restore_blksize = un->un_tgt_blocksize; 28284 28285 /* Change the device and soft state target block size to 2336 */ 28286 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 28287 rval = EIO; 28288 goto done; 28289 } 28290 28291 28292 bzero(cdb, sizeof (cdb)); 28293 28294 /* set READ operation */ 28295 cdb[0] = SCMD_READ; 28296 28297 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 28298 mode2->cdread_lba >>= 2; 28299 28300 /* set the start address */ 28301 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F); 28302 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28303 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 28304 28305 /* set the transfer length */ 28306 nblocks = mode2->cdread_buflen / 2336; 28307 cdb[4] = (uchar_t)nblocks & 0xFF; 28308 28309 /* build command */ 28310 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28311 com->uscsi_cdb = (caddr_t)cdb; 28312 com->uscsi_cdblen = sizeof (cdb); 28313 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28314 com->uscsi_buflen = mode2->cdread_buflen; 28315 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28316 28317 /* 28318 * Issue SCSI command with user space address for read buffer. 28319 * 28320 * This sends the command through the main channel in the driver. 28321 * 28322 * Since this is accessed via an IOCTL call, we go through the 28323 * standard path, so that if the device was powered down, then 28324 * it would be 'awakened' to handle the command. 28325 */ 28326 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28327 UIO_SYSSPACE, SD_PATH_STANDARD); 28328 28329 kmem_free(com, sizeof (*com)); 28330 28331 /* Restore the device and soft state target block size */ 28332 if (sr_sector_mode(dev, restore_blksize) != 0) { 28333 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28334 "can't do switch back to mode 1\n"); 28335 /* 28336 * If the read succeeded, we still need to report 28337 * an error because we failed to reset the block size. 28338 */ 28339 if (rval == 0) { 28340 rval = EIO; 28341 } 28342 } 28343 28344 done: 28345 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28346 "sd_read_mode2: exit: un:0x%p\n", un); 28347 28348 return (rval); 28349 } 28350 28351 28352 /* 28353 * Function: sr_sector_mode() 28354 * 28355 * Description: This utility function is used by sr_read_mode2 to set the target 28356 * block size based on the user specified size. This is a legacy 28357 * implementation based upon a vendor specific mode page. 28358 * 28359 * Arguments: dev - the device 'dev_t' 28360 * blksize - the block size to set, either 2336 or 28361 * 512.
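 *
 * For example (an added note): for SD_MODE2_BLKSIZE (2336, i.e. 0x920)
 * the block size field of the page is encoded big-endian below as
 * select[10] = 0x09 and select[11] = 0x20.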
28362 * 28363 * Return Code: the code returned by sd_send_scsi_cmd() 28364 * EFAULT if ddi_copyxxx() fails 28365 * ENXIO if fail ddi_get_soft_state 28366 * EINVAL if data pointer is NULL 28367 */ 28368 28369 static int 28370 sr_sector_mode(dev_t dev, uint32_t blksize) 28371 { 28372 struct sd_lun *un; 28373 uchar_t *sense; 28374 uchar_t *select; 28375 int rval; 28376 28377 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28378 (un->un_state == SD_STATE_OFFLINE)) { 28379 return (ENXIO); 28380 } 28381 28382 sense = kmem_zalloc(20, KM_SLEEP); 28383 28384 /* Note: This is a vendor specific mode page (0x81) */ 28385 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 28386 SD_PATH_STANDARD)) != 0) { 28387 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28388 "sr_sector_mode: Mode Sense failed\n"); 28389 kmem_free(sense, 20); 28390 return (rval); 28391 } 28392 select = kmem_zalloc(20, KM_SLEEP); 28393 select[3] = 0x08; 28394 select[10] = ((blksize >> 8) & 0xff); 28395 select[11] = (blksize & 0xff); 28396 select[12] = 0x01; 28397 select[13] = 0x06; 28398 select[14] = sense[14]; 28399 select[15] = sense[15]; 28400 if (blksize == SD_MODE2_BLKSIZE) { 28401 select[14] |= 0x01; 28402 } 28403 28404 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 28405 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 28406 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28407 "sr_sector_mode: Mode Select failed\n"); 28408 } else { 28409 /* 28410 * Only update the softstate block size if we successfully 28411 * changed the device block mode. 28412 */ 28413 mutex_enter(SD_MUTEX(un)); 28414 sd_update_block_info(un, blksize, 0); 28415 mutex_exit(SD_MUTEX(un)); 28416 } 28417 kmem_free(sense, 20); 28418 kmem_free(select, 20); 28419 return (rval); 28420 } 28421 28422 28423 /* 28424 * Function: sr_read_cdda() 28425 * 28426 * Description: This routine is the driver entry point for handling CD-ROM 28427 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 28428 * the target supports CDDA, these requests are handled via a vendor 28429 * specific command (0xD8). If the target does not support CDDA, 28430 * these requests are handled via the READ CD command (0xBE). 28431 * 28432 * Arguments: dev - the device 'dev_t' 28433 * data - pointer to user provided CD-DA structure specifying 28434 * the track starting address, transfer length, and 28435 * subcode options. 28436 * flag - this argument is a pass through to ddi_copyxxx() 28437 * directly from the mode argument of ioctl().
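 *
 * Example: a sketch of the corresponding user-level call (illustrative
 * only; assumes an open descriptor fd as in the sr_pause_resume()
 * example; the starting address and length are hypothetical, and the
 * field types follow <sys/cdio.h>):
 *
 *	struct cdrom_cdda ca;
 *	char buf[10 * 2352];
 *
 *	ca.cdda_addr = 200;
 *	ca.cdda_length = 10;
 *	ca.cdda_data = (caddr_t)buf;
 *	ca.cdda_subcode = CDROM_DA_NO_SUBCODE;
 *	if (ioctl(fd, CDROMCDDA, &ca) != 0)
 *		perror("CDROMCDDA");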
28438 * 28439 * Return Code: the code returned by sd_send_scsi_cmd() 28440 * EFAULT if ddi_copyxxx() fails 28441 * ENXIO if fail ddi_get_soft_state 28442 * EINVAL if invalid arguments are provided 28443 * ENOTTY 28444 */ 28445 28446 static int 28447 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28448 { 28449 struct sd_lun *un; 28450 struct uscsi_cmd *com; 28451 struct cdrom_cdda *cdda; 28452 int rval; 28453 size_t buflen; 28454 char cdb[CDB_GROUP5]; 28455 28456 #ifdef _MULTI_DATAMODEL 28457 /* To support ILP32 applications in an LP64 world */ 28458 struct cdrom_cdda32 cdrom_cdda32; 28459 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28460 #endif /* _MULTI_DATAMODEL */ 28461 28462 if (data == NULL) { 28463 return (EINVAL); 28464 } 28465 28466 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28467 return (ENXIO); 28468 } 28469 28470 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28471 28472 #ifdef _MULTI_DATAMODEL 28473 switch (ddi_model_convert_from(flag & FMODELS)) { 28474 case DDI_MODEL_ILP32: 28475 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28476 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28477 "sr_read_cdda: ddi_copyin Failed\n"); 28478 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28479 return (EFAULT); 28480 } 28481 /* Convert the ILP32 uscsi data from the application to LP64 */ 28482 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28483 break; 28484 case DDI_MODEL_NONE: 28485 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28486 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28487 "sr_read_cdda: ddi_copyin Failed\n"); 28488 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28489 return (EFAULT); 28490 } 28491 break; 28492 } 28493 #else /* ! _MULTI_DATAMODEL */ 28494 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28495 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28496 "sr_read_cdda: ddi_copyin Failed\n"); 28497 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28498 return (EFAULT); 28499 } 28500 #endif /* _MULTI_DATAMODEL */ 28501 28502 /* 28503 * Since MMC-2 expects max 3 bytes for length, check if the 28504 * length input is greater than 3 bytes 28505 */ 28506 if ((cdda->cdda_length & 0xFF000000) != 0) { 28507 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28508 "cdrom transfer length too large: %d (limit %d)\n", 28509 cdda->cdda_length, 0xFFFFFF); 28510 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28511 return (EINVAL); 28512 } 28513 28514 switch (cdda->cdda_subcode) { 28515 case CDROM_DA_NO_SUBCODE: 28516 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28517 break; 28518 case CDROM_DA_SUBQ: 28519 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28520 break; 28521 case CDROM_DA_ALL_SUBCODE: 28522 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28523 break; 28524 case CDROM_DA_SUBCODE_ONLY: 28525 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28526 break; 28527 default: 28528 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28529 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28530 cdda->cdda_subcode); 28531 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28532 return (EINVAL); 28533 } 28534 28535 /* Build and send the command */ 28536 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28537 bzero(cdb, CDB_GROUP5); 28538 28539 if (un->un_f_cfg_cdda == TRUE) { 28540 cdb[0] = (char)SCMD_READ_CD; 28541 cdb[1] = 0x04; 28542 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28543 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28544 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28545 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28546 
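		/*
		 * In this READ CD (0xBE) CDB, bytes 2-5 above hold the
		 * 32-bit starting block address and bytes 6-8 below hold
		 * the 24-bit transfer length in blocks.
		 */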
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28547 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28548 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28549 cdb[9] = 0x10; 28550 switch (cdda->cdda_subcode) { 28551 case CDROM_DA_NO_SUBCODE : 28552 cdb[10] = 0x0; 28553 break; 28554 case CDROM_DA_SUBQ : 28555 cdb[10] = 0x2; 28556 break; 28557 case CDROM_DA_ALL_SUBCODE : 28558 cdb[10] = 0x1; 28559 break; 28560 case CDROM_DA_SUBCODE_ONLY : 28561 /* FALLTHROUGH */ 28562 default : 28563 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28564 kmem_free(com, sizeof (*com)); 28565 return (ENOTTY); 28566 } 28567 } else { 28568 cdb[0] = (char)SCMD_READ_CDDA; 28569 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28570 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28571 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28572 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28573 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28574 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28575 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28576 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28577 cdb[10] = cdda->cdda_subcode; 28578 } 28579 28580 com->uscsi_cdb = cdb; 28581 com->uscsi_cdblen = CDB_GROUP5; 28582 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28583 com->uscsi_buflen = buflen; 28584 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28585 28586 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28587 UIO_SYSSPACE, SD_PATH_STANDARD); 28588 28589 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28590 kmem_free(com, sizeof (*com)); 28591 return (rval); 28592 } 28593 28594 28595 /* 28596 * Function: sr_read_cdxa() 28597 * 28598 * Description: This routine is the driver entry point for handling CD-ROM 28599 * ioctl requests to return CD-XA (Extended Architecture) data. 28600 * (CDROMCDXA). 28601 * 28602 * Arguments: dev - the device 'dev_t' 28603 * data - pointer to user provided CD-XA structure specifying 28604 * the data starting address, transfer length, and format 28605 * flag - this argument is a pass through to ddi_copyxxx() 28606 * directly from the mode argument of ioctl(). 28607 * 28608 * Return Code: the code returned by sd_send_scsi_cmd() 28609 * EFAULT if ddi_copyxxx() fails 28610 * ENXIO if fail ddi_get_soft_state 28611 * EINVAL if data pointer is NULL 28612 */ 28613 28614 static int 28615 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28616 { 28617 struct sd_lun *un; 28618 struct uscsi_cmd *com; 28619 struct cdrom_cdxa *cdxa; 28620 int rval; 28621 size_t buflen; 28622 char cdb[CDB_GROUP5]; 28623 uchar_t read_flags; 28624 28625 #ifdef _MULTI_DATAMODEL 28626 /* To support ILP32 applications in an LP64 world */ 28627 struct cdrom_cdxa32 cdrom_cdxa32; 28628 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28629 #endif /* _MULTI_DATAMODEL */ 28630 28631 if (data == NULL) { 28632 return (EINVAL); 28633 } 28634 28635 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28636 return (ENXIO); 28637 } 28638 28639 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28640 28641 #ifdef _MULTI_DATAMODEL 28642 switch (ddi_model_convert_from(flag & FMODELS)) { 28643 case DDI_MODEL_ILP32: 28644 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28645 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28646 return (EFAULT); 28647 } 28648 /* 28649 * Convert the ILP32 uscsi data from the 28650 * application to LP64 for internal use. 
28651 */ 28652 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 28653 break; 28654 case DDI_MODEL_NONE: 28655 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28656 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28657 return (EFAULT); 28658 } 28659 break; 28660 } 28661 #else /* ! _MULTI_DATAMODEL */ 28662 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28663 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28664 return (EFAULT); 28665 } 28666 #endif /* _MULTI_DATAMODEL */ 28667 28668 /* 28669 * Since MMC-2 expects max 3 bytes for length, check if the 28670 * length input is greater than 3 bytes. 28671 */ 28672 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 28673 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 28674 "cdrom transfer length too large: %d (limit %d)\n", 28675 cdxa->cdxa_length, 0xFFFFFF); 28676 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28677 return (EINVAL); 28678 } 28679 28680 switch (cdxa->cdxa_format) { 28681 case CDROM_XA_DATA: 28682 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 28683 read_flags = 0x10; 28684 break; 28685 case CDROM_XA_SECTOR_DATA: 28686 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 28687 read_flags = 0xf8; 28688 break; 28689 case CDROM_XA_DATA_W_ERROR: 28690 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 28691 read_flags = 0xfc; 28692 break; 28693 default: 28694 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28695 "sr_read_cdxa: Format '0x%x' Not Supported\n", 28696 cdxa->cdxa_format); 28697 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28698 return (EINVAL); 28699 } 28700 28701 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28702 bzero(cdb, CDB_GROUP5); 28703 if (un->un_f_mmc_cap == TRUE) { 28704 cdb[0] = (char)SCMD_READ_CD; 28705 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28706 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28707 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28708 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28709 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28710 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28711 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 28712 cdb[9] = (char)read_flags; 28713 } else { 28714 /* 28715 * Note: A vendor specific command (0xDB) is being used here to 28716 * request a read of CD-XA data.
28717 */ 28718 cdb[0] = (char)SCMD_READ_CDXA; 28719 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28720 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28721 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28722 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28723 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28724 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28725 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28726 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28727 cdb[10] = cdxa->cdxa_format; 28728 } 28729 com->uscsi_cdb = cdb; 28730 com->uscsi_cdblen = CDB_GROUP5; 28731 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28732 com->uscsi_buflen = buflen; 28733 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28734 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28735 UIO_SYSSPACE, SD_PATH_STANDARD); 28736 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28737 kmem_free(com, sizeof (*com)); 28738 return (rval); 28739 } 28740 28741 28742 /* 28743 * Function: sr_eject() 28744 * 28745 * Description: This routine is the driver entry point for handling CD-ROM 28746 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 28747 * 28748 * Arguments: dev - the device 'dev_t' 28749 * 28750 * Return Code: the code returned by sd_send_scsi_cmd() 28751 */ 28752 28753 static int 28754 sr_eject(dev_t dev) 28755 { 28756 struct sd_lun *un; 28757 int rval; 28758 28759 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28760 (un->un_state == SD_STATE_OFFLINE)) { 28761 return (ENXIO); 28762 } 28763 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 28764 SD_PATH_STANDARD)) != 0) { 28765 return (rval); 28766 } 28767 28768 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 28769 SD_PATH_STANDARD); 28770 28771 if (rval == 0) { 28772 mutex_enter(SD_MUTEX(un)); 28773 sr_ejected(un); 28774 un->un_mediastate = DKIO_EJECTED; 28775 cv_broadcast(&un->un_state_cv); 28776 mutex_exit(SD_MUTEX(un)); 28777 } 28778 return (rval); 28779 } 28780 28781 28782 /* 28783 * Function: sr_ejected() 28784 * 28785 * Description: This routine updates the soft state structure to invalidate the 28786 * geometry information after the media has been ejected or a 28787 * media eject has been detected. 28788 * 28789 * Arguments: un - driver soft state (unit) structure 28790 */ 28791 28792 static void 28793 sr_ejected(struct sd_lun *un) 28794 { 28795 struct sd_errstats *stp; 28796 28797 ASSERT(un != NULL); 28798 ASSERT(mutex_owned(SD_MUTEX(un))); 28799 28800 un->un_f_blockcount_is_valid = FALSE; 28801 un->un_f_tgt_blocksize_is_valid = FALSE; 28802 un->un_f_geometry_is_valid = FALSE; 28803 28804 if (un->un_errstats != NULL) { 28805 stp = (struct sd_errstats *)un->un_errstats->ks_data; 28806 stp->sd_capacity.value.ui64 = 0; 28807 } 28808 } 28809 28810 28811 /* 28812 * Function: sr_check_wp() 28813 * 28814 * Description: This routine checks the write protection of a removable media 28815 * disk via the write protect bit of the Mode Page Header device 28816 * specific field. This routine has been implemented to use the 28817 * error recovery mode page for all device types. 28818 * Note: In the future use a sd_send_scsi_MODE_SENSE() routine 28819 * 28820 * Arguments: dev - the device 'dev_t' 28821 * 28822 * Return Code: int indicating if the device is write protected (1) or not (0) 28823 * 28824 * Context: Kernel thread. 
28825 * 28826 */ 28827 28828 static int 28829 sr_check_wp(dev_t dev) 28830 { 28831 struct sd_lun *un; 28832 uchar_t device_specific; 28833 uchar_t *sense; 28834 int hdrlen; 28835 int rval; 28836 int retry_flag = FALSE; 28837 28838 /* 28839 * Note: The return codes for this routine should be reworked to 28840 * properly handle the case of a NULL softstate. 28841 */ 28842 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28843 return (FALSE); 28844 } 28845 28846 if (un->un_f_cfg_is_atapi == TRUE) { 28847 retry_flag = TRUE; 28848 } 28849 28850 retry: 28851 if (un->un_f_cfg_is_atapi == TRUE) { 28852 /* 28853 * The mode page contents are not required; set the allocation 28854 * length for the mode page header only. 28855 */ 28856 hdrlen = MODE_HEADER_LENGTH_GRP2; 28857 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28858 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 28859 MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 28860 device_specific = 28861 ((struct mode_header_grp2 *)sense)->device_specific; 28862 } else { 28863 hdrlen = MODE_HEADER_LENGTH; 28864 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28865 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 28866 MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 28867 device_specific = 28868 ((struct mode_header *)sense)->device_specific; 28869 } 28870 28871 if (rval != 0) { 28872 if ((un->un_f_cfg_is_atapi == TRUE) && (retry_flag)) { 28873 /* 28874 * An ATAPI Zip drive has been observed to 28875 * report a check condition on the first attempt, 28876 * with sense data indicating power on or bus 28877 * device reset. Hence, on failure, retry at least 28878 * once for ATAPI devices. 28879 */ 28880 retry_flag = FALSE; 28881 kmem_free(sense, hdrlen); 28882 goto retry; 28883 } else { 28884 /* 28885 * Write protect mode sense failed; not all disks 28886 * understand this query. Return FALSE, assuming that 28887 * these devices are writable. 28888 */ 28889 rval = FALSE; 28890 } 28891 } else { 28892 if (device_specific & WRITE_PROTECT) { 28893 rval = TRUE; 28894 } else { 28895 rval = FALSE; 28896 } 28897 } 28898 kmem_free(sense, hdrlen); 28899 return (rval); 28900 } 28901 28902 28903 /* 28904 * Function: sr_volume_ctrl() 28905 * 28906 * Description: This routine is the driver entry point for handling CD-ROM 28907 * audio output volume ioctl requests. (CDROMVOLCTRL) 28908 * 28909 * Arguments: dev - the device 'dev_t' 28910 * data - pointer to user audio volume control structure 28911 * flag - this argument is a pass through to ddi_copyxxx() 28912 * directly from the mode argument of ioctl().
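 *
 * Example: a sketch of the corresponding user-level call (illustrative
 * only; assumes an open descriptor fd as in the sr_pause_resume()
 * example). Channels 0 and 1 are set to full volume; channels 2 and 3
 * are not modified by this routine:
 *
 *	struct cdrom_volctrl vc;
 *
 *	vc.channel0 = 0xff;
 *	vc.channel1 = 0xff;
 *	vc.channel2 = 0;
 *	vc.channel3 = 0;
 *	if (ioctl(fd, CDROMVOLCTRL, &vc) != 0)
 *		perror("CDROMVOLCTRL");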
28913 * 28914 * Return Code: the code returned by sd_send_scsi_cmd() 28915 * EFAULT if ddi_copyxxx() fails 28916 * ENXIO if fail ddi_get_soft_state 28917 * EINVAL if data pointer is NULL 28918 * 28919 */ 28920 28921 static int 28922 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 28923 { 28924 struct sd_lun *un; 28925 struct cdrom_volctrl volume; 28926 struct cdrom_volctrl *vol = &volume; 28927 uchar_t *sense_page; 28928 uchar_t *select_page; 28929 uchar_t *sense; 28930 uchar_t *select; 28931 int sense_buflen; 28932 int select_buflen; 28933 int rval; 28934 28935 if (data == NULL) { 28936 return (EINVAL); 28937 } 28938 28939 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28940 (un->un_state == SD_STATE_OFFLINE)) { 28941 return (ENXIO); 28942 } 28943 28944 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 28945 return (EFAULT); 28946 } 28947 28948 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28949 struct mode_header_grp2 *sense_mhp; 28950 struct mode_header_grp2 *select_mhp; 28951 int bd_len; 28952 28953 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 28954 select_buflen = MODE_HEADER_LENGTH_GRP2 + 28955 MODEPAGE_AUDIO_CTRL_LEN; 28956 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28957 select = kmem_zalloc(select_buflen, KM_SLEEP); 28958 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 28959 sense_buflen, MODEPAGE_AUDIO_CTRL, 28960 SD_PATH_STANDARD)) != 0) { 28961 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28962 "sr_volume_ctrl: Mode Sense Failed\n"); 28963 kmem_free(sense, sense_buflen); 28964 kmem_free(select, select_buflen); 28965 return (rval); 28966 } 28967 sense_mhp = (struct mode_header_grp2 *)sense; 28968 select_mhp = (struct mode_header_grp2 *)select; 28969 bd_len = (sense_mhp->bdesc_length_hi << 8) | 28970 sense_mhp->bdesc_length_lo; 28971 if (bd_len > MODE_BLK_DESC_LENGTH) { 28972 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28973 "sr_volume_ctrl: Mode Sense returned invalid " 28974 "block descriptor length\n"); 28975 kmem_free(sense, sense_buflen); 28976 kmem_free(select, select_buflen); 28977 return (EIO); 28978 } 28979 sense_page = (uchar_t *) 28980 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 28981 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 28982 select_mhp->length_msb = 0; 28983 select_mhp->length_lsb = 0; 28984 select_mhp->bdesc_length_hi = 0; 28985 select_mhp->bdesc_length_lo = 0; 28986 } else { 28987 struct mode_header *sense_mhp, *select_mhp; 28988 28989 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28990 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28991 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28992 select = kmem_zalloc(select_buflen, KM_SLEEP); 28993 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 28994 sense_buflen, MODEPAGE_AUDIO_CTRL, 28995 SD_PATH_STANDARD)) != 0) { 28996 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28997 "sr_volume_ctrl: Mode Sense Failed\n"); 28998 kmem_free(sense, sense_buflen); 28999 kmem_free(select, select_buflen); 29000 return (rval); 29001 } 29002 sense_mhp = (struct mode_header *)sense; 29003 select_mhp = (struct mode_header *)select; 29004 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 29005 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29006 "sr_volume_ctrl: Mode Sense returned invalid " 29007 "block descriptor length\n"); 29008 kmem_free(sense, sense_buflen); 29009 kmem_free(select, select_buflen); 29010 return (EIO); 29011 } 29012 sense_page = (uchar_t *) 29013 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 29014 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 29015 select_mhp->length = 0; 29016 select_mhp->bdesc_length = 0; 29017 } 29018 /* 29019 * Note: An audio control data structure could be created and overlaid 29020 * on the following in place of the array indexing method implemented. 29021 */ 29022 29023 /* Build the select data for the user volume data */ 29024 select_page[0] = MODEPAGE_AUDIO_CTRL; 29025 select_page[1] = 0xE; 29026 /* Set the immediate bit */ 29027 select_page[2] = 0x04; 29028 /* Zero out reserved fields */ 29029 select_page[3] = 0x00; 29030 select_page[4] = 0x00; 29031 /* Return sense data for fields not to be modified */ 29032 select_page[5] = sense_page[5]; 29033 select_page[6] = sense_page[6]; 29034 select_page[7] = sense_page[7]; 29035 /* Set the user specified volume levels for channels 0 and 1 */ 29036 select_page[8] = 0x01; 29037 select_page[9] = vol->channel0; 29038 select_page[10] = 0x02; 29039 select_page[11] = vol->channel1; 29040 /* Channels 2 and 3 are currently unsupported, so return the sense data */ 29041 select_page[12] = sense_page[12]; 29042 select_page[13] = sense_page[13]; 29043 select_page[14] = sense_page[14]; 29044 select_page[15] = sense_page[15]; 29045 29046 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 29047 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 29048 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 29049 } else { 29050 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 29051 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 29052 } 29053 29054 kmem_free(sense, sense_buflen); 29055 kmem_free(select, select_buflen); 29056 return (rval); 29057 } 29058 29059 29060 /* 29061 * Function: sr_read_sony_session_offset() 29062 * 29063 * Description: This routine is the driver entry point for handling CD-ROM 29064 * ioctl requests for session offset information. (CDROMREADOFFSET) 29065 * The address of the first track in the last session of a 29066 * multi-session CD-ROM is returned. 29067 * 29068 * Note: This routine uses a vendor specific key value in the 29069 * command control field without implementing any vendor check here 29070 * or in the ioctl routine. 29071 * 29072 * Arguments: dev - the device 'dev_t' 29073 * data - pointer to an int to hold the requested address 29074 * flag - this argument is a pass through to ddi_copyxxx() 29075 * directly from the mode argument of ioctl(). 29076 * 29077 * Return Code: the code returned by sd_send_scsi_cmd() 29078 * EFAULT if ddi_copyxxx() fails 29079 * ENXIO if fail ddi_get_soft_state 29080 * EINVAL if data pointer is NULL 29081 */ 29082 29083 static int 29084 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 29085 { 29086 struct sd_lun *un; 29087 struct uscsi_cmd *com; 29088 caddr_t buffer; 29089 char cdb[CDB_GROUP1]; 29090 int session_offset = 0; 29091 int rval; 29092 29093 if (data == NULL) { 29094 return (EINVAL); 29095 } 29096 29097 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 29098 (un->un_state == SD_STATE_OFFLINE)) { 29099 return (ENXIO); 29100 } 29101 29102 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 29103 bzero(cdb, CDB_GROUP1); 29104 cdb[0] = SCMD_READ_TOC; 29105 /* 29106 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 29107 * (4 byte TOC response header + 8 byte response data) 29108 */ 29109 cdb[8] = SONY_SESSION_OFFSET_LEN; 29110 /* Byte 9 is the control byte.

/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information. (CDROMREADOFFSET)
 *		The address of the first track in the last session of a
 *		multi-session CD-ROM is returned.
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check here
 *		or in the ioctl routine.
 *
 * Arguments: dev	- the device 'dev_t'
 *	      data	- pointer to an int to hold the requested address
 *	      flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	caddr_t			buffer;
	char			cdb[CDB_GROUP1];
	int			session_offset = 0;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte response data)
	 */
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	/* Byte 9 is the control byte. A vendor specific value is used */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The offset is returned in current lbasize blocks.
		 * Convert to 2k blocks before returning it to the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}
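

/*
 * Usage note (illustrative): the CDROMREADOFFSET ioctl serviced by
 * sr_read_sony_session_offset() returns the starting address, in 2K
 * blocks, of the first track in the last session of a multi-session
 * disc. A minimal userland sketch, assuming a hypothetical device path:
 *
 *	#include <sys/cdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	last_session_offset(const char *path, int *offsetp)
 *	{
 *		int fd, rv;
 *
 *		if ((fd = open(path, O_RDONLY | O_NDELAY)) < 0)
 *			return (-1);
 *		rv = ioctl(fd, CDROMREADOFFSET, offsetp);
 *		(void) close(fd);
 *		return (rv);
 *	}
 */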

/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be initialized.
 *	      un	- sd_lun structure for the device.
 *	      flag	- the km flags passed to constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be destroyed.
 *	      un	- sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the range of blocks specified as parameter to ensure
 *		that a read-modify-write is atomic and no other I/O writes
 *		to the same location. The range is specified in terms
 *		of start and end blocks. Block numbers are the actual
 *		media block numbers and not system block numbers.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      startb	- The starting block number
 *	      endb	- The end block number
 *	      typ	- type of i/o - simple/read_modify_write
 *
 * Return Code: wm	- pointer to the wmap structure.
 *
 * Context: This routine can sleep.
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep onlist wmps
						 * while waiting; this macro
						 * will also do wmp = NULL;
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which wait
					 * is done; since tmp_wmp points
					 * to the inuse wmap, set sl_wmp to
					 * tmp_wmp and change the state to sleep
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked, try to get a wmap.
			 * First attempt it with NO_SLEEP; we want to avoid a
			 * sleep if possible, as we will have to release the
			 * sd mutex if we have to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * we released the mutex so recheck and go to
				 * check list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We exit out of the state machine since we
				 * have the wmap. Do the housekeeping first:
				 * place the wmap on the wmap list if it is not
				 * on it already and then set the state to done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list then link
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			if (!(sl_wmp->wm_flags & SD_WM_BUSY)) {
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability of
			 * the range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Find out if there is any I/O overlapping this one.
 *		Returns the write-map of the 1st such I/O, NULL otherwise.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      startb	- The starting block number
 *	      endb	- The end block number
 *
 * Return Code: wm	- pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      wmp	- sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free the write map if nobody else is waiting on it.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      wm	- sd_w_map which needs to be unlocked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be freed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}
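

/*
 * Usage note (illustrative): sd_range_lock()/sd_range_unlock() bracket
 * the read and write phases of a read-modify-write so that no other I/O
 * can write the same media blocks in between. A caller-side sketch,
 * with hypothetical block numbers:
 *
 *	struct sd_w_map *wm;
 *
 *	wm = sd_range_lock(un, first_blk, last_blk, SD_WTYPE_RMW);
 *	...read the enclosing target blocks, merge in the new data,
 *	...issue the write
 *	sd_range_unlock(un, wm);
 *
 * sd_range_lock() can sleep, so this sequence must not be started from
 * interrupt context.
 */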

/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request. This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
 *
 * Arguments: arg	- a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the completion
	 * of the read portion of the rmw request completing under interrupt
	 * context. The write request must be sent from here down the iostart
	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
	 * we use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine will be called from sddump. If sddump is called
 *		with an I/O which is not aligned on a device blocksize
 *		boundary, then the write has to be converted to a
 *		read-modify-write. Do the read part here in order to keep
 *		sddump simple.
 *		Note that the sd_mutex is held across the call to this
 *		routine.
 *
 * Arguments: un	- sd_lun
 *	      blkno	- block number in terms of media block size.
 *	      nblk	- number of blocks.
 *	      bpp	- pointer to pointer to the buf structure. On return
 *			  from this function, *bpp points to the valid buffer
 *			  to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD. We should only check
		 * errors if this condition is not true. Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the hba.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE 1st, give up if device is gone,
		 * no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}
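

/*
 * Worked example (illustrative numbers): with un_tgt_blocksize == 2048
 * and un_sys_blocksize == 512, a dump write of 4K starting at system
 * block 3 covers media bytes 1536..5631, which is not aligned on a 2K
 * target-block boundary. sddump_do_read_of_rmw() first reads the three
 * enclosing 2K target blocks (media bytes 0..6143) so that the
 * unmodified head and tail bytes are preserved when the full target
 * blocks are written back.
 */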

/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 * Arguments: un	- pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go thru the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * update kstat since the bp is moved out of
			 * the waitq
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}
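

/*
 * Usage note (illustrative): B_FAILFAST is set by upper-layer consumers,
 * e.g. a volume manager holding a redundant copy of the data, that
 * prefer a fast error over lengthy retries. A hypothetical caller-side
 * sketch:
 *
 *	bp->b_flags |= B_FAILFAST;
 *	(void) ldi_strategy(lh, bp);
 *
 * Once the driver enters SD_FAILFAST_ACTIVE, sd_failfast_flushq() above
 * returns such bufs immediately with EIO instead of queueing them.
 */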
	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp	- ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare the next I/O operation using DMA_PARTIAL
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate the next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the
	 * HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_initpkt with NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}
#endif
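
/*
 * Worked example (illustrative numbers): suppose b_bcount is 1 MB, the
 * HBA bound only the first 256 KB of it (so xb_dma_resid == 768 KB when
 * the first window completes), and un_tgt_blocksize is 512. Then
 * num_blks_not_xfered = 768K / 512 = 1536 blocks, and strt_blk_num is
 * xb_blkno + (1M - 768K) / 512 = xb_blkno + 512 blocks, i.e. the next
 * window begins exactly where the completed window ended.
 */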

/*
 * Note: The following sd_faultinjection_ioctl( ) routines implement
 * driver support for handling fault injection for error analysis,
 * causing faults in multiple layers of the driver.
 */

#ifdef SD_FAULT_INJECTION
static uint_t	sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		faultinjection ioctls to inject errors into the
 *		layer model
 *
 * Arguments: cmd	- the ioctl cmd received
 *	      arg	- the arguments from user and returns
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR allowed in Queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store a xb struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from Injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}
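

/*
 * Usage note (illustrative): a fault-injection test harness would drive
 * this entry point with the SDIOC* ioctls in roughly the following
 * order; the device path and payload contents are hypothetical.
 *
 *	struct sd_fi_pkt fi_pkt = { 0 };
 *	uint_t push_cnt = 1;
 *	char log[SD_FI_MAX_BUF];
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *
 *	(void) ioctl(fd, SDIOCSTART);			start a session
 *	(void) ioctl(fd, SDIOCINSERTPKT, &fi_pkt);	stage a pkt fault
 *	(void) ioctl(fd, SDIOCPUSH, &push_cnt);		push staged faults
 *	(void) ioctl(fd, SDIOCRUN);			arm injection
 *	...issue I/O to the device to trigger the fault...
 *	(void) ioctl(fd, SDIOCRETRIEVE, log);		fetch the log
 *	(void) ioctl(fd, SDIOCSTOP);			end the session
 */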

/*
 * Function: sd_injection_log()
 *
 * Description: This routine adds buf to the already existing injection log
 *		for retrieval via faultinjection_ioctl for use in fault
 *		detection and recovery
 *
 * Arguments: buf	- the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to Injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char	*destp = (char *)un->sd_fi_log + offset;
		int	i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}

/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 * Arguments: pktp	- packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb, and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}


	/* take next set off fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_geometry_is_valid,
		    "un_f_geometry_is_valid");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether values were
	 * set or not, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */